diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index d1e848e2..7967152b 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -20,7 +20,7 @@ jobs: with: python-version: ${{ matrix.python-version }} - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache-datadriven @@ -29,7 +29,7 @@ jobs: pip install coverage pip install importlib_metadata - name: Update - run: pip install --upgrade --upgrade-strategy eager -e . + run: pip install --upgrade --upgrade-strategy eager -e .[datadriven] - name: Run coverage run: | coverage run -m tests.test_base_models diff --git a/.github/workflows/manual_release.yml b/.github/workflows/manual_release.yml index 7c316f31..26291175 100644 --- a/.github/workflows/manual_release.yml +++ b/.github/workflows/manual_release.yml @@ -1,12 +1,13 @@ -name: Manual Release +name: Matrixed Tests on: workflow_dispatch jobs: - analysis: - timeout-minutes: 30 + run_tests: + timeout-minutes: 45 strategy: + fail-fast: false matrix: python-version: ['3.9', '3.12'] # TODO: Add 3.13 when it is released os: [ubuntu-latest, macos-latest, macos-13, windows-latest] @@ -17,11 +18,12 @@ jobs: uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Update - run: pip install --upgrade --upgrade-strategy eager -e '.[datadriven, tests]' + - name: Install + run: pip install --upgrade --upgrade-strategy eager -e '.[datadriven, test]' - name: Run tests run: python -m tests - name: Upload coverage to Codecov + if: ${{ matrix.python-version == '3.12' && matrix.os == 'ubuntu-latest' }} uses: codecov/codecov-action@v3 with: - file: ./coverage.xml + file: ./coverage.xml \ No newline at end of file diff --git a/.github/workflows/print-benchmarking.yml b/.github/workflows/print-benchmarking.yml index 354e49ce..3213d4d0 100644 --- a/.github/workflows/print-benchmarking.yml +++ b/.github/workflows/print-benchmarking.yml @@ -1,7 +1,15 @@ -# name: Print Benchmarking +name: Print Benchmarking -# on: pull_request +on: pull_request +jobs: + benchmark_branch: + timeout-minutes: 5 + runs-on: ubuntu-latest + steps: + - name: Placeholder + run: echo "Placeholder benchmark in the process of being fixed - job will pass."
+ # jobs: # benchmark_branch: # timeout-minutes: 20 diff --git a/.github/workflows/python-package-manual.yml b/.github/workflows/python-package-manual.yml new file mode 100644 index 00000000..c8fc7195 --- /dev/null +++ b/.github/workflows/python-package-manual.yml @@ -0,0 +1,28 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Manual Tests + +on: + workflow_dispatch: + +jobs: + test_tutorials_part_2: + timeout-minutes: 35 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.9' + - name: Install dependencies cache + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: pip-cache-datadriven + - name: Update + run: | + pip install --upgrade --upgrade-strategy eager -e .[datadriven,test] + - name: Run tests + run: python -m tests.test_tutorials_part_2 \ No newline at end of file diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index ef54c36e..33b9011d 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -18,9 +18,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -36,9 +36,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -54,9 +54,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -72,9 +72,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -90,9 +90,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -108,9 +108,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache-datadriven @@ -126,9 +126,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -146,9 +146,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -164,9 +164,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: 
actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -182,9 +182,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -200,9 +200,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -210,24 +210,12 @@ jobs: run: pip install --upgrade --upgrade-strategy eager -e . - name: Run tests run: python -m tests.test_estimate_params - # test_examples: - # timeout-minutes: 25 - # runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@v3 - # - name: Set up Python - # uses: actions/setup-python@v4 - # with: - # python-version: '3.7' - # - name: Install dependencies cache - # uses: actions/cache@v2 - # with: - # path: ~/.cache/pip - # key: pip-cache - # - name: Update - # run: pip install --upgrade --upgrade-strategy eager -e . - # - name: Run tests - # run: python -m tests.test_examples + test_examples: + timeout-minutes: 5 + runs-on: ubuntu-latest + steps: + - name: Placeholder + run: echo "Placeholder test in the process of being removed - job will pass." test_horizon: timeout-minutes: 5 runs-on: ubuntu-latest @@ -236,9 +224,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -254,9 +242,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -272,9 +260,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -290,9 +278,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -308,9 +296,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -326,9 +314,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -344,14 +332,14 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip - key: pip-cache + key: pip-cache-datadriven - name: Update - run: pip install --upgrade --upgrade-strategy eager -e .
+ run: pip install --upgrade --upgrade-strategy eager -e .[datadriven] - name: Run tests run: python -m tests.test_predictors test_serialization: @@ -362,14 +350,14 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip - key: pip-cache + key: pip-cache-datadriven - name: Update - run: pip install --upgrade --upgrade-strategy eager -e . + run: pip install --upgrade --upgrade-strategy eager -e .[datadriven] - name: Run tests run: python -m tests.test_serialization test_sim_result: @@ -380,9 +368,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -398,14 +386,14 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip - key: pip-cache + key: pip-cache-datadriven - name: Update - run: pip install --upgrade --upgrade-strategy eager -e . + run: pip install --upgrade --upgrade-strategy eager -e .[datadriven] - name: Run tests run: python -m tests.test_state_estimators test_surrogates: @@ -416,9 +404,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache-datadriven @@ -426,27 +414,45 @@ jobs: run: pip install --upgrade --upgrade-strategy eager -e .[datadriven] - name: Run tests run: python -m tests.test_surrogates - # test_tutorials: - # timeout-minutes: 5 - # runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@v3 - # - name: Set up Python - # uses: actions/setup-python@v4 - # with: - # python-version: '3.7' - # - name: Install dependencies cache - # uses: actions/cache@v2 - # with: - # path: ~/.cache/pip - # key: pip-cache-datadriven - # - name: Update - # run: | - # pip install --upgrade --upgrade-strategy eager -e . 
- # pip install notebook - # pip install testbook - # - name: Run tests - # run: python -m tests.test_tutorials + test_tutorials: + timeout-minutes: 5 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.9' + - name: Install dependencies cache + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: pip-cache-datadriven + - name: Update + run: | + pip install --upgrade --upgrade-strategy eager -e .[datadriven,test] + - name: Run tests + run: python -m tests.test_tutorials + test_tutorials_part_3: + timeout-minutes: 15 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.9' + - name: Install dependencies cache + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: pip-cache-datadriven + - name: Update + run: | + pip install --upgrade --upgrade-strategy eager -e .[datadriven,test] + pip install prog_server + - name: Run tests + run: python -m tests.test_tutorials_part_3 test_uav_model: timeout-minutes: 10 runs-on: ubuntu-latest @@ -455,9 +461,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -473,9 +479,9 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.9' - name: Install dependencies cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index 0bca0ddb..00000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: Release - -on: - push: - branches: - - 'release/**' - - 'master' - -jobs: - analysis: - timeout-minutes: 30 - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ['3.9'] - steps: - - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - python -m pip install -e . - - name: Lint with flake8 - run: | - python -m pip install flake8 - # stop the build if there are Python syntax errors or undefined names - flake8 src/prog_models --count --select=E9,F63,F7,F82 --show-source --statistics - # exit-zero treats all errors as warnings. 
The GitHub editor is 127 chars wide - flake8 src/prog_models --count --benchmark --exit-zero --show-source --max-complexity=10 --max-line-length=127 --statistics --tee --output-file=lint_results_${{ matrix.python-version }}.txt - - name: Upload Lint Results - uses: actions/upload-artifact@v3 - with: - name: lint_results_${{matrix.python-version}} - path: lint_results_${{matrix.python-version}}.txt diff --git a/.github/workflows/update-cache.yml b/.github/workflows/update-cache.yml index 608ad5c7..f7ad5226 100644 --- a/.github/workflows/update-cache.yml +++ b/.github/workflows/update-cache.yml @@ -16,11 +16,8 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install -e .[datadriven] - python -m pip install notebook - python -m pip install testbook - python -m pip install requests - - uses: actions/cache@v3 + python -m pip install -e .[datadriven,test] + - uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache @@ -40,11 +37,8 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install -e '.[datadriven]' - python -m pip install notebook - python -m pip install testbook - python -m pip install requests - - uses: actions/cache@v3 + python -m pip install -e '.[datadriven,test]' + - uses: actions/cache@v4 with: path: ~/.cache/pip key: pip-cache-datadriven diff --git a/README.md b/README.md index 82dd86ed..5e700670 100644 --- a/README.md +++ b/README.md @@ -41,15 +41,15 @@ Use the following to cite this repository: | author = {Christopher Teubert and Katelyn Jarvis Griffith and Matteo Corbetta and Chetan Kulkarni and Portia Banerjee and Matthew Daigle}, | title = {{ProgPy Python Prognostics Packages}}, | month = May, - | year = 2024, - | version = {1.7}, + | year = 2025, + | version = {1.8}, | url = {https://nasa.github.io/progpy} | } ``` The corresponding reference should look like this: -C. Teubert, K. Jarvis Griffith, M. Corbetta, C. Kulkarni, P. Banerjee, M. Daigle, ProgPy Python Prognostics Packages, v1.7, May 2024. URL https://github.com/nasa/progpy. +C. Teubert, K. Jarvis Griffith, M. Corbetta, C. Kulkarni, P. Banerjee, M. Daigle, ProgPy Python Prognostics Packages, v1.8, May 2025. URL https://github.com/nasa/progpy. ## Contributing Organizations ProgPy was created by a partnership of multiple organizations, working together to build a set of high-quality prognostic tools for the wider PHM Community. We would like to give a big thank you for the ProgPy community, especially the following contributing organizations: diff --git a/docs/.buildinfo b/docs/.buildinfo index 0af31a2c..585f98c4 100644 --- a/docs/.buildinfo +++ b/docs/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 7d941a484a4f3440c5e3ef5478d11930 +config: 509dd36345baaa5ea4fe15c084c142c8 tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/.buildinfo.bak b/docs/.buildinfo.bak new file mode 100644 index 00000000..0af31a2c --- /dev/null +++ b/docs/.buildinfo.bak @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 
+config: 7d941a484a4f3440c5e3ef5478d11930 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/.doctrees/api_ref/prog_server/load_ests.doctree b/docs/.doctrees/api_ref/prog_server/load_ests.doctree index 6c8c3309..438b721d 100644 Binary files a/docs/.doctrees/api_ref/prog_server/load_ests.doctree and b/docs/.doctrees/api_ref/prog_server/load_ests.doctree differ diff --git a/docs/.doctrees/api_ref/prog_server/prog_server.doctree b/docs/.doctrees/api_ref/prog_server/prog_server.doctree index 80d44d8e..ed2d4355 100644 Binary files a/docs/.doctrees/api_ref/prog_server/prog_server.doctree and b/docs/.doctrees/api_ref/prog_server/prog_server.doctree differ diff --git a/docs/.doctrees/api_ref/progpy.doctree b/docs/.doctrees/api_ref/progpy.doctree index 9938a9f5..a0359fce 100644 Binary files a/docs/.doctrees/api_ref/progpy.doctree and b/docs/.doctrees/api_ref/progpy.doctree differ diff --git a/docs/.doctrees/api_ref/progpy/DataModel.doctree b/docs/.doctrees/api_ref/progpy/DataModel.doctree index 9c25a799..3fd2bd38 100644 Binary files a/docs/.doctrees/api_ref/progpy/DataModel.doctree and b/docs/.doctrees/api_ref/progpy/DataModel.doctree differ diff --git a/docs/.doctrees/api_ref/progpy/DiscreteStates.doctree b/docs/.doctrees/api_ref/progpy/DiscreteStates.doctree new file mode 100644 index 00000000..956f1ad2 Binary files /dev/null and b/docs/.doctrees/api_ref/progpy/DiscreteStates.doctree differ diff --git a/docs/.doctrees/api_ref/progpy/IncludedModels.doctree b/docs/.doctrees/api_ref/progpy/IncludedModels.doctree index 2814c283..914f4c77 100644 Binary files a/docs/.doctrees/api_ref/progpy/IncludedModels.doctree and b/docs/.doctrees/api_ref/progpy/IncludedModels.doctree differ diff --git a/docs/.doctrees/api_ref/progpy/LinearModel.doctree b/docs/.doctrees/api_ref/progpy/LinearModel.doctree index 349c35c9..bddb6ef9 100644 Binary files a/docs/.doctrees/api_ref/progpy/LinearModel.doctree and b/docs/.doctrees/api_ref/progpy/LinearModel.doctree differ diff --git a/docs/.doctrees/api_ref/progpy/PrognosticModel.doctree b/docs/.doctrees/api_ref/progpy/PrognosticModel.doctree index e4eed8c0..c8d6f3cc 100644 Binary files a/docs/.doctrees/api_ref/progpy/PrognosticModel.doctree and b/docs/.doctrees/api_ref/progpy/PrognosticModel.doctree differ diff --git a/docs/.doctrees/api_ref/progpy/StateEstimator.doctree b/docs/.doctrees/api_ref/progpy/StateEstimator.doctree index 9f7d1989..69a9d185 100644 Binary files a/docs/.doctrees/api_ref/progpy/StateEstimator.doctree and b/docs/.doctrees/api_ref/progpy/StateEstimator.doctree differ diff --git a/docs/.doctrees/api_ref/progpy/UncertainData.doctree b/docs/.doctrees/api_ref/progpy/UncertainData.doctree index 972adca4..6249c544 100644 Binary files a/docs/.doctrees/api_ref/progpy/UncertainData.doctree and b/docs/.doctrees/api_ref/progpy/UncertainData.doctree differ diff --git a/docs/.doctrees/dev_guide.doctree b/docs/.doctrees/dev_guide.doctree index cec3d9ed..90ed196d 100644 Binary files a/docs/.doctrees/dev_guide.doctree and b/docs/.doctrees/dev_guide.doctree differ diff --git a/docs/.doctrees/environment.pickle b/docs/.doctrees/environment.pickle index f634582e..58903c16 100644 Binary files a/docs/.doctrees/environment.pickle and b/docs/.doctrees/environment.pickle differ diff --git a/docs/.doctrees/glossary.doctree b/docs/.doctrees/glossary.doctree index 1c7e4a5a..895f25da 100644 Binary files a/docs/.doctrees/glossary.doctree and b/docs/.doctrees/glossary.doctree differ diff --git a/docs/.doctrees/guide.doctree b/docs/.doctrees/guide.doctree index 
07a6e431..411b1e77 100644 Binary files a/docs/.doctrees/guide.doctree and b/docs/.doctrees/guide.doctree differ diff --git a/docs/.doctrees/index.doctree b/docs/.doctrees/index.doctree index 76ad0daf..e485b0e3 100644 Binary files a/docs/.doctrees/index.doctree and b/docs/.doctrees/index.doctree differ diff --git a/docs/.doctrees/installing.doctree b/docs/.doctrees/installing.doctree index 5befa11c..9d212238 100644 Binary files a/docs/.doctrees/installing.doctree and b/docs/.doctrees/installing.doctree differ diff --git a/docs/.doctrees/prog_algs_guide.doctree b/docs/.doctrees/prog_algs_guide.doctree index 2a7807a9..dfad894b 100644 Binary files a/docs/.doctrees/prog_algs_guide.doctree and b/docs/.doctrees/prog_algs_guide.doctree differ diff --git a/docs/.doctrees/prog_models_guide.doctree b/docs/.doctrees/prog_models_guide.doctree index 0272006d..1c48a34b 100644 Binary files a/docs/.doctrees/prog_models_guide.doctree and b/docs/.doctrees/prog_models_guide.doctree differ diff --git a/docs/.doctrees/prog_server_guide.doctree b/docs/.doctrees/prog_server_guide.doctree index 8dcfc67b..75db71f1 100644 Binary files a/docs/.doctrees/prog_server_guide.doctree and b/docs/.doctrees/prog_server_guide.doctree differ diff --git a/docs/.doctrees/releases.doctree b/docs/.doctrees/releases.doctree index 245bd6c6..855e7dbc 100644 Binary files a/docs/.doctrees/releases.doctree and b/docs/.doctrees/releases.doctree differ diff --git a/docs/.doctrees/troubleshooting.doctree b/docs/.doctrees/troubleshooting.doctree new file mode 100644 index 00000000..a4426b31 Binary files /dev/null and b/docs/.doctrees/troubleshooting.doctree differ diff --git a/docs/_downloads/00d36657bdd9c3fc811494e7586f6a86/horizon.py b/docs/_downloads/00d36657bdd9c3fc811494e7586f6a86/horizon.py index 3b161f7e..74d1ec24 100644 --- a/docs/_downloads/00d36657bdd9c3fc811494e7586f6a86/horizon.py +++ b/docs/_downloads/00d36657bdd9c3fc811494e7586f6a86/horizon.py @@ -2,12 +2,12 @@ """ This example performs a state estimation and prediction with uncertainty given a Prognostics Model with a specific prediction horizon. This prediction horizon marks the end of the "time of interest" for the prediction. Often this represents the end of a mission or sufficiently in the future where the user is unconcerned with the events - + Method: An instance of the Thrown Object model in progpy is created, and the prediction process is achieved in three steps: 1) State estimation of the current state is performed using a chosen state_estimator, and samples are drawn from this estimate 2) Prediction of future states (with uncertainty) and the times at which the event thresholds will be reached, within the prediction horizon. 
All events outside the horizon come back as None and are ignored in metrics -Results: +Results: i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction ii) Time event is predicted to occur (with uncertainty) """ @@ -18,13 +18,18 @@ from progpy.uncertain_data import MultivariateNormalDist from pprint import pprint + def run_example(): # Step 1: Setup model & future loading m = ThrownObject(process_noise=0.5, measurement_noise=0.15) initial_state = m.initialize() NUM_SAMPLES = 1000 - x = MultivariateNormalDist(initial_state.keys(), initial_state.values(), np.diag([x_i*0.01 for x_i in initial_state.values()])) + x = MultivariateNormalDist( + initial_state.keys(), + initial_state.values(), + np.diag([x_i * 0.01 for x_i in initial_state.values()]), + ) # Step 2: Demonstrating Predictor print("\nPerforming Prediction Step...") @@ -38,20 +43,28 @@ def run_example(): # We're saying we are not interested in any events that occur after this time PREDICTION_HORIZON = 7.7 STEP_SIZE = 0.01 - mc_results = mc.predict(x, n_samples=NUM_SAMPLES,dt=STEP_SIZE, horizon = PREDICTION_HORIZON) - + mc_results = mc.predict( + x, n_samples=NUM_SAMPLES, dt=STEP_SIZE, horizon=PREDICTION_HORIZON + ) + print("\nPredicted Time of Event:") metrics = mc_results.time_of_event.metrics() pprint(metrics) # Note this takes some time - mc_results.time_of_event.plot_hist(keys = 'impact') - mc_results.time_of_event.plot_hist(keys = 'falling') + mc_results.time_of_event.plot_hist(keys="impact") + mc_results.time_of_event.plot_hist(keys="falling") + + print( + "\nSamples where impact occurs before horizon: {:.2f}%".format( + metrics["impact"]["number of samples"] / NUM_SAMPLES * 100 + ) + ) - print("\nSamples where impact occurs before horizon: {:.2f}%".format(metrics['impact']['number of samples']/NUM_SAMPLES*100)) - # Step 4: Show all plots import matplotlib.pyplot as plt # For plotting + plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/05795c561b0fb31137ef5fd753641074/new_state_estimator_example.py b/docs/_downloads/05795c561b0fb31137ef5fd753641074/new_state_estimator_example.py index eb15d3af..dd750022 100644 --- a/docs/_downloads/05795c561b0fb31137ef5fd753641074/new_state_estimator_example.py +++ b/docs/_downloads/05795c561b0fb31137ef5fd753641074/new_state_estimator_example.py @@ -19,7 +19,8 @@ class BlindlyStumbleEstimator(StateEstimator): This state estimator was created by copying the state estimator template and filling out each function with the logic for this algorithm """ - def __init__(self, model, x0, measurement = None): + + def __init__(self, model, x0, measurement=None): """ Initialize the state estimator @@ -40,7 +41,10 @@ def estimate(self, t, u, z): z (dict): Measured output at time t """ # Generate new candidate state - x2 = {key : float(value) + 10*(random.random()-0.5) for (key,value) in self.state.items()} + x2 = { + key: float(value) + 10 * (random.random() - 0.5) + for (key, value) in self.state.items() + } # Calculate outputs z_est = self.m.output(t, self.state) @@ -51,7 +55,7 @@ def estimate(self, t, u, z): z_est2_score = sum([abs(z_est2[key] - z[key]) for key in self.m.outputs]) # Now choose the closer one - if z_est2_score < z_est_score: + if z_est2_score < z_est_score: self.state = x2 @property @@ -61,86 +65,95 @@ def x(self): """ return ScalarData(self.state) + # Model used in example -class 
ThrownObject(): +class ThrownObject: """ Model that similates an object thrown into the air without air resistance """ - inputs = [] # no inputs, no way to control + inputs = [] # no inputs, no way to control states = [ - 'x', # Position (m) - 'v' # Velocity (m/s) - ] - outputs = [ # Anything we can measure - 'x' # Position (m) + "x", # Position (m) + "v", # Velocity (m/s) + ] + outputs = [ # Anything we can measure + "x" # Position (m) ] events = [ - 'falling', # Event- object is falling - 'impact' # Event- object has impacted ground + "falling", # Event- object is falling + "impact", # Event- object has impacted ground ] # The Default parameters. Overwritten by passing parameters dictionary into constructor parameters = { - 'thrower_height': 1.83, # m - 'throwing_speed': 40, # m/s - 'g': -9.81, # Acceleration due to gravity in m/s^2 - 'process_noise': 0.0 # amount of noise in each step + "thrower_height": 1.83, # m + "throwing_speed": 40, # m/s + "g": -9.81, # Acceleration due to gravity in m/s^2 + "process_noise": 0.0, # amount of noise in each step } - def initialize(self, u = None, z = None): + def initialize(self, u=None, z=None): self.max_x = 0.0 return { - 'x': self.parameters['thrower_height'], # Thrown, so initial altitude is height of thrower - 'v': self.parameters['throwing_speed'] # Velocity at which the ball is thrown - this guy is an professional baseball pitcher - } - - def dx(self, t, x, u = None): + "x": self.parameters[ + "thrower_height" + ], # Thrown, so initial altitude is height of thrower + "v": self.parameters[ + "throwing_speed" + ], # Velocity at which the ball is thrown - this guy is an professional baseball pitcher + } + + def dx(self, t, x, u=None): # apply_process_noise is used to add process noise to each step return { - 'x': x['v'], - 'v': self.parameters['g'] # Acceleration of gravity + "x": x["v"], + "v": self.parameters["g"], # Acceleration of gravity } def output(self, t, x): - return { - 'x': x['x'] - } + return {"x": x["x"]} - def event_state(self, t, x): - self.max_x = max(self.max_x, x['x']) # Maximum altitude + def event_state(self, t, x): + self.max_x = max(self.max_x, x["x"]) # Maximum altitude return { - 'falling': max(x['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed - 'impact': max(x['x']/self.max_x,0) # 1 until falling begins, then it's fraction of height + "falling": max( + x["v"] / self.parameters["throwing_speed"], 0 + ), # Throwing speed is max speed + "impact": max( + x["x"] / self.max_x, 0 + ), # 1 until falling begins, then it's fraction of height } + def run_example(): - # This example creates a new state estimator, instead of using the included algorihtms. + # This example creates a new state estimator, instead of using the included algorihtms. # The new state estimator was defined above and can now be used like the UKF or PF - + # First we define the model to be used with the state estimator m = ThrownObject() # Lets pretend we have no idea what the state is, we'll provide an estimate of 0 - x0 = {key : 0 for key in m.states} + x0 = {key: 0 for key in m.states} filt = BlindlyStumbleEstimator(m, x0) # Now lets simulate it forward and see what it looks like dt = 0.1 x = m.initialize() - print('t: {}. State: {} (Ground truth: {})'.format(0, filt.x.mean, x)) - for i in range(1, int(8.4/dt)): + print("t: {}. 
State: {} (Ground truth: {})".format(0, filt.x.mean, x)) + for i in range(1, int(8.4 / dt)): # Update ground truth state - x = {key : x[key] + m.dx(i*dt, x)[key] * dt for key in m.states} + x = {key: x[key] + m.dx(i * dt, x)[key] * dt for key in m.states} # Run estimation step - filt.estimate(i*dt, None, m.output(i*dt, x)) + filt.estimate(i * dt, None, m.output(i * dt, x)) # Print result - print('t: {}. State: {} (Ground truth: {})'.format(i*dt, filt.x.mean, x)) + print("t: {}. State: {} (Ground truth: {})".format(i * dt, filt.x.mean, x)) # The results probably should show that it is estimating the state with a significant delay -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/06717c7dc69742382bea70d35e4e63b5/sim_pump.py b/docs/_downloads/06717c7dc69742382bea70d35e4e63b5/sim_pump.py index 6103eb90..0a4a0314 100644 --- a/docs/_downloads/06717c7dc69742382bea70d35e4e63b5/sim_pump.py +++ b/docs/_downloads/06717c7dc69742382bea70d35e4e63b5/sim_pump.py @@ -2,59 +2,83 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a centrifugal pump being simulated until threshold is met. +Example of a centrifugal pump being simulated until threshold is met. """ from progpy.models import CentrifugalPump -def run_example(): + +def run_example(): # Step 1: Setup Pump - pump = CentrifugalPump(process_noise= 0) - pump.parameters['x0']['wA'] = 0.01 # Set Wear Rate + pump = CentrifugalPump(process_noise=0) + pump.parameters["x0"]["wA"] = 0.01 # Set Wear Rate # Step 2: Setup Future Loading cycle_time = 3600 + def future_loading(t, x=None): t = t % cycle_time - if t < cycle_time/2.0: + if t < cycle_time / 2.0: V = 471.2389 - elif t < cycle_time/2 + 100: - V = 471.2389 + (t-cycle_time/2) + elif t < cycle_time / 2 + 100: + V = 471.2389 + (t - cycle_time / 2) elif t < cycle_time - 100: V = 571.2389 else: - V = 471.2398 - (t-cycle_time) + V = 471.2398 - (t - cycle_time) - return pump.InputContainer({ - 'Tamb': 290, - 'V': V, - 'pdisch': 928654, - 'psuc': 239179, - 'wsync': V * 0.8 - }) + return pump.InputContainer( + {"Tamb": 290, "V": V, "pdisch": 928654, "psuc": 239179, "wsync": V * 0.8} + ) # Step 3: Sim - first_output = pump.output(pump.initialize(future_loading(0),{})) - config = { - 'horizon': 1e5, - 'save_freq': 1e3, - 'print': True - } - simulated_results = pump.simulate_to_threshold(future_loading, first_output, **config) + first_output = pump.output(pump.initialize(future_loading(0), {})) + config = {"horizon": 1e5, "save_freq": 1e3, "print": True} + simulated_results = pump.simulate_to_threshold( + future_loading, first_output, **config + ) # Step 4: Plot Results from progpy.visualize import plot_timeseries - plot_timeseries(simulated_results.times, simulated_results.inputs, options={'compact': False, 'title': 'Inputs', - 'xlabel': 'time', 'ylabel':{lbl: lbl for lbl in pump.inputs}}) - plot_timeseries(simulated_results.times, simulated_results.states, options={'compact': False, 'title': 'States', 'xlabel': 'time', 'ylabel': ''}) - plot_timeseries(simulated_results.times, simulated_results.outputs, options={'compact': False, 'title': 'Outputs', 'xlabel': 'time', 'ylabel': ''}) - plot_timeseries(simulated_results.times, simulated_results.event_states, options={'compact': False, 'title': 'Events', 'xlabel': 'time', 'ylabel': ''}) + + plot_timeseries( + simulated_results.times, + simulated_results.inputs, + options={ + 
"compact": False, + "title": "Inputs", + "xlabel": "time", + "ylabel": {lbl: lbl for lbl in pump.inputs}, + }, + ) + plot_timeseries( + simulated_results.times, + simulated_results.states, + options={"compact": False, "title": "States", "xlabel": "time", "ylabel": ""}, + ) + plot_timeseries( + simulated_results.times, + simulated_results.outputs, + options={"compact": False, "title": "Outputs", "xlabel": "time", "ylabel": ""}, + ) + plot_timeseries( + simulated_results.times, + simulated_results.event_states, + options={"compact": False, "title": "Events", "xlabel": "time", "ylabel": ""}, + ) thresholds_met = [pump.threshold_met(x) for x in simulated_results.states] - plot_timeseries(simulated_results.times, thresholds_met, options={'compact': True, 'title': 'Events', 'xlabel': 'time', 'ylabel': ''}, legend = {'display': True}) + plot_timeseries( + simulated_results.times, + thresholds_met, + options={"compact": True, "title": "Events", "xlabel": "time", "ylabel": ""}, + legend={"display": True}, + ) + + import matplotlib.pyplot as plt - import matplotlib.pyplot as plt plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/067e6c82fdc9573fb921219d62002181/new_state_estimator_example.py b/docs/_downloads/067e6c82fdc9573fb921219d62002181/new_state_estimator_example.py new file mode 100644 index 00000000..dd750022 --- /dev/null +++ b/docs/_downloads/067e6c82fdc9573fb921219d62002181/new_state_estimator_example.py @@ -0,0 +1,159 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. + +""" +An example illustrating the creation of a new state estimator. + +In this example a basic state estimator is constructed by subclassing the StateEstimator class. This StateEstimator is then demonstrated with a ThrownObject model +""" + +from progpy.state_estimators import StateEstimator +from progpy.uncertain_data import ScalarData +import random + + +class BlindlyStumbleEstimator(StateEstimator): + """ + A new state estimator. This is not a very effective state estimator, but one that technically works. It blindly stumbles towards the correct state by randomly generating a new state each timestep and selecting the state that's most consistant with the measurements. + + I do not in any universe recommend using this state estimator for anything other then demonstrating a bad state estimator. It's intended as an example of creating a new state estimation algorithm. 
+ + This state estimator was created by copying the state estimator template and filling out each function with the logic for this algorithm + """ + + def __init__(self, model, x0, measurement=None): + """ + Initialize the state estimator + + Args: + model (PrognosticsModel): Model to be used in state estimation + x0 (dict): Initial State + """ + self.m = model + self.state = x0 + + def estimate(self, t, u, z): + """ + Update the state estimate + + Args: + t (Number): Time + u (dict): Inputs (load) for time t + z (dict): Measured output at time t + """ + # Generate new candidate state + x2 = { + key: float(value) + 10 * (random.random() - 0.5) + for (key, value) in self.state.items() + } + + # Calculate outputs + z_est = self.m.output(t, self.state) + z_est2 = self.m.output(t, x2) + + # Now score them each by how close they are to the measured z + z_est_score = sum([abs(z_est[key] - z[key]) for key in self.m.outputs]) + z_est2_score = sum([abs(z_est2[key] - z[key]) for key in self.m.outputs]) + + # Now choose the closer one + if z_est2_score < z_est_score: + self.state = x2 + + @property + def x(self): + """ + Measured state + """ + return ScalarData(self.state) + + +# Model used in example +class ThrownObject: + """ + Model that simulates an object thrown into the air without air resistance + """ + + inputs = [] # no inputs, no way to control + states = [ + "x", # Position (m) + "v", # Velocity (m/s) + ] + outputs = [ # Anything we can measure + "x" # Position (m) + ] + events = [ + "falling", # Event- object is falling + "impact", # Event- object has impacted ground + ] + + # The Default parameters. Overwritten by passing parameters dictionary into constructor + parameters = { + "thrower_height": 1.83, # m + "throwing_speed": 40, # m/s + "g": -9.81, # Acceleration due to gravity in m/s^2 + "process_noise": 0.0, # amount of noise in each step + } + + def initialize(self, u=None, z=None): + self.max_x = 0.0 + return { + "x": self.parameters[ + "thrower_height" + ], # Thrown, so initial altitude is height of thrower + "v": self.parameters[ + "throwing_speed" + ], # Velocity at which the ball is thrown - this guy is a professional baseball pitcher + } + + def dx(self, t, x, u=None): + # apply_process_noise is used to add process noise to each step + return { + "x": x["v"], + "v": self.parameters["g"], # Acceleration of gravity + } + + def output(self, t, x): + return {"x": x["x"]} + + def event_state(self, t, x): + self.max_x = max(self.max_x, x["x"]) # Maximum altitude + return { + "falling": max( + x["v"] / self.parameters["throwing_speed"], 0 + ), # Throwing speed is max speed + "impact": max( + x["x"] / self.max_x, 0 + ), # 1 until falling begins, then it's fraction of height + } + + +def run_example(): + # This example creates a new state estimator, instead of using the included algorithms. + # The new state estimator was defined above and can now be used like the UKF or PF + + # First we define the model to be used with the state estimator + m = ThrownObject() + + # Let's pretend we have no idea what the state is, we'll provide an estimate of 0 + x0 = {key: 0 for key in m.states} + filt = BlindlyStumbleEstimator(m, x0) + + # Now let's simulate it forward and see what it looks like + dt = 0.1 + x = m.initialize() + print("t: {}. 
State: {} (Ground truth: {})".format(0, filt.x.mean, x)) + for i in range(1, int(8.4 / dt)): + # Update ground truth state + x = {key: x[key] + m.dx(i * dt, x)[key] * dt for key in m.states} + + # Run estimation step + filt.estimate(i * dt, None, m.output(i * dt, x)) + + # Print result + print("t: {}. State: {} (Ground truth: {})".format(i * dt, filt.x.mean, x)) + + # The results probably should show that it is estimating the state with a significant delay + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/07264e8f18b6dbc8f0f634b492ebee76/benchmarking.py b/docs/_downloads/07264e8f18b6dbc8f0f634b492ebee76/benchmarking.py index c5c23bfa..b973f7eb 100644 --- a/docs/_downloads/07264e8f18b6dbc8f0f634b492ebee76/benchmarking.py +++ b/docs/_downloads/07264e8f18b6dbc8f0f634b492ebee76/benchmarking.py @@ -2,30 +2,34 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example benchmarking the computational efficiency of models. +Example benchmarking the computational efficiency of models. """ from timeit import timeit from progpy.models import BatteryCircuit + def run_example(): # Step 1: Create a model object batt = BatteryCircuit() - - # Step 2: Define future loading function + + # Step 2: Define future loading function def future_loading(t, x=None): # Constant Loading - return batt.InputContainer({'i': 2}) + return batt.InputContainer({"i": 2}) # Step 3: Benchmark simulation of 600 seconds - print('Benchmarking...') - def sim(): + print("Benchmarking...") + + def sim(): results = batt.simulate_to(600, future_loading) + time = timeit(sim, number=500) # Print results - print('Simulation Time: {} ms/sim'.format(time*2)) + print("Simulation Time: {} ms/sim".format(time * 2)) + -# This allows the module to be executed directly -if __name__=='__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/07c9eb121dbb5de8d48ddffc704c521d/new_state_estimator_example.py b/docs/_downloads/07c9eb121dbb5de8d48ddffc704c521d/new_state_estimator_example.py index eb15d3af..dd750022 100644 --- a/docs/_downloads/07c9eb121dbb5de8d48ddffc704c521d/new_state_estimator_example.py +++ b/docs/_downloads/07c9eb121dbb5de8d48ddffc704c521d/new_state_estimator_example.py @@ -19,7 +19,8 @@ class BlindlyStumbleEstimator(StateEstimator): This state estimator was created by copying the state estimator template and filling out each function with the logic for this algorithm """ - def __init__(self, model, x0, measurement = None): + + def __init__(self, model, x0, measurement=None): """ Initialize the state estimator @@ -40,7 +41,10 @@ def estimate(self, t, u, z): z (dict): Measured output at time t """ # Generate new candidate state - x2 = {key : float(value) + 10*(random.random()-0.5) for (key,value) in self.state.items()} + x2 = { + key: float(value) + 10 * (random.random() - 0.5) + for (key, value) in self.state.items() + } # Calculate outputs z_est = self.m.output(t, self.state) @@ -51,7 +55,7 @@ def estimate(self, t, u, z): z_est2_score = sum([abs(z_est2[key] - z[key]) for key in self.m.outputs]) # Now choose the closer one - if z_est2_score < z_est_score: + if z_est2_score < z_est_score: self.state = x2 @property @@ -61,86 +65,95 @@ def x(self): """ return ScalarData(self.state) + # Model used in example -class ThrownObject(): +class ThrownObject: """ Model that similates an object thrown into the air without air resistance """ - inputs = [] # no inputs, no 
way to control + inputs = [] # no inputs, no way to control states = [ - 'x', # Position (m) - 'v' # Velocity (m/s) - ] - outputs = [ # Anything we can measure - 'x' # Position (m) + "x", # Position (m) + "v", # Velocity (m/s) + ] + outputs = [ # Anything we can measure + "x" # Position (m) ] events = [ - 'falling', # Event- object is falling - 'impact' # Event- object has impacted ground + "falling", # Event- object is falling + "impact", # Event- object has impacted ground ] # The Default parameters. Overwritten by passing parameters dictionary into constructor parameters = { - 'thrower_height': 1.83, # m - 'throwing_speed': 40, # m/s - 'g': -9.81, # Acceleration due to gravity in m/s^2 - 'process_noise': 0.0 # amount of noise in each step + "thrower_height": 1.83, # m + "throwing_speed": 40, # m/s + "g": -9.81, # Acceleration due to gravity in m/s^2 + "process_noise": 0.0, # amount of noise in each step } - def initialize(self, u = None, z = None): + def initialize(self, u=None, z=None): self.max_x = 0.0 return { - 'x': self.parameters['thrower_height'], # Thrown, so initial altitude is height of thrower - 'v': self.parameters['throwing_speed'] # Velocity at which the ball is thrown - this guy is an professional baseball pitcher - } - - def dx(self, t, x, u = None): + "x": self.parameters[ + "thrower_height" + ], # Thrown, so initial altitude is height of thrower + "v": self.parameters[ + "throwing_speed" + ], # Velocity at which the ball is thrown - this guy is an professional baseball pitcher + } + + def dx(self, t, x, u=None): # apply_process_noise is used to add process noise to each step return { - 'x': x['v'], - 'v': self.parameters['g'] # Acceleration of gravity + "x": x["v"], + "v": self.parameters["g"], # Acceleration of gravity } def output(self, t, x): - return { - 'x': x['x'] - } + return {"x": x["x"]} - def event_state(self, t, x): - self.max_x = max(self.max_x, x['x']) # Maximum altitude + def event_state(self, t, x): + self.max_x = max(self.max_x, x["x"]) # Maximum altitude return { - 'falling': max(x['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed - 'impact': max(x['x']/self.max_x,0) # 1 until falling begins, then it's fraction of height + "falling": max( + x["v"] / self.parameters["throwing_speed"], 0 + ), # Throwing speed is max speed + "impact": max( + x["x"] / self.max_x, 0 + ), # 1 until falling begins, then it's fraction of height } + def run_example(): - # This example creates a new state estimator, instead of using the included algorihtms. + # This example creates a new state estimator, instead of using the included algorihtms. # The new state estimator was defined above and can now be used like the UKF or PF - + # First we define the model to be used with the state estimator m = ThrownObject() # Lets pretend we have no idea what the state is, we'll provide an estimate of 0 - x0 = {key : 0 for key in m.states} + x0 = {key: 0 for key in m.states} filt = BlindlyStumbleEstimator(m, x0) # Now lets simulate it forward and see what it looks like dt = 0.1 x = m.initialize() - print('t: {}. State: {} (Ground truth: {})'.format(0, filt.x.mean, x)) - for i in range(1, int(8.4/dt)): + print("t: {}. 
State: {} (Ground truth: {})".format(0, filt.x.mean, x)) + for i in range(1, int(8.4 / dt)): # Update ground truth state - x = {key : x[key] + m.dx(i*dt, x)[key] * dt for key in m.states} + x = {key: x[key] + m.dx(i * dt, x)[key] * dt for key in m.states} # Run estimation step - filt.estimate(i*dt, None, m.output(i*dt, x)) + filt.estimate(i * dt, None, m.output(i * dt, x)) # Print result - print('t: {}. State: {} (Ground truth: {})'.format(i*dt, filt.x.mean, x)) + print("t: {}. State: {} (Ground truth: {})".format(i * dt, filt.x.mean, x)) # The results probably should show that it is estimating the state with a significant delay -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/08ee9859b313ce1d928f863d2d51af12/linear_model.ipynb b/docs/_downloads/08ee9859b313ce1d928f863d2d51af12/linear_model.ipynb index b4bbda54..8f8f3407 100644 --- a/docs/_downloads/08ee9859b313ce1d928f863d2d51af12/linear_model.ipynb +++ b/docs/_downloads/08ee9859b313ce1d928f863d2d51af12/linear_model.ipynb @@ -152,11 +152,11 @@ "outputs": [], "source": [ "class ThrownObject(LinearModel):\n", - " events = ['impact']\n", - " inputs = [] \n", - " states = ['x', 'v']\n", - " outputs = ['x']\n", - " \n", + " events = [\"impact\"]\n", + " inputs = []\n", + " states = [\"x\", \"v\"]\n", + " outputs = [\"x\"]\n", + "\n", " A = np.array([[0, 1], [0, 0]])\n", " C = np.array([[1, 0]])\n", " E = np.array([[0], [-9.81]])\n", @@ -181,8 +181,8 @@ "source": [ "class ThrownObject(ThrownObject): # Continue the ThrownObject class\n", " default_parameters = {\n", - " 'thrower_height': 1.83,\n", - " 'throwing_speed': 40,\n", + " \"thrower_height\": 1.83,\n", + " \"throwing_speed\": 40,\n", " }" ] }, @@ -210,10 +210,12 @@ "source": [ "class ThrownObject(ThrownObject):\n", " def initialize(self, u=None, z=None):\n", - " return self.StateContainer({\n", - " 'x': self.parameters['thrower_height'],\n", - " 'v': self.parameters['throwing_speed']\n", - " })" + " return self.StateContainer(\n", + " {\n", + " \"x\": self.parameters[\"thrower_height\"],\n", + " \"v\": self.parameters[\"throwing_speed\"],\n", + " }\n", + " )" ] }, { @@ -234,10 +236,7 @@ "source": [ "class ThrownObject(ThrownObject):\n", " def threshold_met(self, x):\n", - " return {\n", - " 'falling': x['v'] < 0,\n", - " 'impact': x['x'] <= 0\n", - " }" + " return {\"falling\": x[\"v\"] < 0, \"impact\": x[\"x\"] <= 0}" ] }, { @@ -255,11 +254,11 @@ "outputs": [], "source": [ "class ThrownObject(ThrownObject):\n", - " def event_state(self, x): \n", - " x_max = x['x'] + np.square(x['v'])/(9.81*2)\n", + " def event_state(self, x):\n", + " x_max = x[\"x\"] + np.square(x[\"v\"]) / (9.81 * 2)\n", " return {\n", - " 'falling': np.maximum(x['v']/self.parameters['throwing_speed'],0),\n", - " 'impact': np.maximum(x['x']/x_max,0) if x['v'] < 0 else 1\n", + " \"falling\": np.maximum(x[\"v\"] / self.parameters[\"throwing_speed\"], 0),\n", + " \"impact\": np.maximum(x[\"x\"] / x_max, 0) if x[\"v\"] < 0 else 1,\n", " }" ] }, @@ -280,7 +279,7 @@ "outputs": [], "source": [ "m = ThrownObject()\n", - "save = m.simulate_to_threshold(print = True, save_freq=1, threshold_keys='impact', dt=0.1)" + "save = m.simulate_to_threshold(print=True, save_freq=1, threshold_keys=\"impact\", dt=0.1)" ] }, { @@ -306,7 +305,8 @@ "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", - "save.outputs.plot(title='generated model')\n", + "\n", + 
"save.outputs.plot(title=\"generated model\")\n", "plt.show()" ] }, diff --git a/docs/_downloads/0c3148e089dc17af5515eff7ec298a11/tutorial.ipynb b/docs/_downloads/0c3148e089dc17af5515eff7ec298a11/tutorial.ipynb index 3c0647bc..897d94b7 100644 --- a/docs/_downloads/0c3148e089dc17af5515eff7ec298a11/tutorial.ipynb +++ b/docs/_downloads/0c3148e089dc17af5515eff7ec298a11/tutorial.ipynb @@ -79,7 +79,7 @@ "metadata": {}, "outputs": [], "source": [ - "samples = UnweightedSamples([{'x': 1, 'v':2}, {'x': 3, 'v':-2}])\n", + "samples = UnweightedSamples([{\"x\": 1, \"v\": 2}, {\"x\": 3, \"v\": -2}])\n", "print(samples)" ] }, @@ -115,8 +115,8 @@ "metadata": {}, "outputs": [], "source": [ - "print(samples.sample()) # A single sample\n", - "print(samples.sample(10)) # 10 samples" + "print(samples.sample()) # A single sample\n", + "print(samples.sample(10)) # 10 samples" ] }, { @@ -148,7 +148,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(samples.key('x'))" + "print(samples.key(\"x\"))" ] }, { @@ -164,10 +164,10 @@ "metadata": {}, "outputs": [], "source": [ - "print('mean', samples.mean)\n", - "print('median', samples.median)\n", - "print('covariance', samples.cov)\n", - "print('size', samples.size)" + "print(\"mean\", samples.mean)\n", + "print(\"median\", samples.median)\n", + "print(\"covariance\", samples.cov)\n", + "print(\"size\", samples.size)" ] }, { @@ -226,6 +226,7 @@ "# Turn into a distribution - this represents uncertainty in the initial state\n", "from progpy.uncertain_data import MultivariateNormalDist\n", "from numpy import diag\n", + "\n", "INITIAL_UNCERT = 0.05 # Uncertainty in initial state (%)\n", "# Construct covariance matrix (making sure each value is positive)\n", "cov = diag([max(abs(INITIAL_UNCERT * value), 1e-9) for value in x0.values()])\n", @@ -249,17 +250,17 @@ "outputs": [], "source": [ "print(\"Prior State:\", est.x.mean)\n", - "print('\\tSOC: ', m.event_state(est.x.mean)['EOD'])\n", - "fig = est.x.plot_scatter(label='prior')\n", + "print(\"\\tSOC: \", m.event_state(est.x.mean)[\"EOD\"])\n", + "fig = est.x.plot_scatter(label=\"prior\")\n", "\n", "t = 0.1\n", - "u = m.InputContainer({'i': 2})\n", - "example_measurements = m.OutputContainer({'t': 32.2, 'v': 3.915})\n", + "u = m.InputContainer({\"i\": 2})\n", + "example_measurements = m.OutputContainer({\"t\": 32.2, \"v\": 3.915})\n", "est.estimate(t, u, example_measurements)\n", "\n", "print(\"Posterior State:\", est.x.mean)\n", - "print('\\tSOC: ', m.event_state(est.x.mean)['EOD'])\n", - "est.x.plot_scatter(fig= fig, label='posterior')" + "print(\"\\tSOC: \", m.event_state(est.x.mean)[\"EOD\"])\n", + "est.x.plot_scatter(fig=fig, label=\"posterior\")" ] }, { @@ -325,19 +326,20 @@ "source": [ "x = est.x # The state estimate\n", "\n", + "\n", "def future_loading(t, x={}):\n", - " # Variable (piece-wise) future loading scheme \n", - " if (t < 600):\n", + " # Variable (piece-wise) future loading scheme\n", + " if t < 600:\n", " i = 2\n", - " elif (t < 900):\n", + " elif t < 900:\n", " i = 1\n", - " elif (t < 1800):\n", + " elif t < 1800:\n", " i = 4\n", - " elif (t < 3000):\n", + " elif t < 3000:\n", " i = 2\n", " else:\n", " i = 3\n", - " return m.InputContainer({'i': i})" + " return m.InputContainer({\"i\": i})" ] }, { @@ -378,10 +380,20 @@ "outputs": [], "source": [ "print(\"\\nEOD Predictions (s):\")\n", - "print('\\tPortion between 3005.2 and 3005.6: ', mc_results.time_of_event.percentage_in_bounds([3005.2, 3005.6]))\n", - "print('\\tAssuming ground truth 3005.25: ', mc_results.time_of_event.metrics(ground_truth = 
3005.25))\n", - "from progpy.metrics import prob_success \n", - "print('\\tP(Success) if mission ends at 3005.25: ', prob_success(mc_results.time_of_event, 3005.25))" + "print(\n", + " \"\\tPortion between 3005.2 and 3005.6: \",\n", + " mc_results.time_of_event.percentage_in_bounds([3005.2, 3005.6]),\n", + ")\n", + "print(\n", + " \"\\tAssuming ground truth 3005.25: \",\n", + " mc_results.time_of_event.metrics(ground_truth=3005.25),\n", + ")\n", + "from progpy.metrics import prob_success\n", + "\n", + "print(\n", + " \"\\tP(Success) if mission ends at 3005.25: \",\n", + " prob_success(mc_results.time_of_event, 3005.25),\n", + ")" ] }, { @@ -399,11 +411,17 @@ "metadata": {}, "outputs": [], "source": [ - "fig = mc_results.states.snapshot(0).plot_scatter(label = \"t={:.0f}\".format(int(mc_results.times[0])))\n", + "fig = mc_results.states.snapshot(0).plot_scatter(\n", + " label=\"t={:.0f}\".format(int(mc_results.times[0]))\n", + ")\n", "for i in range(1, 4):\n", - " index = int(len(mc_results.times)/4*i)\n", - " mc_results.states.snapshot(index).plot_scatter(fig=fig, label = \"t={:.0f}\".format(mc_results.times[index]))\n", - "mc_results.states.snapshot(-1).plot_scatter(fig = fig, label = \"t={:.0f}\".format(int(mc_results.times[-1])))" + " index = int(len(mc_results.times) / 4 * i)\n", + " mc_results.states.snapshot(index).plot_scatter(\n", + " fig=fig, label=\"t={:.0f}\".format(mc_results.times[index])\n", + " )\n", + "mc_results.states.snapshot(-1).plot_scatter(\n", + " fig=fig, label=\"t={:.0f}\".format(int(mc_results.times[-1]))\n", + ")" ] }, { @@ -492,7 +510,8 @@ "metadata": {}, "outputs": [], "source": [ - "import random \n", + "import random\n", + "\n", "\n", "class BlindlyStumbleEstimator(StateEstimator):\n", " def __init__(self, model, x0):\n", @@ -501,7 +520,10 @@ "\n", " def estimate(self, t, u, z):\n", " # Generate new candidate state\n", - " x2 = {key : float(value) + 10*(random.random()-0.5) for (key,value) in self.state.items()}\n", + " x2 = {\n", + " key: float(value) + 10 * (random.random() - 0.5)\n", + " for (key, value) in self.state.items()\n", + " }\n", "\n", " # Calculate outputs\n", " z_est = self.m.output(self.state)\n", @@ -512,12 +534,12 @@ " z_est2_score = sum([abs(z_est2[key] - z[key]) for key in self.m.outputs])\n", "\n", " # Now choose the closer one\n", - " if z_est2_score < z_est_score: \n", + " if z_est2_score < z_est_score:\n", " self.state = x2\n", "\n", " @property\n", " def x(self):\n", - " return ScalarData(self.state)\n" + " return ScalarData(self.state)" ] }, { @@ -539,8 +561,8 @@ "se = BlindlyStumbleEstimator(m, x0)\n", "\n", "for i in range(25):\n", - " u = m.InputContainer({'i': 0})\n", - " z = m.OutputContainer({'t': 18.95, 'v': 4.183})\n", + " u = m.InputContainer({\"i\": 0})\n", + " z = m.OutputContainer({\"t\": 18.95, \"v\": 4.183})\n", " se.estimate(i, u, z)\n", " print(se.x.mean)\n", " print(\"\\tcorrect: {'tb': 18.95, 'qb': 7856.3254, 'qcp': 0, 'qcs': 0}\")" diff --git a/docs/_downloads/0c728dcdf3f367290bb7493d809b5dcb/full_lstm_model.py b/docs/_downloads/0c728dcdf3f367290bb7493d809b5dcb/full_lstm_model.py index 86828ead..1750bc96 100644 --- a/docs/_downloads/0c728dcdf3f367290bb7493d809b5dcb/full_lstm_model.py +++ b/docs/_downloads/0c728dcdf3f367290bb7493d809b5dcb/full_lstm_model.py @@ -2,9 +2,9 @@ # This ensures that the directory containing examples is in the python search directories """ -Example building a full model with events and thresholds using LSTMStateTransitionModel. 
+Example building a full model with events and thresholds using LSTMStateTransitionModel. -In this example, we generate fake data using the ThrownObject model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. +In this example, we generate fake data using the ThrownObject model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. We then create a subclass of the LSTMStateTransitionModel, defining the event_state and threshold equations as a function of output. We use the generated model and compare to the original model. """ @@ -15,50 +15,68 @@ from progpy.data_models import LSTMStateTransitionModel from progpy.models import ThrownObject + def run_example(): # ----------------------------------------------------- # Method 1 - manual definition - # In this example we complete the models by manually defining event_state + # In this example we complete the models by manually defining event_state # and thresholds_met as function of output. # ----------------------------------------------------- TIMESTEP = 0.01 m = ThrownObject() + def future_loading(t, x=None): - return m.InputContainer({}) # No input for thrown object + return m.InputContainer({}) # No input for thrown object # Step 1: Generate additional data - # We will use data generated above, but we also want data at additional timesteps - print('Generating data...') - data = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP, dt=TIMESTEP) - data_half = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2) - data_quarter = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4) - data_twice = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2) - data_four = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*4, dt=TIMESTEP*4) + # We will use data generated above, but we also want data at additional timesteps + print("Generating data...") + data = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP, dt=TIMESTEP + ) + data_half = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP / 2, dt=TIMESTEP / 2 + ) + data_quarter = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP / 4, dt=TIMESTEP / 4 + ) + data_twice = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP * 2, dt=TIMESTEP * 2 + ) + data_four = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP * 4, dt=TIMESTEP * 4 + ) # Step 2: Data Prep # We need to add the timestep as a input u = np.array([[TIMESTEP] for _ in data.inputs]) - u_half = np.array([[TIMESTEP/2] for _ in data_half.inputs]) - u_quarter = np.array([[TIMESTEP/4] for _ in data_quarter.inputs]) - u_twice = np.array([[TIMESTEP*2] for _ in data_twice.inputs]) - u_four = np.array([[TIMESTEP*4] for _ in data_four.inputs]) + u_half = np.array([[TIMESTEP / 2] for _ in data_half.inputs]) + u_quarter = np.array([[TIMESTEP / 4] for _ in data_quarter.inputs]) + 
u_twice = np.array([[TIMESTEP * 2] for _ in data_twice.inputs]) + u_four = np.array([[TIMESTEP * 4] for _ in data_four.inputs]) - # In this case we are saying that velocity is directly measurable, + # In this case we are saying that velocity is directly measurable, # unlike the original model. This is necessary to calculate the events. # Since the outputs will then match the states, we pass in the states below u_data = [u, u_half, u_quarter, u_twice, u_four] - z_data = [data.states, data_half.states, data_quarter.states, data_twice.states, data_four.states] + z_data = [ + data.states, + data_half.states, + data_quarter.states, + data_twice.states, + data_four.states, + ] # Step 3: Create model - print('Creating model...') + print("Creating model...") - # Create a subclass of LSTMStateTransitionModel, + # Create a subclass of LSTMStateTransitionModel, # overridding event-related methods and members class LSTMThrownObject(LSTMStateTransitionModel): events = [ - 'falling', # Event- object is falling - 'impact' # Event- object has impacted ground + "falling", # Event- object is falling + "impact", # Event- object has impacted ground ] def initialize(self, u=None, z=None): @@ -70,54 +88,67 @@ def event_state(self, x): # Using class name instead of self allows the class to be subclassed z = LSTMThrownObject.output(self, x) # Logic from ThrownObject.event_state, using output instead of state - self.max_x = max(self.max_x, z['x']) # Maximum altitude + self.max_x = max(self.max_x, z["x"]) # Maximum altitude return { - 'falling': max(z['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed - 'impact': max(z['x']/self.max_x,0) # 1 until falling begins, then it's fraction of height + "falling": max( + z["v"] / self.parameters["throwing_speed"], 0 + ), # Throwing speed is max speed + "impact": max( + z["x"] / self.max_x, 0 + ), # 1 until falling begins, then it's fraction of height } def threshold_met(self, x): z = LSTMThrownObject.output(self, x) # Logic from ThrownObject.threshold_met, using output instead of state - return { - 'falling': z['v'] < 0, - 'impact': z['x'] <= 0 - } - + return {"falling": z["v"] < 0, "impact": z["x"] <= 0} + # Step 4: Generate Model - print('Building model...') + print("Building model...") m2 = LSTMThrownObject.from_data( - inputs=u_data, + inputs=u_data, outputs=z_data, - window=4, - epochs=30, - input_keys = ['dt'], - output_keys = m.states) + window=4, + epochs=30, + input_keys=["dt"], + output_keys=m.states, + ) # Step 5: Simulate with model t_counter = 0 x_counter = m.initialize() - def future_loading3(t, x = None): + + def future_loading3(t, x=None): nonlocal t_counter, x_counter - z = m2.InputContainer({'x_t-1': x_counter['x'], 'v_t-1': x_counter['v'], 'dt': t - t_counter}) + z = m2.InputContainer( + {"x_t-1": x_counter["x"], "v_t-1": x_counter["v"], "dt": t - t_counter} + ) x_counter = m.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z # Use new dt, not used in training - # Using a dt not used in training will demonstrate the model's + # Using a dt not used in training will demonstrate the model's # ability to handle different timesteps not part of training set - data = m.simulate_to_threshold(future_loading, threshold_keys='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3) - results3 = m2.simulate_to_threshold(future_loading3, threshold_keys='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3) + data = m.simulate_to_threshold( + future_loading, threshold_keys="impact", dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) + results3 = 
m2.simulate_to_threshold( + future_loading3, + threshold_keys="impact", + dt=TIMESTEP * 3, + save_freq=TIMESTEP * 3, + ) # Step 6: Compare Results - print('Comparing results...') - print('Predicted impact time:') - print('\tOriginal: ', data.times[-1]) - print('\tLSTM: ', results3.times[-1]) - data.outputs.plot(title='original model') - results3.outputs.plot(title='generated model') + print("Comparing results...") + print("Predicted impact time:") + print("\tOriginal: ", data.times[-1]) + print("\tLSTM: ", results3.times[-1]) + data.outputs.plot(title="original model") + results3.outputs.plot(title="generated model") plt.show() -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/0ce31b25ff0ec8765f26a14bbdd01191/derived_params.py b/docs/_downloads/0ce31b25ff0ec8765f26a14bbdd01191/derived_params.py index 45aeed55..d9656a00 100644 --- a/docs/_downloads/0ce31b25ff0ec8765f26a14bbdd01191/derived_params.py +++ b/docs/_downloads/0ce31b25ff0ec8765f26a14bbdd01191/derived_params.py @@ -2,29 +2,31 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating ways to use the derived parameters feature for model building. +Example demonstrating ways to use the derived parameters feature for model building. """ from progpy.models.thrown_object import ThrownObject + def run_example(): # For this example we will use the ThrownObject model from the new_model example. # We will extend that model to include a derived parameter - # Let's assume that the throwing_speed was actually a function of thrower_height + # Let's assume that the throwing_speed was actually a function of thrower_height # (i.e., a taller thrower would throw the ball faster). # Here's how we would implement that # Step 1: Define a function for the relationship between thrower_height and throwing_speed. def update_thrown_speed(params): return { - 'throwing_speed': params['thrower_height'] * 21.85 + "throwing_speed": params["thrower_height"] * 21.85 } # Assumes thrown_speed is linear function of height + # Note: one or more parameters can be changed in these functions, whatever parameters are changed are returned in the dictionary # Step 2: Define the param callbacks - ThrownObject.param_callbacks.update({ - 'thrower_height': [update_thrown_speed] - }) # Tell the derived callbacks feature to call this function when thrower_height changes. + ThrownObject.param_callbacks.update( + {"thrower_height": [update_thrown_speed]} + ) # Tell the derived callbacks feature to call this function when thrower_height changes. # Note: Usually we would define this method within the class # for this example, we're doing it separately to improve readability # Note2: You can also have more than one function be called when a single parameter is changed. @@ -32,15 +34,23 @@ def update_thrown_speed(params): # Step 3: Use! 
obj = ThrownObject() - print("Default Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format(obj.parameters['thrower_height'], obj.parameters['throwing_speed'])) - + print( + "Default Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format( + obj.parameters["thrower_height"], obj.parameters["throwing_speed"] + ) + ) + # Now let's change the thrower_height print("changing height...") - obj.parameters['thrower_height'] = 1.75 # Our thrower is 1.75 m tall - print("\nUpdated Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format(obj.parameters['thrower_height'], obj.parameters['throwing_speed'])) + obj.parameters["thrower_height"] = 1.75 # Our thrower is 1.75 m tall + print( + "\nUpdated Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format( + obj.parameters["thrower_height"], obj.parameters["throwing_speed"] + ) + ) print("Notice how speed changed automatically with height") -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/0d5d9fc5c8f7058d644dbfb7ec8db854/dynamic_step_size.py b/docs/_downloads/0d5d9fc5c8f7058d644dbfb7ec8db854/dynamic_step_size.py index f92daf68..5a8baf1f 100644 --- a/docs/_downloads/0d5d9fc5c8f7058d644dbfb7ec8db854/dynamic_step_size.py +++ b/docs/_downloads/0d5d9fc5c8f7058d644dbfb7ec8db854/dynamic_step_size.py @@ -2,55 +2,70 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating ways to use the dynamic step size feature. This feature allows users to define a time-step that changes with time or state. +Example demonstrating ways to use the dynamic step size feature. This feature allows users to define a time-step that changes with time or state. """ -import prog_models from progpy.models.thrown_object import ThrownObject + def run_example(): print("EXAMPLE 1: dt of 1 until 8 sec, then 0.5\n\nSetting up...\n") # Step 1: Create instance of model m = ThrownObject() - # Step 2: Setup for simulation + # Step 2: Setup for simulation def future_load(t, x=None): return {} # Step 3: Define dynamic step size function - # This `next_time` function will specify what the next step of the simulation should be at any state and time. + # This `next_time` function will specify what the next step of the simulation should be at any state and time. # f(x, t) -> (t, dt) def next_time(t, x): - # In this example dt is a function of time. We will use a dt of 1 for the first 8 seconds, then 0.5 + # In this example dt is a function of time. 
We will use a dt of 1 for the first 8 seconds, then 0.5 if t < 8: return 1 return 0.5 # Step 4: Simulate to impact # Here we're printing every time step so we can see the step size change - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') - (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, save_freq=1e-99, print=True, dt=next_time, threshold_keys=['impact']) + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") + (times, inputs, states, outputs, event_states) = m.simulate_to_threshold( + future_load, + save_freq=1e-99, + print=True, + dt=next_time, + threshold_keys=["impact"], + ) # Example 2 - print("EXAMPLE 2: dt of 1 until impact event state 0.5, then 0.25 \n\nSetting up...\n") + print( + "EXAMPLE 2: dt of 1 until impact event state 0.5, then 0.25 \n\nSetting up...\n" + ) # Step 3: Define dynamic step size function - # This `next_time` function will specify what the next step of the simulation should be at any state and time. + # This `next_time` function will specify what the next step of the simulation should be at any state and time. # f(x, t) -> (t, dt) def next_time(t, x): # In this example dt is a function of state. Uses a dt of 1 until impact event state 0.5, then 0.25 event_state = m.event_state(x) - if event_state['impact'] < 0.5: + if event_state["impact"] < 0.5: return 0.25 return 1 # Step 4: Simulate to impact # Here we're printing every time step so we can see the step size change - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') - (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, save_freq=1e-99, print=True, dt=next_time, threshold_keys=['impact']) + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") + (times, inputs, states, outputs, event_states) = m.simulate_to_threshold( + future_load, + save_freq=1e-99, + print=True, + dt=next_time, + threshold_keys=["impact"], + ) + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/0e5fd1a6bf0ee8b3ff6785357ecebade/online_prog.py b/docs/_downloads/0e5fd1a6bf0ee8b3ff6785357ecebade/online_prog.py index c4081ac3..1bd00773 100644 --- a/docs/_downloads/0e5fd1a6bf0ee8b3ff6785357ecebade/online_prog.py +++ b/docs/_downloads/0e5fd1a6bf0ee8b3ff6785357ecebade/online_prog.py @@ -12,11 +12,12 @@ from pprint import pprint from time import sleep + def run_example(): # Step 1: Open a session with the server for a thrown object. # Use all default configuration options. # Except for the save frequency, which we'll set to 1 second. - session = prog_client.Session('ThrownObject', pred_cfg={'save_freq': 1}) + session = prog_client.Session("ThrownObject", pred_cfg={"save_freq": 1}) print(session) # Printing the Session Information # Step 2: Prepare data to send to server @@ -25,53 +26,53 @@ def run_example(): # Note: in an actual application, the data would be received from a sensor or other source. # The structure below is used to emulate the sensor. 
example_data = [ - (0, {'x': 1.83}), - (0.1, {'x': 5.81}), - (0.2, {'x': 9.75}), - (0.3, {'x': 13.51}), - (0.4, {'x': 17.20}), - (0.5, {'x': 20.87}), - (0.6, {'x': 24.37}), - (0.7, {'x': 27.75}), - (0.8, {'x': 31.09}), - (0.9, {'x': 34.30}), - (1.0, {'x': 37.42}), - (1.1, {'x': 40.43}), - (1.2, {'x': 43.35}), - (1.3, {'x': 46.17}), - (1.4, {'x': 48.91}), - (1.5, {'x': 51.53}), - (1.6, {'x': 54.05}), - (1.7, {'x': 56.50}), - (1.8, {'x': 58.82}), - (1.9, {'x': 61.05}), - (2.0, {'x': 63.20}), - (2.1, {'x': 65.23}), - (2.2, {'x': 67.17}), - (2.3, {'x': 69.02}), - (2.4, {'x': 70.75}), - (2.5, {'x': 72.40}) - ] + (0, {"x": 1.83}), + (0.1, {"x": 5.81}), + (0.2, {"x": 9.75}), + (0.3, {"x": 13.51}), + (0.4, {"x": 17.20}), + (0.5, {"x": 20.87}), + (0.6, {"x": 24.37}), + (0.7, {"x": 27.75}), + (0.8, {"x": 31.09}), + (0.9, {"x": 34.30}), + (1.0, {"x": 37.42}), + (1.1, {"x": 40.43}), + (1.2, {"x": 43.35}), + (1.3, {"x": 46.17}), + (1.4, {"x": 48.91}), + (1.5, {"x": 51.53}), + (1.6, {"x": 54.05}), + (1.7, {"x": 56.50}), + (1.8, {"x": 58.82}), + (1.9, {"x": 61.05}), + (2.0, {"x": 63.20}), + (2.1, {"x": 65.23}), + (2.2, {"x": 67.17}), + (2.3, {"x": 69.02}), + (2.4, {"x": 70.75}), + (2.5, {"x": 72.40}), + ] # Step 3: Send data to server, checking periodically for a prediction result. LAST_PREDICTION_TIME = None for i in range(len(example_data)): # Send data to server - print(f'{example_data[i][0]}s: Sending data to server... ', end='') + print(f"{example_data[i][0]}s: Sending data to server... ", end="") session.send_data(time=example_data[i][0], **example_data[i][1]) # Check for a prediction result status = session.get_prediction_status() - if LAST_PREDICTION_TIME != status["last prediction"]: + if LAST_PREDICTION_TIME != status["last prediction"]: # New prediction result LAST_PREDICTION_TIME = status["last prediction"] - print('Prediction Completed') - + print("Prediction Completed") + # Get prediction # Prediction is returned as a type uncertain_data, so you can manipulate it like that datatype. # See https://nasa.github.io/prog_algs/uncertain_data.html t, prediction = session.get_predicted_toe() - print(f'Predicted ToE (using state from {t}s): ') + print(f"Predicted ToE (using state from {t}s): ") pprint(prediction.mean) # Get Predicted future states @@ -81,15 +82,18 @@ def run_example(): # Return type is UnweightedSamplesPrediction (since we're using the monte carlo predictor) # See https://nasa.github.io/prog_algs t, event_states = session.get_predicted_event_state() - print(f'Predicted Event States (using state from {t}s): ') - es_means = [(event_states.times[i], event_states.snapshot(i).mean) for i in range(len(event_states.times))] + print(f"Predicted Event States (using state from {t}s): ") + es_means = [ + (event_states.times[i], event_states.snapshot(i).mean) + for i in range(len(event_states.times)) + ] for time, es_mean in es_means: print(f"\t{time}s: {es_mean}") # Note: you can also get the predicted future states of the model (see get_predicted_states()) or performance parameters (see get_predicted_performance_metrics()) else: - print('No prediction yet') + print("No prediction yet") # No updated prediction, send more data and check again later. sleep(0.1) @@ -98,6 +102,7 @@ def run_example(): # Note: You can also get the model from prog_server to work with directly. 
model = session.get_model() + # This allows the module to be executed directly -if __name__ == '__main__': +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/0f1106d2e6e9fdf810b85394e9e69aed/serialization.py b/docs/_downloads/0f1106d2e6e9fdf810b85394e9e69aed/serialization.py index 2bbab189..2f557072 100644 --- a/docs/_downloads/0f1106d2e6e9fdf810b85394e9e69aed/serialization.py +++ b/docs/_downloads/0f1106d2e6e9fdf810b85394e9e69aed/serialization.py @@ -8,17 +8,18 @@ import pickle from prog_models.models import BatteryElectroChemEOD as Battery -def run_example(): + +def run_example(): ## Step 1: Create a model object batt = Battery() # Set process nosie to 0 to illustrate match between original and serialized versions - batt.parameters['process_noise'] = 0 + batt.parameters["process_noise"] = 0 ### Step 2: serialize model for future use # Note: Model serialization has a lot of purposes, like saving a specific model to a file to be loaded later or sending a model to another machine over a network connection. - - # METHOD 1: Serialize with JSON + + # METHOD 1: Serialize with JSON save_json = batt.to_json() # Model can be called directly with serialized result @@ -27,55 +28,82 @@ def run_example(): # Serialized result can also be saved to a text file and uploaded later using the following code: txtFile = open("model_save_json.txt", "w") txtFile.write(save_json) - txtFile.close() + txtFile.close() - with open('model_save_json.txt') as infile: + with open("model_save_json.txt") as infile: load_json = infile.read() serial_2 = Battery.from_json(load_json) # METHOD 2: Serialize by pickling - pickle.dump(batt, open('model_save_pkl.pkl','wb')) - load_pkl = pickle.load(open('model_save_pkl.pkl','rb')) + pickle.dump(batt, open("model_save_pkl.pkl", "wb")) + load_pkl = pickle.load(open("model_save_pkl.pkl", "rb")) ## Step 3: Simulate to threshold and compare results options_sim = { - 'save_freq': 1 # Frequency at which results are saved, or equivalently time step in results + "save_freq": 1 # Frequency at which results are saved, or equivalently time step in results } - # Define loading profile + # Define loading profile def future_loading(t, x=None): - if (t < 600): + if t < 600: i = 3 - elif (t < 1000): + elif t < 1000: i = 2 - elif (t < 1500): + elif t < 1500: i = 1.5 else: i = 4 - return batt.InputContainer({'i': i}) + return batt.InputContainer({"i": i}) - # Simulate to threshold - results_orig = batt.simulate_to_threshold(future_loading,**options_sim) + # Simulate to threshold + results_orig = batt.simulate_to_threshold(future_loading, **options_sim) results_serial_1 = serial_1.simulate_to_threshold(future_loading, **options_sim) results_serial_2 = serial_2.simulate_to_threshold(future_loading, **options_sim) results_serial_3 = load_pkl.simulate_to_threshold(future_loading, **options_sim) # Plot results for comparison - voltage_orig = [results_orig.outputs[iter]['v'] for iter in range(len(results_orig.times))] - voltage_serial_1 = [results_serial_1.outputs[iter]['v'] for iter in range(len(results_serial_1.times))] - voltage_serial_2 = [results_serial_2.outputs[iter]['v'] for iter in range(len(results_serial_2.times))] - voltage_serial_3 = [results_serial_3.outputs[iter]['v'] for iter in range(len(results_serial_3.times))] - - plt.plot(results_orig.times,voltage_orig,'-b',label='Original surrogate') - plt.plot(results_serial_1.times,voltage_serial_1,'--r',label='First JSON serialized surrogate') - plt.plot(results_serial_2.times,voltage_serial_2,'-.g',label='Second JSON 
serialized surrogate') - plt.plot(results_serial_3.times, voltage_serial_3, '--y', label='Pickled serialized surrogate') + voltage_orig = [ + results_orig.outputs[iter]["v"] for iter in range(len(results_orig.times)) + ] + voltage_serial_1 = [ + results_serial_1.outputs[iter]["v"] + for iter in range(len(results_serial_1.times)) + ] + voltage_serial_2 = [ + results_serial_2.outputs[iter]["v"] + for iter in range(len(results_serial_2.times)) + ] + voltage_serial_3 = [ + results_serial_3.outputs[iter]["v"] + for iter in range(len(results_serial_3.times)) + ] + + plt.plot(results_orig.times, voltage_orig, "-b", label="Original surrogate") + plt.plot( + results_serial_1.times, + voltage_serial_1, + "--r", + label="First JSON serialized surrogate", + ) + plt.plot( + results_serial_2.times, + voltage_serial_2, + "-.g", + label="Second JSON serialized surrogate", + ) + plt.plot( + results_serial_3.times, + voltage_serial_3, + "--y", + label="Pickled serialized surrogate", + ) plt.legend() - plt.xlabel('Time (sec)') - plt.ylabel('Voltage (volts)') + plt.xlabel("Time (sec)") + plt.ylabel("Voltage (volts)") plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/0f7c5af7c0ba09c1be3bd3db3e6458c9/prog_model_template.py b/docs/_downloads/0f7c5af7c0ba09c1be3bd3db3e6458c9/prog_model_template.py index 6d48b133..bcbf5e53 100644 --- a/docs/_downloads/0f7c5af7c0ba09c1be3bd3db3e6458c9/prog_model_template.py +++ b/docs/_downloads/0f7c5af7c0ba09c1be3bd3db3e6458c9/prog_model_template.py @@ -13,15 +13,14 @@ from numpy import inf from progpy import PrognosticsModel + # REPLACE THIS WITH DERIVED PARAMETER CALLBACK FUNCTIONS (IF ANY) # See examples.derived_params # # Each function defines one or more derived parameters as a function of the other parameters. 
def example_callback(params): # Return format: dict of key: new value pair for at least one derived parameter - return { - "Example Parameter 1": params["Example Parameter 2"]-3 - } + return {"Example Parameter 1": params["Example Parameter 2"] - 3} class ProgModelTemplate(PrognosticsModel): @@ -33,58 +32,50 @@ class ProgModelTemplate(PrognosticsModel): # is_vectorized = True # REPLACE THE FOLLOWING LIST WITH EVENTS BEING PREDICTED - events = [ - 'Example Event' - ] - + events = ["Example Event"] + # REPLACE THE FOLLOWING LIST WITH INPUTS (LOADING) - inputs = [ - 'Example Input 1', - 'Example Input 2' - ] + inputs = ["Example Input 1", "Example Input 2"] # REPLACE THE FOLLOWING LIST WITH STATES states = [ - 'Examples State 1', - 'Examples State 2', - 'Examples State 3', - 'Examples State 4' + "Examples State 1", + "Examples State 2", + "Examples State 3", + "Examples State 4", ] # REPLACE THE FOLLOWING LIST WITH OUTPUTS (MEASURED VALUES) - outputs = [ - 'Example Output 1', - 'Example Output 2' - ] + outputs = ["Example Output 1", "Example Output 2"] # REPLACE THE FOLLOWING LIST WITH PERFORMANCE METRICS # i.e., NON-MEASURED VALUES THAT ARE A FUNCTION OF STATE # e.g., maximum torque of a motor performance_metric_keys = [ - 'metric 1', + "metric 1", ] # REPLACE THE FOLLOWING LIST WITH CONFIGURED PARAMETERS # Note- everything required to configure the model # should be in parameters- this is to enable the serialization features default_parameters = { # Set default parameters - 'Example Parameter 1': 0, - 'Example Parameter 2': 3, - 'process_noise': 0.1, # Process noise - 'x0': { # Initial state - 'Examples State 1': 1.5, - 'Examples State 2': -935, - 'Examples State 3': 42.1, - 'Examples State 4': 0 - } + "Example Parameter 1": 0, + "Example Parameter 2": 3, + "process_noise": 0.1, # Process noise + "x0": { # Initial state + "Examples State 1": 1.5, + "Examples State 2": -935, + "Examples State 3": 42.1, + "Examples State 4": 0, + }, } # REPLACE THE FOLLOWING WITH STATE BOUNDS IF NEEDED state_limits = { # 'state': (lower_limit, upper_limit) # only specify for states with limits - 'Examples State 1': (0, inf), - 'Examples State 4': (-2, 3) + "Examples State 1": (0, inf), + "Examples State 4": (-2, 3), } # Identify callbacks used by this model @@ -94,9 +85,7 @@ class ProgModelTemplate(PrognosticsModel): # And callbacks are one or more callback functions that define parameters that are # derived from that parameter # REPLACE THIS WITH ACTUAL DERIVED PARAMETER CALLBACKS - param_callbacks = { - "Example Parameter 2": [example_callback] - } + param_callbacks = {"Example Parameter 2": [example_callback]} # UNCOMMENT THIS FUNCTION IF YOU NEED CONSTRUCTION LOGIC (E.G., INPUT VALIDATION) # def __init__(self, **kwargs): @@ -175,7 +164,7 @@ class ProgModelTemplate(PrognosticsModel): # dx : StateContainer # First derivative of state, with keys defined by model.states # e.g., dx = {'abc': 3.1, 'def': -2.003} given states = ['abc', 'def'] - # + # # Example # ------- # | m = DerivProgModel() # Replace with specific model being simulated @@ -232,7 +221,7 @@ def output(self, x): x : StateContainer state, with keys defined by model.states e.g., x = {'abc': 332.1, 'def': 221.003} given states = ['abc', 'def'] - + Returns ------- z : OutputContainer @@ -242,10 +231,7 @@ def output(self, x): # REPLACE BELOW WITH LOGIC TO CALCULATE OUTPUTS # NOTE: KEYS FOR z MATCH 'outputs' LIST ABOVE - z = self.OutputContainer({ - 'Example Output 1': 0.0, - 'Example Output 2': 0.0 - }) + z = self.OutputContainer({"Example Output 
1": 0.0, "Example Output 2": 0.0}) return z @@ -258,7 +244,7 @@ def event_state(self, x): x : StateContainer state, with keys defined by model.states e.g., x = {'abc': 332.1, 'def': 221.003} given states = ['abc', 'def'] - + Returns ------- event_state : dict @@ -268,12 +254,10 @@ def event_state(self, x): # REPLACE BELOW WITH LOGIC TO CALCULATE EVENT STATES # NOTE: KEYS FOR event_x MATCH 'events' LIST ABOVE - event_x = { - 'Example Event': 0.95 - } + event_x = {"Example Event": 0.95} return event_x - + # Note: Thresholds met equation below is not strictly necessary. # By default, threshold_met will check if event_state is ≤ 0 for each event def threshold_met(self, x): @@ -285,7 +269,7 @@ def threshold_met(self, x): x : StateContainer state, with keys defined by model.states e.g., x = {'abc': 332.1, 'def': 221.003} given states = ['abc', 'def'] - + Returns ------- thresholds_met : dict @@ -295,9 +279,7 @@ def threshold_met(self, x): # REPLACE BELOW WITH LOGIC TO CALCULATE IF THRESHOLDS ARE MET # NOTE: KEYS FOR t_met MATCH 'events' LIST ABOVE - t_met = { - 'Example Event': False - } + t_met = {"Example Event": False} return t_met @@ -310,7 +292,7 @@ def performance_metrics(self, x) -> dict: x : StateContainer state, with keys defined by model.states \n e.g., x = m.StateContainer({'abc': 332.1, 'def': 221.003}) given states = ['abc', 'def'] - + Returns ------- pm : dict @@ -328,9 +310,7 @@ def performance_metrics(self, x) -> dict: # REPLACE BELOW WITH LOGIC TO CALCULATE PERFORMANCE METRICS # NOTE: KEYS FOR p_metrics MATCH 'performance_metric_keys' LIST ABOVE - p_metrics = { - 'metric1': 23 - } + p_metrics = {"metric1": 23} return p_metrics # V UNCOMMENT THE BELOW FUNCTION FOR DIRECT FUNCTIONS V diff --git a/docs/_downloads/11b2c744d2a7f75cacc676317a47260e/full_lstm_model.py b/docs/_downloads/11b2c744d2a7f75cacc676317a47260e/full_lstm_model.py new file mode 100644 index 00000000..6dd6f918 --- /dev/null +++ b/docs/_downloads/11b2c744d2a7f75cacc676317a47260e/full_lstm_model.py @@ -0,0 +1,153 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. +# This ensures that the directory containing examples is in the python search directories + +""" +Example building a full model with events and thresholds using LSTMStateTransitionModel. + +.. dropdown:: More details + + In this example, we generate fake data using the ThrownObject model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. + + We then create a subclass of the LSTMStateTransitionModel, defining the event_state and threshold equations as a function of output. We use the generated model and compare to the original model. +""" + +import matplotlib.pyplot as plt +import numpy as np +from progpy.data_models import LSTMStateTransitionModel +from progpy.models import ThrownObject + + +def run_example(): + # ----------------------------------------------------- + # Method 1 - manual definition + # In this example we complete the models by manually defining event_state + # and thresholds_met as function of output. 
+ # ----------------------------------------------------- + TIMESTEP = 0.01 + m = ThrownObject() + + def future_loading(t, x=None): + return m.InputContainer({}) # No input for thrown object + + # Step 1: Generate additional data + # We will use data generated above, but we also want data at additional timesteps + print("Generating data...") + data = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP, dt=TIMESTEP + ) + data_half = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP / 2, dt=TIMESTEP / 2 + ) + data_quarter = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP / 4, dt=TIMESTEP / 4 + ) + data_twice = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP * 2, dt=TIMESTEP * 2 + ) + data_four = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP * 4, dt=TIMESTEP * 4 + ) + + # Step 2: Data Prep + # We need to add the timestep as a input + u = np.array([[TIMESTEP] for _ in data.inputs]) + u_half = np.array([[TIMESTEP / 2] for _ in data_half.inputs]) + u_quarter = np.array([[TIMESTEP / 4] for _ in data_quarter.inputs]) + u_twice = np.array([[TIMESTEP * 2] for _ in data_twice.inputs]) + u_four = np.array([[TIMESTEP * 4] for _ in data_four.inputs]) + + # In this case we are saying that velocity is directly measurable, + # unlike the original model. This is necessary to calculate the events. + # Since the outputs will then match the states, we pass in the states below + + u_data = [u, u_half, u_quarter, u_twice, u_four] + z_data = [ + data.states, + data_half.states, + data_quarter.states, + data_twice.states, + data_four.states, + ] + + # Step 3: Create model + print("Creating model...") + + # Create a subclass of LSTMStateTransitionModel, + # overriding event-related methods and members + class LSTMThrownObject(LSTMStateTransitionModel): + events = [ + "falling", # Event- object is falling + "impact", # Event- object has impacted ground + ] + + def initialize(self, u=None, z=None): + # Add logic required for thrown object + self.max_x = 0.0 + return super().initialize(u, z) + + def event_state(self, x): + # Using class name instead of self allows the class to be subclassed + z = LSTMThrownObject.output(self, x) + # Logic from ThrownObject.event_state, using output instead of state + self.max_x = max(self.max_x, z["x"]) # Maximum altitude + return { + "falling": max( + z["v"] / self.parameters["throwing_speed"], 0 + ), # Throwing speed is max speed + "impact": max( + z["x"] / self.max_x, 0 + ), # 1 until falling begins, then it's fraction of height + } + + def threshold_met(self, x): + z = LSTMThrownObject.output(self, x) + # Logic from ThrownObject.threshold_met, using output instead of state + return {"falling": z["v"] < 0, "impact": z["x"] <= 0} + + # Step 4: Generate Model + print("Building model...") + m2 = LSTMThrownObject.from_data( + inputs=u_data, + outputs=z_data, + window=4, + epochs=30, + input_keys=["dt"], + output_keys=m.states, + ) + m2.plot_history() + + # Step 5: Simulate with model + t_counter = 0 + x_counter = m.initialize() + + def future_loading3(t, x=None): + nonlocal t_counter, x_counter + z = m2.InputContainer( + {"x_t-1": x_counter["x"], "v_t-1": x_counter["v"], "dt": t - t_counter} + ) + x_counter = m.next_state(x_counter, future_loading(t), t - t_counter) + t_counter = t + return z + + # Use new dt, not used in training + # Using a dt not used in training will demonstrate the model's + # ability to handle different timesteps 
not part of training set + data = m.simulate_to_threshold( + future_loading, events="impact", dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) + results3 = m2.simulate_to_threshold( + future_loading3, events="impact", dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) + + # Step 6: Compare Results + print("Comparing results...") + print("Predicted impact time:") + print("\tOriginal: ", data.times[-1]) + print("\tLSTM: ", results3.times[-1]) + data.outputs.plot(title="original model") + results3.outputs.plot(title="generated model") + plt.show() + + +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/12138a14c1f3eeb320b181ff8e8c5ab9/playback.py b/docs/_downloads/12138a14c1f3eeb320b181ff8e8c5ab9/playback.py index 638baf78..81222c7a 100644 --- a/docs/_downloads/12138a14c1f3eeb320b181ff8e8c5ab9/playback.py +++ b/docs/_downloads/12138a14c1f3eeb320b181ff8e8c5ab9/playback.py @@ -1,15 +1,15 @@ # Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. """ -This example performs state estimation and prediction using playback data. - +This example performs state estimation and prediction using playback data. + Method: An instance of the BatteryCircuit model in prog_models is created, the state estimation is set up by defining a state_estimator, and the prediction method is set up by defining a predictor. Prediction is then performed using playback data. For each data point: 1) The necessary data is extracted (time, current load, output values) and corresponding values defined (t, i, and z) 2) The current state estimate is performed and samples are drawn from this distribution 3) Prediction performed to get future states (with uncertainty) and the times at which the event threshold will be reached - -Results: + +Results: i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction ii) Time event is predicted to occur (with uncertainty) iii) Various prediction metrics @@ -35,73 +35,90 @@ # Constants NUM_SAMPLES = 20 -NUM_PARTICLES = 1000 # For state estimator (if using ParticleFilter) +NUM_PARTICLES = 1000 # For state estimator (if using ParticleFilter) TIME_STEP = 1 -PREDICTION_UPDATE_FREQ = 50 # Number of steps between prediction update +PREDICTION_UPDATE_FREQ = 50 # Number of steps between prediction update PLOT = True -PROCESS_NOISE = 1e-4 # Percentage process noise -MEASUREMENT_NOISE = 1e-4 # Percentage measurement noise -X0_COV = 1 # Covariance percentage with initial state -GROUND_TRUTH = {'EOD':2780} +PROCESS_NOISE = 1e-4 # Percentage process noise +MEASUREMENT_NOISE = 1e-4 # Percentage measurement noise +X0_COV = 1 # Covariance percentage with initial state +GROUND_TRUTH = {"EOD": 2780} ALPHA = 0.05 BETA = 0.90 LAMBDA_VALUE = 1500 + def run_example(): # Setup Model batt = Battery() # Initial state x0 = batt.initialize() - batt.parameters['process_noise'] = {key: PROCESS_NOISE * value for key, value in x0.items()} + batt.parameters["process_noise"] = { + key: PROCESS_NOISE * value for key, value in x0.items() + } z0 = batt.output(x0) - batt.parameters['measurement_noise'] = {key: MEASUREMENT_NOISE * value for key, value in z0.items()} - x0 = MultivariateNormalDist(x0.keys(), list(x0.values()), np.diag([max(1e-9, X0_COV * abs(x)) for x in x0.values()])) + batt.parameters["measurement_noise"] = { + key: MEASUREMENT_NOISE * value for key, value in z0.items() + } + x0 = MultivariateNormalDist( + x0.keys(), + list(x0.values()), + np.diag([max(1e-9, X0_COV * 
abs(x)) for x in x0.values()]), + ) # Setup State Estimation - filt = StateEstimator(batt, x0, num_particles = NUM_PARTICLES) + filt = StateEstimator(batt, x0, num_particles=NUM_PARTICLES) # Setup Prediction - load = batt.InputContainer({'i': 2.35}) + load = batt.InputContainer({"i": 2.35}) + def future_loading(t, x=None): return load - Q = np.diag([batt.parameters['process_noise'][key] for key in batt.states]) - R = np.diag([batt.parameters['measurement_noise'][key] for key in batt.outputs]) - mc = Predictor(batt, Q = Q, R = R) + + Q = np.diag([batt.parameters["process_noise"][key] for key in batt.states]) + R = np.diag([batt.parameters["measurement_noise"][key] for key in batt.outputs]) + mc = Predictor(batt, Q=Q, R=R) # Run Playback step = 0 profile = ToEPredictionProfile() - - with open('examples/data_const_load.csv', 'r') as f: + + with open("examples/data_const_load.csv", "r") as f: reader = csv.reader(f) - next(reader) # Skip header + next(reader) # Skip header for row in reader: step += 1 print("{} s: {} W, {} C, {} V".format(*row)) t = float(row[0]) - i = {'i': float(row[1])/float(row[3])} - z = {'t': float(row[2]), 'v': float(row[3])} + i = {"i": float(row[1]) / float(row[3])} + z = {"t": float(row[2]), "v": float(row[3])} # State Estimation Step - filt.estimate(t, i, z) - eod = batt.event_state(filt.x.mean)['EOD'] + filt.estimate(t, i, z) + eod = batt.event_state(filt.x.mean)["EOD"] print(" - Event State: ", eod) # Prediction Step (every PREDICTION_UPDATE_FREQ steps) - if (step%PREDICTION_UPDATE_FREQ == 0): - mc_results = mc.predict(filt.x, future_loading, t0 = t, n_samples=NUM_SAMPLES, dt=TIME_STEP) + if step % PREDICTION_UPDATE_FREQ == 0: + mc_results = mc.predict( + filt.x, future_loading, t0=t, n_samples=NUM_SAMPLES, dt=TIME_STEP + ) metrics = mc_results.time_of_event.metrics() - print(' - ToE: {} (sigma: {})'.format(metrics['EOD']['mean'], metrics['EOD']['std'])) + print( + " - ToE: {} (sigma: {})".format( + metrics["EOD"]["mean"], metrics["EOD"]["std"] + ) + ) profile.add_prediction(t, mc_results.time_of_event) # Calculating Prognostic Horizon once the loop completes from prog_algs.uncertain_data.uncertain_data import UncertainData from prog_algs.metrics import samples as metrics - def criteria_eqn(tte : UncertainData, ground_truth_tte : dict) -> dict: + def criteria_eqn(tte: UncertainData, ground_truth_tte: dict) -> dict: """ - Sample criteria equation for playback. + Sample criteria equation for playback. # UPDATE THIS CRITERIA EQN AND WHAT IS CALCULATED Args: @@ -110,17 +127,23 @@ def criteria_eqn(tte : UncertainData, ground_truth_tte : dict) -> dict: ground_truth_tte : dict Dictionary of ground truth of time to event. 
""" - + # Set an alpha value bounds = {} for key, value in ground_truth_tte.items(): # Set bounds for precentage_in_bounds by adding/subtracting to the ground_truth alpha_calc = value * ALPHA - bounds[key] = [value - alpha_calc, value + alpha_calc] # Construct bounds for all events + bounds[key] = [ + value - alpha_calc, + value + alpha_calc, + ] # Construct bounds for all events percentage_in_bounds = tte.percentage_in_bounds(bounds) - + # Verify if percentage in bounds for this ground truth meets beta distribution percentage limit - return {key: percentage_in_bounds[key] > BETA for key in percentage_in_bounds.keys()} + return { + key: percentage_in_bounds[key] > BETA + for key in percentage_in_bounds.keys() + } # Generate plots for playback example playback_plots = profile.plot(GROUND_TRUTH, ALPHA, True) @@ -137,8 +160,9 @@ def criteria_eqn(tte : UncertainData, ground_truth_tte : dict) -> dict: cra = profile.cumulative_relative_accuracy(GROUND_TRUTH) print(f"Cumulative Relative Accuracy for 'EOD': {cra['EOD']}") - input('Press any key to exit') + input("Press any key to exit") + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/123b429fd9474f8fa68ef1e5a9bce236/events.py b/docs/_downloads/123b429fd9474f8fa68ef1e5a9bce236/events.py index a2ff1d67..09b1c569 100644 --- a/docs/_downloads/123b429fd9474f8fa68ef1e5a9bce236/events.py +++ b/docs/_downloads/123b429fd9474f8fa68ef1e5a9bce236/events.py @@ -2,26 +2,28 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example further illustrating the concept of 'events' which generalizes EOL. +Example further illustrating the concept of 'events' which generalizes EOL. -'Events' is the term used to describe something to be predicted. -Generally in the PHM community these are referred to as End of Life (EOL). +'Events' is the term used to describe something to be predicted. +Generally in the PHM community these are referred to as End of Life (EOL). However, they can be much more. -In the prog_models package, events can be anything that needs to be predicted. -Events can represent End of Life (EOL), End of Mission (EOM), warning thresholds, or any Event of Interest (EOI). +In the prog_models package, events can be anything that needs to be predicted. +Events can represent End of Life (EOL), End of Mission (EOM), warning thresholds, or any Event of Interest (EOI). -This example demonstrates how events can be used in your applications. +This example demonstrates how events can be used in your applications. """ + from progpy.models import BatteryElectroChemEOD + def run_example(): # Example: Warning thresholds # In this example we will use the battery model # We of course are interested in end of discharge, but for this example we # have a requirement that says the battery must not fall below 5% State of Charge (SOC) # Note: SOC is the event state for the End of Discharge (EOD) event - # Event states, like SOC go between 0 and 1, where 1 is healthy and at 0 the event has occured. + # Event states, like SOC go between 0 and 1, where 1 is healthy and at 0 the event has occured. 
# So, 5% SOC corresponds to an 'EOD' event state of 0.05 # Additionally, we have two warning thresholds (yellow and red) @@ -31,7 +33,11 @@ def run_example(): # Step 1: Extend the battery model to define the additional events class MyBatt(BatteryElectroChemEOD): - events = BatteryElectroChemEOD.events + ['EOD_warn_yellow', 'EOD_warn_red', 'EOD_requirement_threshold'] + events = BatteryElectroChemEOD.events + [ + "EOD_warn_yellow", + "EOD_warn_red", + "EOD_requirement_threshold", + ] def event_state(self, state): # Get event state from parent @@ -40,22 +46,30 @@ def event_state(self, state): # Add yellow, red, and failure states by scaling EOD state # Here we scale so the threshold SOC is 0 by their associated events, while SOC of 1 is still 1 # For example, for yellow we want EOD_warn_yellow to be 1 when SOC is 1, and 0 when SOC is YELLOW_THRESH or lower - event_state['EOD_warn_yellow'] = (event_state['EOD']-YELLOW_THRESH)/(1-YELLOW_THRESH) - event_state['EOD_warn_red'] = (event_state['EOD']-RED_THRESH)/(1-RED_THRESH) - event_state['EOD_requirement_threshold'] = (event_state['EOD']-THRESHOLD)/(1-THRESHOLD) + event_state["EOD_warn_yellow"] = (event_state["EOD"] - YELLOW_THRESH) / ( + 1 - YELLOW_THRESH + ) + event_state["EOD_warn_red"] = (event_state["EOD"] - RED_THRESH) / ( + 1 - RED_THRESH + ) + event_state["EOD_requirement_threshold"] = ( + event_state["EOD"] - THRESHOLD + ) / (1 - THRESHOLD) # Return return event_state def threshold_met(self, x): # Get threshold met from parent - t_met = super().threshold_met(x) + t_met = super().threshold_met(x) # Add yell and red states from event_state event_state = self.event_state(x) - t_met['EOD_warn_yellow'] = event_state['EOD_warn_yellow'] <= 0 - t_met['EOD_warn_red'] = event_state['EOD_warn_red'] <= 0 - t_met['EOD_requirement_threshold'] = event_state['EOD_requirement_threshold'] <= 0 + t_met["EOD_warn_yellow"] = event_state["EOD_warn_yellow"] <= 0 + t_met["EOD_warn_red"] = event_state["EOD_warn_red"] <= 0 + t_met["EOD_requirement_threshold"] = ( + event_state["EOD_requirement_threshold"] <= 0 + ) return t_met @@ -64,28 +78,32 @@ def threshold_met(self, x): # 2a: Setup model def future_loading(t, x=None): - # Variable (piece-wise) future loading scheme - # For a battery, future loading is in term of current 'i' in amps. - if (t < 600): + # Variable (piece-wise) future loading scheme + # For a battery, future loading is in term of current 'i' in amps. 
+ if t < 600: i = 2 - elif (t < 900): + elif t < 900: i = 1 - elif (t < 1800): + elif t < 1800: i = 4 - elif (t < 3000): - i = 2 + elif t < 3000: + i = 2 else: i = 3 - return m.InputContainer({'i': i}) - + return m.InputContainer({"i": i}) + # 2b: Simulate to threshold - simulated_results = m.simulate_to_threshold(future_loading, threshold_keys=['EOD'], print = True) + simulated_results = m.simulate_to_threshold( + future_loading, threshold_keys=["EOD"], print=True + ) # 2c: Plot results simulated_results.event_states.plot() import matplotlib.pyplot as plt + plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/124917318b8aeda76cd308154fd2eaf9/derived_params.py b/docs/_downloads/124917318b8aeda76cd308154fd2eaf9/derived_params.py index fd479556..7d35e46a 100644 --- a/docs/_downloads/124917318b8aeda76cd308154fd2eaf9/derived_params.py +++ b/docs/_downloads/124917318b8aeda76cd308154fd2eaf9/derived_params.py @@ -2,33 +2,35 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating ways to use the derived parameters feature for model building. +Example demonstrating ways to use the derived parameters feature for model building. .. dropdown:: More details - + In this example, a derived parameter (i.e., a parameter that is a function of another parameter) are defined for the simple ThrownObject model. These parameters are then calculated whenever their dependency parameters are updated, eliminating the need to calculate each timestep in simulation. The functionality of this feature is then demonstrated. """ from progpy.models.thrown_object import ThrownObject + def run_example(): # For this example we will use the ThrownObject model from the new_model example. # We will extend that model to include a derived parameter - # Let's assume that the throwing_speed was actually a function of thrower_height + # Let's assume that the throwing_speed was actually a function of thrower_height # (i.e., a taller thrower would throw the ball faster). # Here's how we would implement that # Step 1: Define a function for the relationship between thrower_height and throwing_speed. def update_thrown_speed(params): return { - 'throwing_speed': params['thrower_height'] * 21.85 + "throwing_speed": params["thrower_height"] * 21.85 } # Assumes thrown_speed is linear function of height + # Note: one or more parameters can be changed in these functions, whatever parameters are changed are returned in the dictionary # Step 2: Define the param callbacks - ThrownObject.param_callbacks.update({ - 'thrower_height': [update_thrown_speed] - }) # Tell the derived callbacks feature to call this function when thrower_height changes. + ThrownObject.param_callbacks.update( + {"thrower_height": [update_thrown_speed]} + ) # Tell the derived callbacks feature to call this function when thrower_height changes. # Note: Usually we would define this method within the class # for this example, we're doing it separately to improve readability # Note2: You can also have more than one function be called when a single parameter is changed. @@ -36,15 +38,23 @@ def update_thrown_speed(params): # Step 3: Use! 
obj = ThrownObject() - print("Default Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format(obj.parameters['thrower_height'], obj.parameters['throwing_speed'])) - + print( + "Default Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format( + obj.parameters["thrower_height"], obj.parameters["throwing_speed"] + ) + ) + # Now let's change the thrower_height print("changing height...") - obj.parameters['thrower_height'] = 1.75 # Our thrower is 1.75 m tall - print("\nUpdated Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format(obj.parameters['thrower_height'], obj.parameters['throwing_speed'])) + obj.parameters["thrower_height"] = 1.75 # Our thrower is 1.75 m tall + print( + "\nUpdated Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format( + obj.parameters["thrower_height"], obj.parameters["throwing_speed"] + ) + ) print("Notice how speed changed automatically with height") -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/134152f7c47b3c3fa2378dcb3ac443d7/basic_example_battery.py b/docs/_downloads/134152f7c47b3c3fa2378dcb3ac443d7/basic_example_battery.py index d391caa1..3184e483 100644 --- a/docs/_downloads/134152f7c47b3c3fa2378dcb3ac443d7/basic_example_battery.py +++ b/docs/_downloads/134152f7c47b3c3fa2378dcb3ac443d7/basic_example_battery.py @@ -2,13 +2,13 @@ """ This example extends the "basic example" to perform a state estimation and prediction with uncertainty given a more complicated model. Models, state estimators, and predictors can be switched out. See documentation nasa.github.io/progpy for description of options - + Method: An instance of the BatteryCircuit model in prog_models is created, and the prediction process is achieved in three steps: 1) State estimation of the current state is performed using a chosen state_estimator, and samples are drawn from this estimate 2) Prediction of future states (with uncertainty) and the times at which the event threshold will be reached 3) Metrics tools are used to further investigate the results of prediction -Results: +Results: i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction ii) Time event is predicted to occur (with uncertainty) iii) Various prediction metrics @@ -27,31 +27,30 @@ # VVV Uncomment this to use UnscentedTransform Predictor VVV # from progpy.predictors import UnscentedTransformPredictor as Predictor + def run_example(): # Step 1: Setup model & future loading # Measurement noise - R_vars = { - 't': 2, - 'v': 0.02 - } - batt = Battery(process_noise = 0.25, measurement_noise = R_vars) + R_vars = {"t": 2, "v": 0.02} + batt = Battery(process_noise=0.25, measurement_noise=R_vars) # Creating the input containers outside of the function accelerates prediction loads = [ - batt.InputContainer({'i': 2}), - batt.InputContainer({'i': 1}), - batt.InputContainer({'i': 4}), - batt.InputContainer({'i': 2}), - batt.InputContainer({'i': 3}) + batt.InputContainer({"i": 2}), + batt.InputContainer({"i": 1}), + batt.InputContainer({"i": 4}), + batt.InputContainer({"i": 2}), + batt.InputContainer({"i": 3}), ] - def future_loading(t, x = None): - # Variable (piece-wise) future loading scheme - if (t < 600): + + def future_loading(t, x=None): + # Variable (piece-wise) future loading scheme + if t < 600: return loads[0] - elif (t < 900): + elif t < 900: return loads[1] - elif (t < 1800): + elif t < 1800: return loads[2] - elif (t < 
3000): + elif t < 3000: return loads[3] return loads[-1] @@ -62,22 +61,24 @@ def future_loading(t, x = None): # Step 2a: Setup filt = StateEstimator(batt, initial_state) - + # Step 2b: Print & Plot Prior State print("Prior State:", filt.x.mean) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) - fig = filt.x.plot_scatter(label='prior') + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) + fig = filt.x.plot_scatter(label="prior") # Step 2c: Perform state estimation step - example_measurements = batt.OutputContainer({'t': 32.2, 'v': 3.915}) + example_measurements = batt.OutputContainer({"t": 32.2, "v": 3.915}) t = 0.1 u = future_loading(t) filt.estimate(t, u, example_measurements) # Step 2d: Print & Plot Resulting Posterior State print("\nPosterior State:", filt.x.mean) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) - filt.x.plot_scatter(fig=fig, label='posterior') # Add posterior state to figure from prior state + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) + filt.x.plot_scatter( + fig=fig, label="posterior" + ) # Add posterior state to figure from prior state # Note: in a prognostic application the above state estimation step would be repeated each time # there is new data. Here we're doing one step to demonstrate how the state estimator is used @@ -92,13 +93,16 @@ def future_loading(t, x = None): NUM_SAMPLES = 25 STEP_SIZE = 0.1 SAVE_FREQ = 100 # How often to save results - mc_results = mc.predict(filt.x, future_loading, n_samples = NUM_SAMPLES, dt=STEP_SIZE, save_freq = SAVE_FREQ) - print('ToE', mc_results.time_of_event.mean) + mc_results = mc.predict( + filt.x, future_loading, n_samples=NUM_SAMPLES, dt=STEP_SIZE, save_freq=SAVE_FREQ + ) + print("ToE", mc_results.time_of_event.mean) # Step 3c: Analyze the results # Note: The results of a sample-based prediction can be accessed by sample, e.g., from progpy.predictors import UnweightedSamplesPrediction + if isinstance(mc_results, UnweightedSamplesPrediction): states_sample_1 = mc_results.states[1] # now states_sample_1[n] corresponds to times[n] for the first sample @@ -108,41 +112,63 @@ def future_loading(t, x = None): # now you have all the samples corresponding to times[1] # Print Results - print('Results: ') + print("Results: ") for i, time in enumerate(mc_results.times): - print('\nt = {}'.format(time)) - print('\tu = {}'.format(mc_results.inputs.snapshot(i).mean)) - print('\tx = {}'.format(mc_results.states.snapshot(i).mean)) - print('\tz = {}'.format(mc_results.outputs.snapshot(i).mean)) - print('\tevent state = {}'.format(mc_results.event_states.snapshot(i).mean)) + print("\nt = {}".format(time)) + print("\tu = {}".format(mc_results.inputs.snapshot(i).mean)) + print("\tx = {}".format(mc_results.states.snapshot(i).mean)) + print("\tz = {}".format(mc_results.outputs.snapshot(i).mean)) + print("\tevent state = {}".format(mc_results.event_states.snapshot(i).mean)) # You can also access the final state (of type UncertainData), like so: final_state = mc_results.time_of_event.final_state - print('Final state @EOD: ', final_state['EOD'].mean) - + print("Final state @EOD: ", final_state["EOD"].mean) + # You can also use the metrics package to generate some useful metrics on the result of a prediction print("\nEOD Prediction Metrics") from progpy.metrics import prob_success - print('\tPortion between 3005.2 and 3005.6: ', mc_results.time_of_event.percentage_in_bounds([3005.2, 3005.6])) - print('\tAssuming ground truth 3002.25: ', mc_results.time_of_event.metrics(ground_truth=3005.25)) - print('\tP(Success) if 
mission ends at 3002.25: ', prob_success(mc_results.time_of_event, 3005.25)) - # Plot state transition + print( + "\tPortion between 3005.2 and 3005.6: ", + mc_results.time_of_event.percentage_in_bounds([3005.2, 3005.6]), + ) + print( + "\tAssuming ground truth 3002.25: ", + mc_results.time_of_event.metrics(ground_truth=3005.25), + ) + print( + "\tP(Success) if mission ends at 3002.25: ", + prob_success(mc_results.time_of_event, 3005.25), + ) + + # Plot state transition # Here we will plot the states at t0, 25% to ToE, 50% to ToE, 75% to ToE, and ToE - fig = mc_results.states.snapshot(0).plot_scatter(label = "t={} s".format(int(mc_results.times[0]))) # 0 - quarter_index = int(len(mc_results.times)/4) - mc_results.states.snapshot(quarter_index).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[quarter_index]))) # 25% - mc_results.states.snapshot(quarter_index*2).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[quarter_index*2]))) # 50% - mc_results.states.snapshot(quarter_index*3).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[quarter_index*3]))) # 75% - mc_results.states.snapshot(-1).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[-1]))) # 100% + fig = mc_results.states.snapshot(0).plot_scatter( + label="t={} s".format(int(mc_results.times[0])) + ) # 0 + quarter_index = int(len(mc_results.times) / 4) + mc_results.states.snapshot(quarter_index).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index])) + ) # 25% + mc_results.states.snapshot(quarter_index * 2).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index * 2])) + ) # 50% + mc_results.states.snapshot(quarter_index * 3).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index * 3])) + ) # 75% + mc_results.states.snapshot(-1).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[-1])) + ) # 100% mc_results.time_of_event.plot_hist() - + # Step 4: Show all plots import matplotlib.pyplot as plt # For plotting + plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/13d857acf37346324d5495732f8cc44e/10_Prognostics Server.ipynb b/docs/_downloads/13d857acf37346324d5495732f8cc44e/10_Prognostics Server.ipynb new file mode 100644 index 00000000..f7300291 --- /dev/null +++ b/docs/_downloads/13d857acf37346324d5495732f8cc44e/10_Prognostics Server.ipynb @@ -0,0 +1,568 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Prognostics Server (prog_server)\n", + "\n", + "The ProgPy Server (`prog_server`) is a simplified implementation of a Service-Oriented Architecture (SOA) for performing prognostics (estimation of time until events and future system states) of engineering systems. `prog_server` is a wrapper around the ProgPy package, allowing one or more users to access the features of these packages through a REST API. The package is intended to be used as a research tool to prototype and benchmark Prognostics As-A-Service (PaaS) architectures and work on the challenges facing such architectures, including Generality, Communication, Security, Environmental Complexity, Utility, and Trust.\n", + "\n", + "The ProgPy Server is actually two packages, `prog_server` and `prog_client`. The `prog_server` package is a prognostics server that provides the REST API. 
The `prog_client` package is a python client that provides functions to interact with the server via the REST API.\n", + "\n", + "**TODO(CT): IMAGE- server with clients**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "* [Installing](#Installing)\n", + "* [Starting prog_server](#Starting-prog_server)\n", + " * [Command Line](#Command-Line)\n", + " * [Programmtically](#Programatically)\n", + "* [Using prog_server with prog_client](#Using-prog_server-with-prog_client)\n", + " * [Online Prognostics Example](#Online-Prognostics-Example)\n", + " * [Option Scoring Example](#Option-scoring-example)\n", + "* [Using prog_server with REST Interface](#Using-prog_server-with-REST-Interface)\n", + "* [Custom Models](#Custom-Models)\n", + "* [Closing prog_server](#Closing-prog_server)\n", + "* [Conclusion](#Conclusion)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Installing\n", + "\n", + "`prog_server` can be installed using pip\n", + "\n", + "```console\n", + "$ pip install prog_server\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Starting prog_server" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`prog_server` can be started through the command line or programatically (i.e., in a python script). Once the server is started, it will take a short time to initialize. Then, it will start receiving requests for sessions from clients using `prog_client`, or interacting directly using the REST interface." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Command Line\n", + "Generally, you can start `prog_server` by running the module, like this:\n", + "\n", + "```console\n", + "$ python -m prog_server\n", + "```\n", + "\n", + "Note that you can force the server to start in debug mode using the `debug` flag. For example, `python -m prog_server --debug`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Programatically\n", + "There are two methods to start the `prog_server` programatically in python. The first, below, is non-blocking and allows users to perform other functions while the server is running." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import prog_server\n", + "\n", + "prog_server.start()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When starting a server, users can also provide arguments to customize the way the server runs. Here are the main arguments used:\n", + "\n", + "* `host` (str): Server host address. Defaults to ‘127.0.0.1’\n", + "* `port` (int): Server port address. Defaults to 8555\n", + "* `debug` (bool): If the server is to be started in debug mode\n", + "\n", + "Now `prog_server` is ready to start receiving session requests from users. 
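To make the start-up arguments above concrete, here is a minimal sketch that passes them explicitly; the values shown are simply the documented defaults (`host='127.0.0.1'`, `port=8555`), so adjust them for your environment.

```python
import prog_server

# Start the server with the arguments described above (these are the
# documented defaults, written out explicitly for illustration).
prog_server.start(host="127.0.0.1", port=8555, debug=False)

# ... create sessions and interact with the server here ...

prog_server.stop()
```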
The server can also be stopped using the `stop()` function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prog_server.stop()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`prog_server` can also be started in blocked mode using the following command:\n", + "\n", + "```python\n", + ">>> prog_server.run()\n", + "```\n", + "\n", + "We will not execute it here, because it would block execution in this notebook until we force quit.\n", + "\n", + "For details on all supported arguments, see the [API Doc](https://nasa.github.io/progpy/api_ref/prog_server/prog_server.html#prog_server.start).\n", + "\n", + "The basis of `prog_server` is the session. Each user creates one or more session. These sessions are each a request for prognostic services. Then the user can interact with the open session. You'll see examples of this in the future sections.\n", + "\n", + "Let's restart the server again so it can be used with the below examples." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prog_server.start()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using prog_server with prog_client\n", + "\n", + "For users using python, `prog_server` can be interacted with using the `prog_client` package distributed with ProgPy. This section describes a few examples using `prog_client` and `prog_server` together.\n", + "\n", + "We will first import the needed package." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import prog_client" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Online Prognostics Example\n", + "This example creates a session with the server to run prognostics for a Thrown Object, a simplified model of an object thrown into the air. Data is then sent to the server and a prediction is requested. The prediction is then displayed.\n", + "\n", + "**Note: before running this example, make sure `prog_server` is running.**\n", + "\n", + "The first step is to open a session with the server. This starts a session for prognostics with the ThrownObject model, with default parameters. The prediction configuration is updated to have a save frequency of every 1 second." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session = prog_client.Session(\"ThrownObject\", pred_cfg={\"save_freq\": 1})\n", + "print(session) # Printing the Session Information" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you were to re-run the lines above, it would start a new session, with a new number.\n", + "\n", + "Next, we need to prepare the data we will use for this example. The data is a dictionary, and the keys are the names of the inputs and outputs in the model with format (time, value).\n", + "\n", + "Note that in an actual application, the data would be received from a sensor or other source. The structure below is used to emulate the sensor." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "example_data = [\n", + " (0, {\"x\": 1.83}),\n", + " (0.1, {\"x\": 5.81}),\n", + " (0.2, {\"x\": 9.75}),\n", + " (0.3, {\"x\": 13.51}),\n", + " (0.4, {\"x\": 17.20}),\n", + " (0.5, {\"x\": 20.87}),\n", + " (0.6, {\"x\": 24.37}),\n", + " (0.7, {\"x\": 27.75}),\n", + " (0.8, {\"x\": 31.09}),\n", + " (0.9, {\"x\": 34.30}),\n", + " (1.0, {\"x\": 37.42}),\n", + " (1.1, {\"x\": 40.43}),\n", + " (1.2, {\"x\": 43.35}),\n", + " (1.3, {\"x\": 46.17}),\n", + " (1.4, {\"x\": 48.91}),\n", + " (1.5, {\"x\": 51.53}),\n", + " (1.6, {\"x\": 54.05}),\n", + " (1.7, {\"x\": 56.50}),\n", + " (1.8, {\"x\": 58.82}),\n", + " (1.9, {\"x\": 61.05}),\n", + " (2.0, {\"x\": 63.20}),\n", + " (2.1, {\"x\": 65.23}),\n", + " (2.2, {\"x\": 67.17}),\n", + " (2.3, {\"x\": 69.02}),\n", + " (2.4, {\"x\": 70.75}),\n", + " (2.5, {\"x\": 72.40}),\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, we can start sending the data to the server, checking periodically to see if there is a completed prediction." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from time import sleep\n", + "\n", + "LAST_PREDICTION_TIME = None\n", + "for i in range(len(example_data)):\n", + " # Send data to server\n", + " print(f\"{example_data[i][0]}s: Sending data to server... \", end=\"\")\n", + " session.send_data(time=example_data[i][0], **example_data[i][1])\n", + "\n", + " # Check for a prediction result\n", + " status = session.get_prediction_status()\n", + " if LAST_PREDICTION_TIME != status[\"last prediction\"]:\n", + " # New prediction result\n", + " LAST_PREDICTION_TIME = status[\"last prediction\"]\n", + " print(\"Prediction Completed\")\n", + "\n", + " # Get prediction\n", + " # Prediction is returned as a type uncertain_data, so you can manipulate it like that datatype.\n", + " # See https://nasa.github.io/prog_algs/uncertain_data.html\n", + " t, prediction = session.get_predicted_toe()\n", + " print(f\"Predicted ToE (using state from {t}s): \")\n", + " print(prediction.mean)\n", + "\n", + " # Get Predicted future states\n", + " # You can also get the predicted future states of the model.\n", + " # States are saved according to the prediction configuration parameter 'save_freq' or 'save_pts'\n", + " # In this example we have it setup to save every 1 second.\n", + " # Return type is UnweightedSamplesPrediction (since we're using the monte carlo predictor)\n", + " # See https://nasa.github.io/prog_algs\n", + " t, event_states = session.get_predicted_event_state()\n", + " print(f\"Predicted Event States (using state from {t}s): \")\n", + " es_means = [\n", + " (event_states.times[i], event_states.snapshot(i).mean)\n", + " for i in range(len(event_states.times))\n", + " ]\n", + " for time, es_mean in es_means:\n", + " print(f\"\\t{time}s: {es_mean}\")\n", + "\n", + " # Note: you can also get the predicted future states of the model (see get_predicted_states()) or performance parameters (see get_predicted_performance_metrics())\n", + "\n", + " else:\n", + " print(\"No prediction yet\")\n", + " # No updated prediction, send more data and check again later.\n", + " sleep(0.1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that the prediction wasn't updated every time step. 
It takes a bit of time to perform a prediction.\n", + "\n", + "Note that we can also get the model from `prog_server` to work with directly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model = session.get_model()\n", + "\n", + "print(model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Option Scoring Example\n", + "\n", + "This example creates a session with the server to run prognostics for a `BatteryCircuit` model. Three options with different loading profiles are compared by creating a session for each option and comparing the resulting prediction metrics.\n", + "\n", + "First step is to prepare load profiles to compare. Each load profile has format `Array[Dict]`. Where each dict is in format `{TIME: LOAD}`, where `TIME` is the start of that loading in seconds. `LOAD` is a dict with keys corresponding to model.inputs. Note that the dict must be in order of increasing time.\n", + "\n", + "Here we introduce 3 load profiles to be used with simulation:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plan0 = {0: {\"i\": 2}, 600: {\"i\": 1}, 900: {\"i\": 4}, 1800: {\"i\": 2}, 3000: {\"i\": 3}}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plan1 = {0: {\"i\": 3}, 900: {\"i\": 2}, 1000: {\"i\": 3.5}, 2000: {\"i\": 2.5}, 2300: {\"i\": 3}}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plan2 = {\n", + " 0: {\"i\": 1.25},\n", + " 800: {\"i\": 2},\n", + " 1100: {\"i\": 2.5},\n", + " 2200: {\"i\": 6},\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "LOAD_PROFILES = [plan0, plan1, plan2]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The next step is to open a session with the battery circuit model for each of the 3 plans. We are specifying a time of interest of 2000 seconds (for the sake of a demo). This could be the end of a mission/session, or some inspection time." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sessions = [\n", + " prog_client.Session(\n", + " \"BatteryCircuit\",\n", + " pred_cfg={\"save_pts\": [2000], \"save_freq\": 1e99, \"n_samples\": 15},\n", + " load_est=\"Variable\",\n", + " load_est_cfg=LOAD_PROFILES[i],\n", + " )\n", + " for i in range(len(LOAD_PROFILES))\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's wait for prognostics to be complete." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for session in sessions:\n", + " sessions_in_progress = True\n", + " while sessions_in_progress:\n", + " sessions_in_progress = False\n", + " status = session.get_prediction_status()\n", + " if status[\"in progress\"] != 0:\n", + " print(f\"\\tSession {session.session_id} is still in progress\")\n", + " sessions_in_progress = True\n", + " sleep(5)\n", + " print(f\"\\tSession {session.session_id} complete\")\n", + "print(\"All sessions complete\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that the sessions are complete, we can get the results." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = [session.get_predicted_toe()[1] for session in sessions]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's compare results. Let's look at the mean Time to Event (`ToE`):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Mean ToE:\")\n", + "best_toe = 0\n", + "best_plan = None\n", + "for i in range(len(results)):\n", + " mean_toe = results[i].mean[\"EOD\"]\n", + " print(f\"\\tOption {i}: {mean_toe:0.2f}s\")\n", + " if mean_toe > best_toe:\n", + " best_toe = mean_toe\n", + " best_plan = i\n", + "print(f\"Best option using method 1: Option {best_plan}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As a second metric, let's look at the `SOC` at our point of interest (2000 seconds)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "best_soc = 0\n", + "best_plan = None\n", + "soc = [session.get_predicted_event_state()[1] for session in sessions]\n", + "for i in range(len(soc)):\n", + " mean_soc = soc[i].snapshot(-1).mean[\"EOD\"]\n", + " print(f\"\\tOption {i}: {mean_soc:0.3f} SOC\")\n", + " if mean_soc > best_soc:\n", + " best_soc = mean_soc\n", + " best_plan = i\n", + "print(f\"Best option using method 2: Option {best_plan}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Other metrics can be used as well, like probability of mission success given a certain mission time, uncertainty in `ToE` estimate, final state at end of mission, among others." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using prog_server with REST Interface\n", + "\n", + "Communication with ProgPy is through a REST interface. The REST API is described here: [prog_server REST API](https://app.swaggerhub.com/apis-docs/teubert/prog_server/).\n", + "\n", + "Most programming languages have a way of interacting with REST APIs (either native or through a package/library). `curl` requests can also be used by command line or apps like Postman." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom Models\n", + "**A version of this section will be added in release v1.9** " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Closing prog_server\n", + "When you're done using prog_server, make sure you turn off the server." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prog_server.stop()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this section, we have demonstrated how to use the ProgPy server, including `prog_server` and `prog_client`. This is the last notebook in the ProgPy tutorial series.\n", + "\n", + "For more information about ProgPy in general, check out the __[00 Intro](00_Intro.ipynb)__ notebook and [ProgPy documentation](https://nasa.github.io/progpy/index.html)." 
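As a companion to the "Using prog_server with REST Interface" section above, the sketch below uses Python's `requests` library to illustrate the idea; the route and payload keys are assumed placeholders only, so consult the SwaggerHub REST API reference linked in that section for the actual endpoints and schemas.

```python
import requests

BASE = "http://127.0.0.1:8555"  # default host/port used by prog_server

# NOTE: '/api/v1/session' and the payload keys below are assumed placeholders
# for illustration -- see the SwaggerHub API reference linked above for the
# real session-creation route and its schema.
resp = requests.post(f"{BASE}/api/v1/session", json={"model": "ThrownObject"})
print(resp.status_code, resp.text)
```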
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.11.0 ('env': venv)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "71ccad9e81d0b15f7bb5ef75e2d2ca570011b457fb5a41421e3ae9c0e4c33dfc" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/_downloads/13ddd0569b346d75f7b8739f6b573f1f/custom_model.py b/docs/_downloads/13ddd0569b346d75f7b8739f6b573f1f/custom_model.py new file mode 100644 index 00000000..f0b09c66 --- /dev/null +++ b/docs/_downloads/13ddd0569b346d75f7b8739f6b573f1f/custom_model.py @@ -0,0 +1,147 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. +# This ensures that the directory containing examples is in the python search directories + +""" +Example building a custom model with LSTMStateTransitionModel. + +.. dropdown:: More details + + For most cases, you will be able to use the standard LSTMStateTransitionModel.from_data class with configuration (see the LSTMStateTransitionModel class for more details). However, sometimes you might want to add custom layers, or other complex components. In that case, you will build a custom model and pass it into LSTMStateTransitionModel. + + In this example, we generate fake data using the BatteryElectroChemEOD model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. + + We build and fit a custom model using keras.layers. Finally, we compare performance to the standard format and the original model. 
+""" + +import matplotlib.pyplot as plt +import numpy as np +from progpy.data_models import LSTMStateTransitionModel +from progpy.models import BatteryElectroChemEOD +from tensorflow import keras +from tensorflow.keras import layers + + +def run_example(): + WINDOW = 12 + + print("Generating data...") + batt = BatteryElectroChemEOD() + future_loading_eqns = [ + lambda t, x=None: batt.InputContainer({"i": 1 + 1.4 * load}) + for load in range(6) + ] + # Generate data with different loading and step sizes + # Adding the step size as an element of the output + input_data = [] + output_data = [] + for i in range(9): + dt = i / 3 + 0.25 + for loading_eqn in future_loading_eqns: + d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) + u = np.array( + [np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float + ) + z = d.outputs + + if len(u) > WINDOW: + input_data.append(u) + output_data.append(z) + + # Step 2: Build standard model + print("Building standard model...") + m_batt = LSTMStateTransitionModel.from_data( + inputs=input_data, + outputs=output_data, + window=WINDOW, + epochs=30, + units=64, # Additional units given the increased complexity of the system + input_keys=["i", "dt"], + output_keys=["t", "v"], + ) + m_batt.plot_history() + + # Step 3: Build custom model + print("Building custom model...") + (u_all, z_all, _, _) = LSTMStateTransitionModel.pre_process_data( + input_data, output_data, window=12 + ) + + # Normalize + n_inputs = len(input_data[0][0]) + u_mean = np.mean(u_all[:, 0, :n_inputs], axis=0) + u_std = np.std(u_all[:, 0, :n_inputs], axis=0) + # If there's no variation- don't normalize + u_std[u_std == 0] = 1 + z_mean = np.mean(z_all, axis=0) + z_std = np.std(z_all, axis=0) + # If there's no variation- don't normalize + z_std[z_std == 0] = 1 + + # Add output (since z_t-1 is last input) + u_mean = np.hstack((u_mean, z_mean)) + u_std = np.hstack((u_std, z_std)) + + u_all = (u_all - u_mean) / u_std + z_all = (z_all - z_mean) / z_std + + # u_mean and u_std act on the column vector form (from inputcontainer) + # so we need to transpose them to a column vector + normalization = (u_mean[np.newaxis].T, u_std[np.newaxis].T, z_mean, z_std) + + callbacks = [ + keras.callbacks.ModelCheckpoint("jena_sense.keras", save_best_only=True) + ] + inputs = keras.Input(shape=u_all.shape[1:]) + x = layers.Bidirectional(layers.LSTM(128))(inputs) + x = layers.Dropout(0.1)(x) + x = layers.Dense(z_all.shape[1] if z_all.ndim == 2 else 1)(x) + model = keras.Model(inputs, x) + model.compile(optimizer="rmsprop", loss="mse", metrics=["mae"]) + history = model.fit( + u_all, z_all, epochs=30, callbacks=callbacks, validation_split=0.1 + ) + + # Step 4: Build LSTMStateTransitionModel + m_custom = LSTMStateTransitionModel( + model, + normalization=normalization, + input_keys=["i", "dt"], + output_keys=["t", "v"], + history=history, # Provide history so plot_history will work + ) + m_custom.plot_history() + + # Step 5: Simulate + print("Simulating...") + t_counter = 0 + x_counter = batt.initialize() + + def future_loading(t, x=None): + return batt.InputContainer({"i": 3}) + + def future_loading2(t, x=None): + nonlocal t_counter, x_counter + z = batt.output(x_counter) + z = m_batt.InputContainer( + {"i": 3, "t_t-1": z["t"], "v_t-1": z["v"], "dt": t - t_counter} + ) + x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter) + t_counter = t + return z + + data = batt.simulate_to_threshold(future_loading, dt=1, save_freq=1) + results = m_batt.simulate_to(data.times[-1], 
future_loading2, dt=1, save_freq=1) + results_custom = m_custom.simulate_to( + data.times[-1], future_loading2, dt=1, save_freq=1 + ) + + # Step 6: Compare performance + print("Comparing performance...") + data.outputs.plot(title="original model", compact=False) + results.outputs.plot(title="generated model", compact=False) + results_custom.outputs.plot(title="custom model", compact=False) + plt.show() + + +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/160687b265770e7120266d22a6015f55/sim.py b/docs/_downloads/160687b265770e7120266d22a6015f55/sim.py index 1bc215d1..d433126f 100644 --- a/docs/_downloads/160687b265770e7120266d22a6015f55/sim.py +++ b/docs/_downloads/160687b265770e7120266d22a6015f55/sim.py @@ -13,6 +13,7 @@ # VVV Uncomment this to use Electro Chemistry Model VVV # Battery = BatteryElectroChem + def run_example(): # Step 1: Create a model object batt = Battery() @@ -20,13 +21,14 @@ def run_example(): # Step 2: Define future loading function - here we're using a piecewise scheme future_loading = Piecewise( batt.InputContainer, - [600, 900, 1800, 3600, float('inf')], - {'i': [2, 1, 4, 2, 3]}) + [600, 900, 1800, 3600, float("inf")], + {"i": [2, 1, 4, 2, 3]}, + ) # simulate for 200 seconds - print('\n\n------------------------------------------------') - print('Simulating for 200 seconds\n\n') - simulated_results = batt.simulate_to(200, future_loading, print = True, progress = True) + print("\n\n------------------------------------------------") + print("Simulating for 200 seconds\n\n") + simulated_results = batt.simulate_to(200, future_loading, print=True, progress=True) # The result of the simulation is now stored in simulated_results. # You can access the results by accessing the individual variables: # times, inputs, states, outputs, event_states @@ -36,25 +38,35 @@ def run_example(): simulated_results.outputs.plot() # or, with configuration - simulated_results.outputs.plot(compact = False, suptitle = 'Outputs', title = 'example title', xlabel = 'time', ylabel = 'output') + simulated_results.outputs.plot( + compact=False, + suptitle="Outputs", + title="example title", + xlabel="time", + ylabel="output", + ) # Simulate to threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") options = { - 'save_freq': 100, # Frequency at which results are saved - 'dt': 2, # Timestep - 'print': True, - 'progress': True + "save_freq": 100, # Frequency at which results are saved + "dt": 2, # Timestep + "print": True, + "progress": True, } simulated_results = batt.simulate_to_threshold(future_loading, **options) # Alternately, you can set a max step size and allow step size to be adjusted automatically - options['dt'] = ('auto', 2) - # set step size automatically, with a max of 2 seconds. Setting max step size automatically will allow the + options["dt"] = ("auto", 2) + # set step size automatically, with a max of 2 seconds. 
Setting max step size automatically will allow the # save points, stop points, and future loading change points to be met exactly - options['save_freq'] = 201 # Save every 201 seconds - options['save_pts'] = [250, 772, 1023] # Special points we should like to see reported + options["save_freq"] = 201 # Save every 201 seconds + options["save_pts"] = [ + 250, + 772, + 1023, + ] # Special points we should like to see reported simulated_results = batt.simulate_to_threshold(future_loading, **options) # Note that even though the step size is 2, the odd points in the save frequency are met perfectly, dt is adjusted automatically to capture the save points @@ -65,20 +77,21 @@ def run_example(): # This is the maximum sustainable current that can be drawn # from the battery at steady-state. It decreases with discharge # This information can be used to inform planning - pm = [batt.performance_metrics(x)['max_i'][0] for x in simulated_results.states] + pm = [batt.performance_metrics(x)["max_i"][0] for x in simulated_results.states] plt.figure() plt.plot(simulated_results.times, pm) - plt.xlabel('Time (s)') - plt.ylabel('Maximum Sustainable Current Draw (amps)') + plt.xlabel("Time (s)") + plt.ylabel("Maximum Sustainable Current Draw (amps)") # You can also change the integration method. For example: - options['integration_method'] = 'rk4' # Using Runge-Kutta 4th order + options["integration_method"] = "rk4" # Using Runge-Kutta 4th order simulated_results_rk4 = batt.simulate_to_threshold(future_loading, **options) simulated_results_rk4.outputs.plot(compact=False) plt.show() + # This allows the module to be executed directly -if __name__ == '__main__': +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/1859abc939b84f7d8c9c812b3aca4b6c/visualize.ipynb b/docs/_downloads/1859abc939b84f7d8c9c812b3aca4b6c/visualize.ipynb index 89ed1256..645869f0 100644 --- a/docs/_downloads/1859abc939b84f7d8c9c812b3aca4b6c/visualize.ipynb +++ b/docs/_downloads/1859abc939b84f7d8c9c812b3aca4b6c/visualize.ipynb @@ -1,54 +1,104 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample demonstrating the Visualization Module. 
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\nfrom progpy.visualize import plot_timeseries\nfrom progpy.models.thrown_object import ThrownObject\n\ndef run_example():\n print('Visualize Module Example')\n m = ThrownObject()\n\n # Step 2: Setup for simulation \n def future_load(t, x=None):\n return {}\n\n # Step 3: Simulate to impact\n event = 'impact'\n options={'dt':0.005, 'save_freq':1}\n simulated_results = m.simulate_to_threshold(future_load,\n threshold_keys=[event], \n **options)\n \n\n # Display states\n # ==============\n plot_timeseries(simulated_results.times, simulated_results.states, \n options = {'compact': False, 'suptitle': 'state evolution', 'title': True,\n 'xlabel': 'time', 'ylabel': {'x': 'position', 'v': 'velocity'}, 'display_labels': 'minimal'},\n legend = {'display': True, 'display_at_subplot': 'all'} )\n plot_timeseries(simulated_results.times, simulated_results.states, options = {'compact': True, 'suptitle': 'state evolution', 'title': 'example title',\n 'xlabel': 'time', 'ylabel':'position'})\n plt.show()\n\nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample demonstrating the Visualization Module. 
\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "from progpy.visualize import plot_timeseries\n", + "from progpy.models.thrown_object import ThrownObject\n", + "\n", + "\n", + "def run_example():\n", + " print(\"Visualize Module Example\")\n", + " m = ThrownObject()\n", + "\n", + " # Step 2: Setup for simulation\n", + " def future_load(t, x=None):\n", + " return {}\n", + "\n", + " # Step 3: Simulate to impact\n", + " event = \"impact\"\n", + " options = {\"dt\": 0.005, \"save_freq\": 1}\n", + " simulated_results = m.simulate_to_threshold(\n", + " future_load, threshold_keys=[event], **options\n", + " )\n", + "\n", + " # Display states\n", + " # ==============\n", + " plot_timeseries(\n", + " simulated_results.times,\n", + " simulated_results.states,\n", + " options={\n", + " \"compact\": False,\n", + " \"suptitle\": \"state evolution\",\n", + " \"title\": True,\n", + " \"xlabel\": \"time\",\n", + " \"ylabel\": {\"x\": \"position\", \"v\": \"velocity\"},\n", + " \"display_labels\": \"minimal\",\n", + " },\n", + " legend={\"display\": True, \"display_at_subplot\": \"all\"},\n", + " )\n", + " plot_timeseries(\n", + " simulated_results.times,\n", + " simulated_results.states,\n", + " options={\n", + " \"compact\": True,\n", + " \"suptitle\": \"state evolution\",\n", + " \"title\": \"example title\",\n", + " \"xlabel\": \"time\",\n", + " \"ylabel\": \"position\",\n", + " },\n", + " )\n", + " plt.show()\n", + "\n", + "\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/1b01e3ae29c4c530a1ef38a01e3e9558/future_loading.py b/docs/_downloads/1b01e3ae29c4c530a1ef38a01e3e9558/future_loading.py index dabb15e3..afbf1fe7 100644 --- a/docs/_downloads/1b01e3ae29c4c530a1ef38a01e3e9558/future_loading.py +++ b/docs/_downloads/1b01e3ae29c4c530a1ef38a01e3e9558/future_loading.py @@ -2,52 +2,55 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating ways to use future loading. +Example demonstrating ways to use future loading. 
""" from progpy.models import BatteryCircuit from statistics import mean from numpy.random import normal -def run_example(): + +def run_example(): m = BatteryCircuit() - ## Example 1: Variable loading + ## Example 1: Variable loading def future_loading(t, x=None): - # Variable (piece-wise) future loading scheme - if (t < 600): + # Variable (piece-wise) future loading scheme + if t < 600: i = 2 - elif (t < 900): + elif t < 900: i = 1 - elif (t < 1800): + elif t < 1800: i = 4 - elif (t < 3000): - i = 2 + elif t < 3000: + i = 2 else: i = 3 - return m.InputContainer({'i': i}) - + return m.InputContainer({"i": i}) + # Simulate to threshold options = { - 'save_freq': 100, # Frequency at which results are saved - 'dt': 2 # Timestep + "save_freq": 100, # Frequency at which results are saved + "dt": 2, # Timestep } simulated_results = m.simulate_to_threshold(future_loading, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'Variable Load Current (amps)') - simulated_results.event_states.plot(ylabel = 'Variable Load Event State') + simulated_results.inputs.plot(ylabel="Variable Load Current (amps)") + simulated_results.event_states.plot(ylabel="Variable Load Event State") - ## Example 2: Moving Average loading - # This is useful in cases where you are running reoccuring simulations, and are measuring the actual load on the system, + ## Example 2: Moving Average loading + # This is useful in cases where you are running reoccuring simulations, and are measuring the actual load on the system, # but dont have a good way of predicting it, and you expect loading to be steady def future_loading(t, x=None): return future_loading.load - future_loading.load = m.InputContainer({key : 0 for key in m.inputs}) + + future_loading.load = m.InputContainer({key: 0 for key in m.inputs}) # Lets define another function to handle the moving average logic - window = 10 # Number of elements in window + window = 10 # Number of elements in window + def moving_avg(i): for key in m.inputs: moving_avg.loads[key].append(i[key]) @@ -55,69 +58,93 @@ def moving_avg(i): del moving_avg.loads[key][0] # Remove first item # Update future loading eqn - future_loading.load = {key : mean(moving_avg.loads[key]) for key in m.inputs} - moving_avg.loads = {key : [] for key in m.inputs} + future_loading.load = {key: mean(moving_avg.loads[key]) for key in m.inputs} + + moving_avg.loads = {key: [] for key in m.inputs} - # OK, we've setup the logic of the moving average. + # OK, we've setup the logic of the moving average. 
# Now lets say you have some measured loads to add - measured_loads = [10, 11.5, 12.0, 8, 2.1, 1.8, 1.99, 2.0, 2.01, 1.89, 1.92, 2.01, 2.1, 2.2] - + measured_loads = [ + 10, + 11.5, + 12.0, + 8, + 2.1, + 1.8, + 1.99, + 2.0, + 2.01, + 1.89, + 1.92, + 2.01, + 2.1, + 2.2, + ] + # We're going to feed these into the future loading eqn for load in measured_loads: - moving_avg({'i': load}) - + moving_avg({"i": load}) + # Now the future_loading eqn is setup to use the moving average of whats been seen # Simulate to threshold simulated_results = m.simulate_to_threshold(future_loading, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'Moving Average Current (amps)') - simulated_results.event_states.plot(ylabel = 'Moving Average Event State') + simulated_results.inputs.plot(ylabel="Moving Average Current (amps)") + simulated_results.event_states.plot(ylabel="Moving Average Event State") # In this case, this estimate is wrong because loading will not be steady, but at least it would give you an approximation. - # If more measurements are received, the user could estimate the moving average here and then run a new simulation. + # If more measurements are received, the user could estimate the moving average here and then run a new simulation. - ## Example 3: Gaussian Distribution - # In this example we will still be doing a variable loading like the first option, but we are going to use a - # gaussian distribution for each input. + ## Example 3: Gaussian Distribution + # In this example we will still be doing a variable loading like the first option, but we are going to use a + # gaussian distribution for each input. def future_loading(t, x=None): - # Variable (piece-wise) future loading scheme - if (t < 600): + # Variable (piece-wise) future loading scheme + if t < 600: i = 2 - elif (t < 900): + elif t < 900: i = 1 - elif (t < 1800): + elif t < 1800: i = 4 - elif (t < 3000): - i = 2 + elif t < 3000: + i = 2 else: i = 3 - return m.InputContainer({'i': normal(i, future_loading.std)}) + return m.InputContainer({"i": normal(i, future_loading.std)}) + future_loading.std = 0.2 # Simulate to threshold simulated_results = m.simulate_to_threshold(future_loading, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'Variable Gaussian Current (amps)') - simulated_results.event_states.plot(ylabel = 'Variable Gaussian Event State') + simulated_results.inputs.plot(ylabel="Variable Gaussian Current (amps)") + simulated_results.event_states.plot(ylabel="Variable Gaussian Event State") # Example 4: Gaussian- increasing with time - # For this we're using moving average. This is realistic because the further out from current time you get, - # the more uncertainty there is in your prediction. + # For this we're using moving average. This is realistic because the further out from current time you get, + # the more uncertainty there is in your prediction. 
def future_loading(t, x=None): - std = future_loading.base_std + future_loading.std_slope * (t - future_loading.t) - return {key : normal(future_loading.load[key], std) for key in future_loading.load.keys()} - future_loading.load = {key : 0 for key in m.inputs} + std = future_loading.base_std + future_loading.std_slope * ( + t - future_loading.t + ) + return { + key: normal(future_loading.load[key], std) + for key in future_loading.load.keys() + } + + future_loading.load = {key: 0 for key in m.inputs} future_loading.base_std = 0.001 future_loading.std_slope = 1e-4 future_loading.t = 0 # Lets define another function to handle the moving average logic window = 10 # Number of elements in window + def moving_avg(i): for key in m.inputs: moving_avg.loads[key].append(i[key]) @@ -125,52 +152,76 @@ def moving_avg(i): del moving_avg.loads[key][0] # Remove first item # Update future loading eqn - future_loading.load = {key : mean(moving_avg.loads[key]) for key in m.inputs} - moving_avg.loads = {key : [] for key in m.inputs} + future_loading.load = {key: mean(moving_avg.loads[key]) for key in m.inputs} + + moving_avg.loads = {key: [] for key in m.inputs} - # OK, we've setup the logic of the moving average. + # OK, we've setup the logic of the moving average. # Now lets say you have some measured loads to add - measured_loads = [10, 11.5, 12.0, 8, 2.1, 1.8, 1.99, 2.0, 2.01, 1.89, 1.92, 2.01, 2.1, 2.2] - + measured_loads = [ + 10, + 11.5, + 12.0, + 8, + 2.1, + 1.8, + 1.99, + 2.0, + 2.01, + 1.89, + 1.92, + 2.01, + 2.1, + 2.2, + ] + # We're going to feed these into the future loading eqn for load in measured_loads: - moving_avg({'i': load}) + moving_avg({"i": load}) # Simulate to threshold simulated_results = m.simulate_to_threshold(future_loading, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'Moving Average Current (amps)') - simulated_results.event_states.plot(ylabel = 'Moving Average Event State') - + simulated_results.inputs.plot(ylabel="Moving Average Current (amps)") + simulated_results.event_states.plot(ylabel="Moving Average Event State") + # In this example future_loading.t has to be updated with current time before each prediction. - + # Example 5 Function of state # here we're pretending that input is a function of SOC. It increases as we approach SOC def future_loading(t, x=None): if x is not None: event_state = future_loading.event_state(x) - return m.InputContainer({'i': future_loading.start + (1-event_state['EOD']) * future_loading.slope}) # default - return m.InputContainer({'i': future_loading.start}) + return m.InputContainer( + { + "i": future_loading.start + + (1 - event_state["EOD"]) * future_loading.slope + } + ) # default + return m.InputContainer({"i": future_loading.start}) + future_loading.t = 0 future_loading.event_state = m.event_state - future_loading.slope = 2 # difference between input with EOD = 1 and 0. + future_loading.slope = 2 # difference between input with EOD = 1 and 0. 
future_loading.start = 0.5 # Simulate to threshold simulated_results = m.simulate_to_threshold(future_loading, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'Moving Average Current (amps)') - simulated_results.event_states.plot(ylabel = 'Moving Average Event State') + simulated_results.inputs.plot(ylabel="Moving Average Current (amps)") + simulated_results.event_states.plot(ylabel="Moving Average Event State") # In this example future_loading.t has to be updated with current time before each prediction. # Show plots import matplotlib.pyplot as plt + plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/1b9c2fb843346c9087fbd3fb7ca4c46b/dataset.py b/docs/_downloads/1b9c2fb843346c9087fbd3fb7ca4c46b/dataset.py new file mode 100644 index 00000000..950ee2ca --- /dev/null +++ b/docs/_downloads/1b9c2fb843346c9087fbd3fb7ca4c46b/dataset.py @@ -0,0 +1,77 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the +# National Aeronautics and Space Administration. All Rights Reserved. + +""" +Example downloading and using a NASA prognostics dataset. + +.. dropdown:: More details + + In this example, a battery dataset is downloaded from the NASA PCoE data repository. This dataset is then accessed and plotted. +""" + +import matplotlib.pyplot as plt +import pickle +from progpy.datasets import nasa_battery + +DATASET_ID = 1 + + +def run_example(): + # Step 1: Download and import the dataset for a single battery + # Note: This may take some time + print("Downloading... ", end="") + (desc, data) = nasa_battery.load_data(DATASET_ID) + print("done") + + # We recommend saving the dataset to disk for future use + # This way you don't have to download it each time + pickle.dump((desc, data), open(f"dataset_{DATASET_ID}.pkl", "wb")) + + # Step 2: Access the dataset description + print(f"\nDataset {DATASET_ID}") + print(desc["description"]) + print(f"Procedure: {desc['procedure']}") + + # Step 3: Access the dataset data + # Data is in format [run_id][time][variable] + # For the battery the variables are + # 0: relativeTime (since beginning of run) + # 1: current (amps) + # 2: voltage + # 3: temperature (°C) + # so that data[a][b, 3] is the temperature at time index b (relative to the start of the run) for run a + print(f"\nNumber of runs: {len(data)}") + print("\nAnalyzing run 4") + print(f"number of time indices: {len(data[4])}") + print(f"Details of run 4: {desc['runs'][4]}") + + # Plot the run + plt.figure() + plt.subplot(2, 1, 1) + plt.plot(data[4]["relativeTime"], data[4]["current"]) + plt.ylabel("Current (A)") + + plt.subplot(2, 1, 2) + plt.plot(data[4]["relativeTime"], data[4]["voltage"]) + plt.ylabel("Voltage (V)") + plt.xlabel("Time (s)") + plt.title("Run 4") + + # Graph all reference discharge profiles + indices = [ + i + for i, x in enumerate(desc["runs"]) + if "reference discharge" in x["desc"] and "rest" not in x["desc"] + ] + plt.figure() + for i in indices: + plt.plot(data[i]["relativeTime"], data[i]["voltage"], label=f"Run {i}") + plt.title("Reference discharge profiles") + plt.xlabel("Time (s)") + plt.ylabel("Voltage (V)") + plt.show() + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/1e173741004c18e2e2f97234d3eb4521/sensitivity.py 
b/docs/_downloads/1e173741004c18e2e2f97234d3eb4521/sensitivity.py new file mode 100644 index 00000000..02a7fae7 --- /dev/null +++ b/docs/_downloads/1e173741004c18e2e2f97234d3eb4521/sensitivity.py @@ -0,0 +1,74 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the +# National Aeronautics and Space Administration. All Rights Reserved. + +""" +Example performing a sensitivity analysis on a new model. +""" + +import numpy as np + +# Deriv prog model was selected because the model can be described as x' = x + dx*dt +from progpy.models.thrown_object import ThrownObject + + +def run_example(): + # Demo model + # Step 1: Create instance of model + m = ThrownObject() + + # Step 2: Setup range on parameters considered + thrower_height_range = np.arange(1.2, 2.1, 0.1) + + # Step 3: Sim for each + event = "impact" + eods = np.empty(len(thrower_height_range)) + for i, thrower_height in zip( + range(len(thrower_height_range)), thrower_height_range + ): + m.parameters["thrower_height"] = thrower_height + simulated_results = m.simulate_to_threshold(events=event, dt=1e-3, save_freq=10) + eods[i] = simulated_results.times[-1] + + # Step 4: Analysis + print( + "For a reasonable range of heights, impact time is between {} and {}".format( + round(eods[0], 3), round(eods[-1], 3) + ) + ) + sensitivity = (eods[-1] - eods[0]) / ( + thrower_height_range[-1] - thrower_height_range[0] + ) + print( + " - Average sensitivity: {} s per cm height".format( + round(sensitivity / 100, 6) + ) + ) + print(" - It seems impact time is not very sensitive to thrower's height") + + # Now lets repeat for throw speed + throw_speed_range = np.arange(20, 40, 1) + eods = np.empty(len(throw_speed_range)) + for i, throw_speed in zip(range(len(throw_speed_range)), throw_speed_range): + m.parameters["throwing_speed"] = throw_speed + simulated_results = m.simulate_to_threshold( + events=event, options={"dt": 1e-3, "save_freq": 10} + ) + eods[i] = simulated_results.times[-1] + + print( + "\nFor a reasonable range of throwing speeds, impact time is between {} and {}".format( + round(eods[0], 3), round(eods[-1], 3) + ) + ) + sensitivity = (eods[-1] - eods[0]) / (throw_speed_range[-1] - throw_speed_range[0]) + print( + " - Average sensitivity: {} s per m/s speed".format( + round(sensitivity / 100, 6) + ) + ) + print(" - It seems impact time is much more dependent on throwing speed") + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/1e5a767a460fa630bbb4ed98c93e87bf/events.py b/docs/_downloads/1e5a767a460fa630bbb4ed98c93e87bf/events.py index 756a87b7..082b575f 100644 --- a/docs/_downloads/1e5a767a460fa630bbb4ed98c93e87bf/events.py +++ b/docs/_downloads/1e5a767a460fa630bbb4ed98c93e87bf/events.py @@ -2,27 +2,29 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example further illustrating the concept of 'events' which generalizes EOL. +Example further illustrating the concept of 'events' which generalizes EOL. .. dropdown:: More details :term:`Events` is the term used to describe something to be predicted. Generally in the PHM community these are referred to as End of Life (EOL). However, they can be much more. - In prog_models, events can be anything that needs to be predicted. Events can represent End of Life (EOL), End of Mission (EOM), warning thresholds, or any Event of Interest (EOI). + In prog_models, events can be anything that needs to be predicted. 
Events can represent End of Life (EOL), End of Mission (EOM), warning thresholds, or any Event of Interest (EOI). - This example demonstrates how events can be used in your applications. + This example demonstrates how events can be used in your applications. """ + import matplotlib.pyplot as plt from progpy.loading import Piecewise from progpy.models import BatteryElectroChemEOD + def run_example(): # Example: Warning thresholds # In this example we will use the battery model # We of course are interested in end of discharge, but for this example we # have a requirement that says the battery must not fall below 5% State of Charge (SOC) # Note: SOC is the event state for the End of Discharge (EOD) event - # Event states, like SOC go between 0 and 1, where 1 is healthy and at 0 the event has occurred. + # Event states, like SOC go between 0 and 1, where 1 is healthy and at 0 the event has occurred. # So, 5% SOC corresponds to an 'EOD' event state of 0.05 # Additionally, we have two warning thresholds (yellow and red) @@ -32,7 +34,11 @@ def run_example(): # Step 1: Extend the battery model to define the additional events class MyBatt(BatteryElectroChemEOD): - events = BatteryElectroChemEOD.events + ['EOD_warn_yellow', 'EOD_warn_red', 'EOD_requirement_threshold'] + events = BatteryElectroChemEOD.events + [ + "EOD_warn_yellow", + "EOD_warn_red", + "EOD_requirement_threshold", + ] def event_state(self, state): # Get event state from parent @@ -41,9 +47,15 @@ def event_state(self, state): # Add yellow, red, and failure states by scaling EOD state # Here we scale so the threshold SOC is 0 by their associated events, while SOC of 1 is still 1 # For example, for yellow we want EOD_warn_yellow to be 1 when SOC is 1, and 0 when SOC is YELLOW_THRESH or lower - event_state['EOD_warn_yellow'] = (event_state['EOD']-YELLOW_THRESH)/(1-YELLOW_THRESH) - event_state['EOD_warn_red'] = (event_state['EOD']-RED_THRESH)/(1-RED_THRESH) - event_state['EOD_requirement_threshold'] = (event_state['EOD']-THRESHOLD)/(1-THRESHOLD) + event_state["EOD_warn_yellow"] = (event_state["EOD"] - YELLOW_THRESH) / ( + 1 - YELLOW_THRESH + ) + event_state["EOD_warn_red"] = (event_state["EOD"] - RED_THRESH) / ( + 1 - RED_THRESH + ) + event_state["EOD_requirement_threshold"] = ( + event_state["EOD"] - THRESHOLD + ) / (1 - THRESHOLD) # Return return event_state @@ -54,9 +66,11 @@ def threshold_met(self, x): # Add yell and red states from event_state event_state = self.event_state(x) - t_met['EOD_warn_yellow'] = event_state['EOD_warn_yellow'] <= 0 - t_met['EOD_warn_red'] = event_state['EOD_warn_red'] <= 0 - t_met['EOD_requirement_threshold'] = event_state['EOD_requirement_threshold'] <= 0 + t_met["EOD_warn_yellow"] = event_state["EOD_warn_yellow"] <= 0 + t_met["EOD_warn_red"] = event_state["EOD_warn_red"] <= 0 + t_met["EOD_requirement_threshold"] = ( + event_state["EOD_requirement_threshold"] <= 0 + ) return t_met @@ -65,20 +79,22 @@ def threshold_met(self, x): # 2a: Setup model - # Variable (piece-wise) future loading scheme - # For a battery, future loading is in term of current 'i' in amps. + # Variable (piece-wise) future loading scheme + # For a battery, future loading is in term of current 'i' in amps. 
future_loading = Piecewise( - m.InputContainer, - [600, 900, 1800, 3000, float('inf')], - {'i': [2, 1, 4, 2, 3]}) - + m.InputContainer, [600, 900, 1800, 3000, float("inf")], {"i": [2, 1, 4, 2, 3]} + ) + # 2b: Simulate to threshold - simulated_results = m.simulate_to_threshold(future_loading, threshold_keys=['EOD'], print = True) + simulated_results = m.simulate_to_threshold( + future_loading, threshold_keys=["EOD"], print=True + ) # 2c: Plot results simulated_results.event_states.plot() plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/20cd83c083accc07bf830bfbaae95509/01_Simulation.ipynb b/docs/_downloads/20cd83c083accc07bf830bfbaae95509/01_Simulation.ipynb new file mode 100644 index 00000000..a480ed98 --- /dev/null +++ b/docs/_downloads/20cd83c083accc07bf830bfbaae95509/01_Simulation.ipynb @@ -0,0 +1,2166 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 1. Simulating with Prognostics Models\n", + "\n", + "One of the most basic of functions for models is simulation. Simulation is the process of predicting the evolution of a [system's state](https://nasa.github.io/progpy/glossary.html#term-state) with time. Simulation is the foundation of prediction (see __[08 Prediction](08_Prediction.ipynb)__). Unlike full prediction, simulation does not include uncertainty in the state and other product (e.g., [output](https://nasa.github.io/progpy/glossary.html#term-output)) representation.\n", + "\n", + "In this notebook, we will introduce simulating to a specific time (e.g., 3 seconds) using the `simulate_to` method and simulating until a threshold is met (rather than a defined time) using `simulate_to_threshold`. We will also explore how to make simulations more concrete with [future loading](https://nasa.github.io/progpy/glossary.html#term-future-load) and other advanced features.\n", + "\n", + "***Note**: Before running this example make sure you have [ProgPy installed](https://nasa.github.io/progpy/#installing-progpy) and up to date.*" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "* [Basic Simulation to a Time](#Basic-Simulation-to-a-Time)\n", + "* [Simulating to Threshold](#Simulating-to-Threshold)\n", + "* [Future Loading](#Future-Loading)\n", + " * [Piecewise Load](#Piecewise-Load)\n", + " * [Moving Average](#Moving-Average)\n", + " * [Gaussian Noise in Loading](#Gaussian-Noise-in-Loading)\n", + " * [Custom Load Profiles](#Custom-Load-Profiles)\n", + "* [Step Size](#Step-Size)\n", + " * [Basic Step Size](#Basic-Step-Size)\n", + " * [Dynamic Step Size](#Dynamic-Step-Size)\n", + " * [Custom Step Size](#Custom-Step-Size)\n", + "* [Parameters](#Parameters)\n", + "* [Noise](#Noise)\n", + "* [Vectorized Simulation](#Vectorized-Simulation)\n", + "* [Configuring Simulation](#Configuring-Simulation)\n", + " * [Simulating from a Known Time](#Simulating-From-a-Known-Time)\n", + " * [Integration Method](#Integration-Method)\n", + "* [Conclusion](#Conclusion)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Basic Simulation to a Time" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's go through a basic example simulating a model to a specific point in time. In this case we are using the ThrownObject model. 
ThrownObject is a basic model of an object being thrown up into the air (with resistance) and returning to the ground.\n", + "\n", + "First, we import the model from ProgPy's models subpackage (see __[03 Existing Models](03_Existing%20Models.ipynb)__) and create a model instance." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import ThrownObject\n", + "\n", + "m = ThrownObject()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we simulate this model for three seconds. To do this we use the [`simulate_to`](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel.simulate_to) method." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to(3)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It's that simple! We've simulated the model forward three seconds. Let's look in a little more detail at the returned results. \n", + "\n", + "Simulation results consist of five different types of information, described below:\n", + "* **times**: Time corresponding to each value.\n", + "* **[inputs](https://nasa.github.io/progpy/glossary.html#term-input)**: Control or loading applied to the system being modeled (e.g., current drawn from a battery). Input is frequently denoted by `u`.\n", + "* **[states](https://nasa.github.io/progpy/glossary.html#term-state)**: Internal variables (typically hidden states) used to represent the state of the system. Can be the same as inputs or outputs but do not have to be. State is frequently denoted as `x`.\n", + "* **[outputs](https://nasa.github.io/progpy/glossary.html#term-output)**: Measured sensor values from a system (e.g., voltage and temperature of a battery). Can be estimated from the system state. Output is frequently denoted by `z`.\n", + "* **[event_states](https://nasa.github.io/progpy/glossary.html#term-event-state)**: Progress towards [event](https://nasa.github.io/progpy/glossary.html#term-event) occurring. Defined as a number where an event state of 0 indicates the event has occurred and 1 indicates no progress towards the event (i.e., fully healthy operation for a failure event). For a gradually occurring event (e.g., discharge) the number will progress from 1 to 0 as the event nears. In prognostics, event state is frequently called “State of Health”.\n", + "\n", + "In this case, times are the start and end of the simulation ([0, 3]), since we have not yet told the simulator to save intermediate times. The ThrownObject model doesn't have any way of controlling or loading the object, so there are no inputs. The states are position (`x`) and velocity (`v`). This model assumes that you can measure position, so the outputs are just position (`x`). The two events for this model are `falling` (i.e., if the object is falling towards the earth) and `impact` (i.e., the object has impacted the ground). For a real prognostic model, events might be failure modes or warning thresholds." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's inspect the results. 
First, let's plot the outputs (position)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is a model of an object thrown in the air, so we generally expect its path to follow a parabola, but what we see above is linear. This is because there are only two points, the start (0s) and the end (3s). To see the parabola we need more points. This is where `save_freq` and `save_pts` come into play. \n", + "\n", + "`save_freq` is an argument in simulation that specifies a frequency at which you would like to save the results (e.g., 1 second), while `save_pts` is used to specify specific times that you would like to save the results (e.g., [1.5, 2.5, 3, 5] seconds).\n", + "\n", + "Now let's repeat the simulation above with a save frequency and plot the results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to(3, save_freq=0.5)\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can see the start of the parabola path we expected. We don't see the full parabola because we stopped simulation at 3 seconds.\n", + "\n", + "If you look at results.times, you can see that the results were saved every 0.5 seconds during simulation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(results.times)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's look at the event_states (i.e., `falling` and `impact`)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = results.event_states.plot(xlabel=\"time (s)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here we see that the `falling` event state decreased linearly with time, and was approaching 0. This shows that it was nearly falling when we stopped simulation. The `impact` event state remained at 1, indicating that we had not made any progress towards impact. With this model, the `impact` event state only starts decreasing as the object falls. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, let's take a look at model states. In this case the two states are position (`x`) and velocity (`v`)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "x = [state[\"x\"] for state in results.states]\n", + "v = [state[\"v\"] for state in results.states]\n", + "plt.plot(results.times, x, label=\"Position (x) [m]\", color=\"tab:blue\")\n", + "plt.plot(results.times, v, label=\"Velocity (v) [m/s]\", color=\"tab:orange\")\n", + "plt.xlabel(\"time (s)\")\n", + "plt.ylabel(\"state\")\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Position will match the output exactly and velocity (`v`) decreases nearly linearly with time due to the constant pull of gravity. The slight non-linearity is due to the effects of drag." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is a basic example of simulating to a set time. 
This is useful information for inspecting or analyzing the behavior of a model or the degradation of a system. There are many useful features that allow for complex simulation, described in the upcoming sections. \n", + "\n", + "Note that this is an example problem. In most cases, the system will have inputs, in which case simulation will require future loading (see Future Loading section, below), and simulation will not be until a time, but until a threshold is met. Simulating to a threshold will be described in the next section." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simulating to Threshold" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the first section we introduced simulating to a set time. For most applications, users are not interested in the system evolution over a certain time period, but instead in simulating to some event of interest.\n", + "\n", + "In this section we will introduce the concept of simulating until an event occurs. This section builds upon the concepts introduced in the previous section.\n", + "\n", + "Just like in the previous section, we will start by preparing the ThrownObject model. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import ThrownObject\n", + "\n", + "m = ThrownObject()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you recall, the ThrownObject model is of an object thrown into the air. The model has two events, `impact` and `falling`. In real prognostic models, these events will likely correspond with some failure, fault, or warning threshold. That said, events can be any event of interest that a user would like to predict. \n", + "\n", + "Now let's repeat the simulation from the previous example, this time simulating until an event has occurred by using the [`simulate_to_threshold`](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel.simulate_to_threshold) method." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(save_freq=0.5)\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")\n", + "fig = results.event_states.plot(xlabel=\"time (s)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that simulation continued beyond the 3 seconds used in the first section. Instead, simulation stopped at 4 seconds, at which point the `falling` event state reached 0 and the position (`x`) reached the apogee of its path." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "By default, `simulate_to_threshold` simulates until the first event occurs. In this case, that's `falling` (i.e., when the object begins falling). For this model `falling` will always occur before `impact`, but for many models you won't have such a strict ordering of events. \n", + "\n", + "For users interested in when a specific event is reached, you can indicate which event(s) you'd like to simulate to using the `events` argument."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(save_freq=0.5, events=\"impact\")\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")\n", + "fig = results.event_states.plot(xlabel=\"time (s)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now the model simulated past the `falling` event until the `impact` event occurred. `events` accepts a single event, or a list of events, so for models with many events you can specify a list of events where any will stop simulation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Frequently, users are interested in simulating to a threshold only if it occurs within some horizon of interest, like a mission time or planning horizon. This is accomplished with the `horizon` keyword argument. \n", + "\n", + "For example, if we were only interested in events occurring in the next 7 seconds we could set `horizon` to 7." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(save_freq=0.5, events=\"impact\", horizon=7)\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")\n", + "fig = results.event_states.plot(xlabel=\"time (s)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that now simulation stopped at 7 seconds, even though the event had not yet occurred. If we use a horizon after the event, like 10 seconds, then simulation stops at the event." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(save_freq=0.5, events=\"impact\", horizon=10)\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")\n", + "fig = results.event_states.plot(xlabel=\"time (s)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The 7 and 10 second horizons are used as examples. In most cases, the simulation horizon will be much longer. For example, you can imagine a user who's interested in prognostics for a one-hour drone flight might set the horizon to a little over an hour. A user who has a month-long maintenance scheduling window might choose a horizon of one month. \n", + "\n", + "It is good practice to include a horizon with most simulations to prevent simulations continuing indefinitely for the case where the event never happens.\n", + "\n", + "One final note: you can also use the print and progress options to track progress during long simulations, like below:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(\n", + "    save_freq=0.5, events=\"impact\", print=True, progress=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For most users running this in Jupyter notebook, the output will be truncated, but it gives an idea of what would be shown when selecting these options.\n", + "\n", + "In this example we specified events='impact' to indicate that simulation should stop when the specified event 'impact' is met. By default, the simulation will stop when the first of the specified events occurs. If you don't specify any events, all model events will be included (in this case ['falling', 'impact']). 
This means that without specifying events, execution would have ended early, when the object starts falling, like below:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(save_freq=0.5, dt=0.1)\n", + "print(\"Last timestep: \", results.times[-1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that simulation stopped at around 3.8 seconds, about when the object starts falling.\n", + "\n", + "Alternatively, if we would like to execute until all events have occurred, we can use the `event_strategy` argument, which specifies the strategy for stopping evaluation. The default value is `first`, but we can change it to `all`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(save_freq=0.5, dt=0.1, event_strategy=\"all\")\n", + "print(\"Last timestep: \", results.times[-1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note the simulation stopped at around 7.9 seconds, when the last of the events occurred ('impact').\n", + "\n", + "We can also specify `event_strategy` to be a custom function." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from numpy import all\n", + "\n", + "\n", + "# Custom function that stops when all objects impact ground\n", + "def thresholds_met_eqn(thresholds_met):\n", + " return all(thresholds_met[\"impact\"])\n", + "\n", + "\n", + "results = m.simulate_to_threshold(\n", + " save_freq=0.5, dt=0.1, event_strategy=thresholds_met_eqn\n", + ")\n", + "print(\"Last timestep: \", results.times[-1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Again, we can see that the simulation stopped at around 7.9 seconds, when the last of the events occurred ('impact').\n", + "\n", + "This is a basic example of simulating to an event. However, this is still just an example. Most models will have some form of input or loading. Simulating these models is described in the following section. The remainder of the sections go through various features for further customizing simulations." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Future Loading" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The previous examples feature a simple ThrownObject model, which does not have any inputs. Unlike ThrownObject, most prognostics models have some sort of [input](https://nasa.github.io/progpy/glossary.html#term-input). The input is some sort of control or loading applied to the system being modeled. In this section we will describe how to simulate a model which features an input.\n", + "\n", + "In this example we will be using the BatteryCircuit model from the models subpackage (see __[03 Existing Models](03_Existing%20Models.ipynb)__). This is a simple battery discharge model where the battery is represented by an equivalent circuit.\n", + "\n", + "Like the past examples, we start by importing and creating the model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import BatteryCircuit\n", + "\n", + "m = BatteryCircuit()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see the battery's inputs, states, and outputs (described above) by accessing these attributes." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"outputs:\", m.outputs)\n", + "print(\"inputs:\", m.inputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Consulting the [model documentation](https://nasa.github.io/progpy/api_ref/prog_models/IncludedModels.html#prog_models.models.BatteryCircuit), we see that the outputs (i.e., measurable values) of the model are temperature (`t`) and voltage (`v`). The model's input is the current (`i`) drawn from the battery.\n", + "\n", + "If we try to simulate as we do above (without specifying loading), it wouldn't work because the battery discharge is a function of the current (`i`) drawn from the battery. Simulation for a model like this requires that we define the future load. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Piecewise Load" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For the first example, we define a piecewise loading profile using the `progpy.loading.Piecewise` class. This is one of the most common loading profiles. First we import the class from the loading subpackage and matplotlib for graphing." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.loading import Piecewise" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we define a loading profile. Piecewise loader takes 3 arguments: 1. the model InputContainer, 2. times and 3. loads. Each of these are explained in more detail below.\n", + "\n", + "The model input container is a class for representing the input for a model. It's a class attribute for every model, and is specific to that model. It can be found at m.InputContainer." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m.InputContainer" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "InputContainers are initialized with either a dictionary or a column vector." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(m.InputContainer({\"i\": 3}))\n", + "import numpy as np\n", + "\n", + "print(m.InputContainer(np.vstack((2.3,))))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The second and third arguments for the loading profile are times and loads. Together, the 'times' and 'loads' arguments specify what load is applied to the system at what times throughout simulation. The values in 'times' specify the ending time for each load. For example, if times were [5, 7, 10], then the first load would apply until t=5, then the second load would apply for 2 seconds, following by the third load for 3 more seconds. \n", + "\n", + "Loads are a dictionary of arrays, where the keys of the dictionary are the inputs to the model (for a battery, just current `i`), and the values in the array are the value at each time in times. If the loads array is one longer than times, then the last value is the \"default load\", i.e., the load that will be applied after the last time has passed.\n", + "\n", + "For example, we might define this load profile for our battery." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "loading = Piecewise(\n", + " InputContainer=m.InputContainer,\n", + " times=[600, 900, 1800, 3000],\n", + " values={\"i\": [2, 1, 4, 2, 3]},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this case, the current drawn (`i`) is 2 amps until t is 600 seconds, then it is 1 for the next 300 seconds (until 900 seconds), etc. The \"default load\" is 3, meaning that after the last time has passed (3000 seconds) a current of 3 will be drawn. \n", + "\n", + "Now that we have this load profile, let's run a simulation with our model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(loading, save_freq=100)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's take a look at the inputs to the model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "See above that the load profile is piecewise, matching the profile we defined above.\n", + "\n", + "Plotting the outputs, you can see jumps in the voltage levels as the current changes." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def print_battery_output_plots(results):\n", + " fig = results.outputs.plot(\n", + " keys=[\"t\"], xlabel=\"time (s)\", ylabel=\"temperature (K)\", figsize=(10, 4)\n", + " )\n", + " fig2 = results.outputs.plot(\n", + " keys=[\"v\"], xlabel=\"time (s)\", ylabel=\"voltage (V)\", figsize=(10, 4)\n", + " )\n", + "\n", + "\n", + "print_battery_output_plots(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example we simulated to threshold, loading the system using a simple piecewise load profile. This is the most common load profile and will probably work for most cases." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Moving Average" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Another common loading scheme is the moving-average load. This loading scheme assumes that the load will continue like it's seen in the past. This is useful when you don't know the exact load, but you expect it to be consistent.\n", + "\n", + "Like with Piecewise loading, the first step it to import the loading class. In this case, `progpy.loading.MovingAverage`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.loading import MovingAverage" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next we create the moving average loading object, passing in the InputContainer." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "loading = MovingAverage(m.InputContainer)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The moving average load estimator requires an additional step, sending the observed load. This is done using the add_load method. Let's load it up with some observed current draws. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "measured_loads = [4, 4.5, 4.0, 4, 2.1, 1.8, 1.99, 2.0, 2.01, 1.89, 1.92, 2.01, 2.1, 2.2]\n", + "\n", + "for load in measured_loads:\n", + " loading.add_load({\"i\": load})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In practice the add_load method should be called whenever there's new input (i.e., load) information. The MovingAverage load estimator averages over a window of elements, configurable at construction using the window argument (e.g., MovingAverage(m.InputContainer, window=12)).\n", + "\n", + "Now the configured load estimator can be used in simulation. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(loading, save_freq=100)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's take a look at the resulting input current." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that the loading is a constant around 2, this is because the larger loads (~4 amps) are outside of the averaging window. Here are the resulting outputs:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print_battery_output_plots(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The voltage and temperature curves are much cleaner. They don't have the jumps present in the piecewise loading example. This is due to the constant loading.\n", + "\n", + "In this example we simulated to threshold, loading the system using a constant load profile calculated using the moving average load estimator. This load estimator needs to be updated with the add_load method whenever new loading data is available. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Gaussian Noise in Loading" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Typically, users have an idea of what loading will look like, but there is some uncertainty. Future load estimates are hardly ever known exactly. This is where load wrappers like the `progpy.loading.GaussianNoiseLoadWrapper` come into play. The GaussianNoiseLoadWrapper wraps around another load profile, adding a random amount of noise, sampled from a Gaussian distribution, at each step. This will show some variability in simulation, but this becomes more important in prediction (see __[08 Prediction](08_Prediction.ipynb)__).\n", + "\n", + "In this example we will repeat the Piecewise load example, this time using the GaussianNoiseLoadWrapper to represent our uncertainty in our future load estimate. \n", + "\n", + "First we will import the necessary classes and construct the Piecewise load estimation just as in the previous example." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.loading import Piecewise, GaussianNoiseLoadWrapper\n", + "\n", + "loading = Piecewise(\n", + " InputContainer=m.InputContainer,\n", + " times=[600, 900, 1800, 3000],\n", + " values={\"i\": [2, 1, 4, 2, 3]},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next we will wrap this loading object in our Gaussian noise load wrapper" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "loading_with_noise = GaussianNoiseLoadWrapper(loading, 0.2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this case we're adding Gaussian noise with a standard deviation of 0.2 to the result of the previous load estimator.\n", + "\n", + "Now let's simulate and look at the input profile." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(loading_with_noise, save_freq=100)\n", + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note the loading profile follows the piecewise shape, but with noise. If you run it again, you would get a slightly different result." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(loading_with_noise, save_freq=100)\n", + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here are the corresponding outputs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print_battery_output_plots(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that the noise in input can be seen in the resulting output plots.\n", + "\n", + "The seed can be set in creation of the GaussianNoiseLoadWrapper to ensure repeatable results, for example." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "loading_with_noise = GaussianNoiseLoadWrapper(loading, 0.2, seed=2000)\n", + "results = m.simulate_to_threshold(loading_with_noise, save_freq=100)\n", + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")\n", + "\n", + "loading_with_noise = GaussianNoiseLoadWrapper(loading, 0.2, seed=2000)\n", + "results = m.simulate_to_threshold(loading_with_noise, save_freq=100)\n", + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The load profiles in the two examples above are identical because they share the same random seed.\n", + "\n", + "In this section we introduced the concept of NoiseWrappers and how they are used to represent uncertainty in future loading. This concept is especially important when used with prediction (see __[08 Prediction](08_Prediction.ipynb)__). A GaussianNoiseLoadWrapper was used with a Piecewise loading profile to demonstrate it, but NoiseWrappers can be applied to any loading object or function, including the advanced profiles introduced in the next section." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Custom Load Profiles" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For most applications, the standard load estimation classes can be used to represent a user's expectation of future loading. However, there are some cases where load is some complex combination of time and state that cannot be represented by these classes. This section briefly describes a few of these cases. \n", + "\n", + "The first example is similar to the last one, in that there is Gaussian noise added to some underlying load profile. In this case the magnitude of noise increases linearly with time. This is an important example, as it allows us to represent a case where loading further out in time has more uncertainty (i.e., is less well known). This is common for many prognostic use-cases.\n", + "\n", + "Custom load profiles can be represented either as a function (t, x=None) -> u, where t is time, x is state, and u is input, or as a class which implements the __call__ method with the same profile as the function.\n", + "\n", + "In this case we will use the first method (i.e., the function). We will define a function that will use a defined slope (derivative of standard deviation with time)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from numpy.random import normal\n", + "\n", + "base_load = 2 # Base load (amps)\n", + "std_slope = 1e-4 # Derivative of standard deviation with time\n", + "\n", + "\n", + "def loading(t, x=None):\n", + " std = std_slope * t\n", + " return m.InputContainer({\"i\": normal(base_load, std)})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that the above code is specifically for a battery, but it could be generalized to any system.\n", + "\n", + "Now let's simulate and look at the input profile." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(loading, save_freq=100)\n", + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note how the noise in the input signal increases with time. Since this is a random process, if you were to run this again you would get a different result.\n", + "\n", + "Here is the corresponding output. Note you can see the effects of the increasingly erratic input in the voltage curve." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print_battery_output_plots(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the final example we will define a loading profile that considers state. In this example, we're simulating a scenario where loads are removed (i.e., turned off) when discharge event state (i.e., SOC) reaches 0.25. This emulates a \"low power mode\" often employed in battery-powered electronics.\n", + "\n", + "For simplicity the underlying load will be constant, but this same approach could be applied to more complex profiles, and noise can be added on top using a NoiseWrapper." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "normal_load = m.InputContainer({\"i\": 2.7})\n", + "low_power_load = m.InputContainer({\"i\": 1.9})\n", + "\n", + "\n", + "def loading(t, x=None):\n", + " if x is not None:\n", + " # State is provided\n", + " soc = m.event_state(x)[\"EOD\"]\n", + " return normal_load if soc > 0.25 else low_power_load\n", + " return normal_load" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that the above example checks if x is not None. For some models, for the first timestep, state may be None (because state hasn't yet been calculated).\n", + "\n", + "Now let's use this in simulation and take a look at the loading profile." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(loading, save_freq=100)\n", + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, as expected, load is at normal level for most of the time, then falls to low power mode towards the end of discharge.\n", + "\n", + "Let's look at the corresponding outputs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print_battery_output_plots(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice the jump in voltage at the point where the load changed. Low power mode extended the life of the battery.\n", + "\n", + "In this section we show how to make custom loading profiles. Most applications can use the standard load classes, but some may require creating complex custom load profiles using this feature." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step Size" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The next configurable parameter in simulation is the step size, or `dt`. This is the size of the step taken from one step to the next when simulating. Smaller step sizes will usually more accurately simulate state evolution, but at a computational cost. Conversely, some models can become unstable at large step sizes. Choosing the correct step size is important to the success of a simulation or prediction.\n", + "\n", + "In this section we will introduce the concept of setting simulation step size (`dt`) and discuss some considerations when selecting step sizes.\n", + "\n", + "For this section we will use the `progpy.models.ThrownObject model` (see __[03 Existing Models](03_Existing%20Models.ipynb)__), imported and created below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import ThrownObject\n", + "\n", + "m = ThrownObject()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Basic Step Size" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To set step size, set the dt parameter in the `simulate_to` or `simulate_to_threshold` methods. In this example we will use a large and small step size and compare the results.\n", + "\n", + "First, let's simulate with a large step size, saving the result at every step." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(events=\"impact\", dt=2.5, save_freq=2.5)\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that the parabola above is jagged. Also note that the estimated time of impact is around 10 seconds and the maximum height is a little over 120 meters. \n", + "\n", + "Now let's run the simulation again with a smaller step size." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(events=\"impact\", dt=0.25, save_freq=0.25)\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Not only is the curve much smoother with a smaller step size, but the results are significantly different. Now the time of impact is closer to 8 seconds and maximum height closer to 80 meters.\n", + "\n", + "All simulations are approximations. The example with the larger step size accumulates more error in integration. The second example (with a smaller step size) is more accurate to the actual model behavior.\n", + "\n", + "Now let's decrease the step size even more." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(events=\"impact\", dt=0.05, save_freq=0.05)\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The resulting output is different than the 0.25 second step size run, but not by much. What you see here is the diminishing returns in decreasing step size.\n", + "\n", + "The smaller the step size, the more computational resources required to simulate it. This doesn't matter as much for simulating this simple model over a short horizon, but becomes very important when performing prediction (see __[08 Prediction](08_Prediction.ipynb)__), using a complex model with a long horizon, or when operating in a computationally constrained environment (e.g., embedded)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Dynamic Step Size" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The last section introduced step size and showed how changing the step size effects the simulation results. In the last example step size (`dt`) and `save_freq` were the same, meaning each point was captured exactly. This is not always the case, for example in the case below. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(events=\"impact\", dt=1, save_freq=1.5)\n", + "print(\"Times saved: \", results.times)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With a `save_freq` of 1.5 seconds you might expect the times saved to be 0, 1.5, 3, 4.5, ..., but that's not the case. This is because the timestep is 1 second, so the simulation never stops near 1.5 seconds to record it. 'auto' stepsize can help with this.\n", + "\n", + "To use 'auto' stepsize set `dt` to a tuple of ('auto', MAX) where MAX is replaced with the maximum allowable stepsize." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = m.simulate_to_threshold(events=\"impact\", dt=(\"auto\", 1), save_freq=1.5)\n", + "print(\"Times saved: \", results.times)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We repeated the simulation using automatic step size with a maximum step size of 1. The result was that the times where state was saved matched what was requested exactly. This is important for simulations with large step sizes where there are specific times that must be captured.\n", + "\n", + "Also note that automatic step size doesn't just adjust for `save_freq`. It will also adjust to meet `save_pts` and any transition points in a Piecewise loading profile." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Custom Step Size" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There are times when an advanced user would like more flexibility in selecting step sizes. This can be used to adjust step size dynamically close to events or times of interest. In some models, there are complex behaviors during certain parts of the life of the system that require more precise simulation. For example, the knee point in the voltage profile for a discharged battery. This can be done by providing a function (t, x)->dt instead of a scalar `dt`. \n", + "\n", + "For example, if a user wanted to reduce the step size closer to impact, they could do so like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def next_time(t, x):\n", + " # In this example dt is a function of state. Uses a dt of 1 until impact event state 0.25, then 0.25\n", + " event_state = m.event_state(x)\n", + " if event_state[\"impact\"] < 0.25:\n", + " return 0.25\n", + " return 1\n", + "\n", + "\n", + "results = m.simulate_to_threshold(dt=next_time, save_freq=0.25, events=\"impact\")\n", + "\n", + "print(results.times)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that after 8 seconds the step size decreased to 0.25 seconds, as expected." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Parameters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "All the previous sections used a model with default settings. This is hardly ever the case. A model will have to be configured to represent the actual system. For example, the BatteryCircuit default parameters are for a 18650 battery tested in NASA's SHARP lab. If you're using the model for a system other than that one battery, you will need to update the parameters.\n", + "\n", + "The parameters available are specific to the system in question. See __[03 Existing Models](03_Existing%20Models.ipynb)__ for a more detailed description of these. For example, for the BatteryCircuit model, parameters include battery capacity, internal resistance, and other electrical characteristics.\n", + "\n", + "In this section we will adjust the parameters for the ThrownObject Model, observing how that changes system behavior." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import ThrownObject\n", + "import matplotlib.pyplot as plt\n", + "\n", + "m = ThrownObject()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Parameters can be accessed using the `parameters` attribute. For a ThrownObject, here are the parameters:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(m.parameters)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Ignoring process and measurement noise for now (that will be described in the next section) and the lumped_parameter (which will be described in 4. Creating new prognostic models), the parameters of interest here are described below:\n", + "\n", + "* **thrower_height**: The height of the thrower in meters, and therefore the initial height of the thrown object\n", + "* **throwing_speed**: The speed at which the ball is thrown vertically (in m/s)\n", + "* **g**: Acceleration due to gravity (m/s^2)\n", + "* **rho**: Air density (affects drag)\n", + "* **A**: Cross-sectional area of the object (affects drag)\n", + "* **m**: Mass of the object (affects drag)\n", + "* **cd**: Coefficient of drag of the object (affects drag)\n", + "\n", + "Let's try simulating the path of the object with different throwing speeds." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "default_throw = m.simulate_to_threshold(events=\"impact\", dt=0.1, save_freq=0.1)\n", + "\n", + "m[\"throwing_speed\"] = 10\n", + "slow_throw = m.simulate_to_threshold(events=\"impact\", dt=0.1, save_freq=0.1)\n", + "\n", + "m[\"throwing_speed\"] = 80\n", + "fast_throw = m.simulate_to_threshold(events=\"impact\", dt=0.1, save_freq=0.1)\n", + "\n", + "plt.figure(figsize=(10, 8))\n", + "\n", + "plt.plot(default_throw.times, default_throw.outputs, label=\"Default\", color=\"tab:blue\")\n", + "plt.plot(slow_throw.times, slow_throw.outputs, label=\"Slow\", color=\"tab:orange\")\n", + "plt.plot(fast_throw.times, fast_throw.outputs, label=\"Fast\", color=\"tab:green\")\n", + "\n", + "plt.legend()\n", + "plt.title(\"Simulation with throws at different speeds\", pad=10)\n", + "plt.xlabel(\"time (s)\")\n", + "plt.ylabel(\"position (m)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also set parameters as keyword arguments when instantiating the model, like below. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m_e = ThrownObject(g=-9.81) # Earth gravity\n", + "results_earth = m_e.simulate_to_threshold(events=\"impact\", dt=0.1, save_freq=0.1)\n", + "\n", + "m_j = ThrownObject(g=-24.79) # Jupiter gravity\n", + "results_jupiter = m_j.simulate_to_threshold(events=\"impact\", dt=0.1, save_freq=0.1)\n", + "\n", + "plt.figure(figsize=(10, 8))\n", + "\n", + "plt.plot(results_earth.times, results_earth.outputs, label=\"Earth\", color=\"tab:blue\")\n", + "plt.plot(\n", + " results_jupiter.times, results_jupiter.outputs, label=\"Jupiter\", color=\"tab:orange\"\n", + ")\n", + "\n", + "plt.legend()\n", + "plt.title(\"Simulation with throws under Earth's and Jupiter's gravity\", pad=10)\n", + "plt.xlabel(\"time (s)\")\n", + "plt.ylabel(\"position (m)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Model parameters are used to configure a model to accurately describe the system of interest.\n", + "\n", + "For a simple system like the ThrownObject, model parameters are simple and measurable. For most systems, there are many parameters that are difficult to estimate. For these, parameter estimation comes into play. See __[02 Parameter_Estimation](02_Parameter%20Estimation.ipynb)__ for more details" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Noise" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It is impossible to have absolute knowledge of future states due to uncertainties in the system. To account for this, we can incorporate uncertainty into a model through the following forms:\n", + "\n", + "* __Process Noise__: Noise representing uncertainty in the model transition (e.g., model or model configuration uncertainty, uncertainty from simplifying assumptions). Applied during state transition.\n", + "* __Measurement Noise__: Noise representing uncertainty in the measurement process (e.g., sensor sensitivity, sensor misalignments, environmental effects). Applied during estimation of outputs from states.\n", + "* __Future Loading Noise__: Noise representing uncertainty in the future loading estimates (e.g., uncertainty from incomplete knowledge of future loading). It is the responsibility of the user to apply Future Loading Noise as appropriate in the supplied future loading method.\n", + "\n", + "Other types of uncertainty will be introduced in __[08 Prediction](08_Prediction.ipynb)__.\n", + "\n", + "In this section, we will be examining multiple approaches for adding process and measurement noise. For an example of future loading noise, please refer to the `GaussianNoiseLoadWrapper` in the [Gaussian Noise in Loading](#gaussian-noise-in-loading) section.\n", + "\n", + "We will start by importing the ThrownObject model for simulation and matplotlib for graphing." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import ThrownObject\n", + "import matplotlib.pyplot as plt\n", + "from matplotlib.lines import Line2D" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now define the configuration of the simulation and some helper functions to print the results. For this example, we will not be passing in a future load since the ThrownObject model has no inputs and we cannot load the system (i.e., we cannot affect it once it's in the air)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"events\": \"impact\",\n", + " \"dt\": 0.005,\n", + " \"save_freq\": 0.5,\n", + "}\n", + "\n", + "\n", + "def print_results(simulated_results):\n", + " print(\"states:\")\n", + " for t, x in zip(simulated_results.times, simulated_results.states):\n", + " print(\"\\t{:.2f}s: {}\".format(t, x))\n", + "\n", + " print(\"outputs:\")\n", + " for t, x in zip(simulated_results.times, simulated_results.outputs):\n", + " print(\"\\t{:.2f}s: {}\".format(t, x))\n", + "\n", + " # The simulation stopped at impact, so the last element of times is the impact time\n", + " print(\"\\nimpact time: {:.2f}s\".format(simulated_results.times[-1]))\n", + "\n", + "\n", + "def plot_comparison(no_noise_simulation, simulated_results):\n", + " plt.figure(figsize=(10, 8))\n", + "\n", + " print_noise_plot(simulated_results)\n", + " print_no_noise_plot(no_noise_simulation)\n", + "\n", + " color_legend = [\n", + " Line2D(\n", + " [0],\n", + " [0],\n", + " marker=\"o\",\n", + " markersize=10,\n", + " color=\"tab:blue\",\n", + " linestyle=\"None\",\n", + " label=\"Position (x) [m]\",\n", + " ),\n", + " Line2D(\n", + " [0],\n", + " [0],\n", + " marker=\"o\",\n", + " markersize=10,\n", + " color=\"tab:orange\",\n", + " linestyle=\"None\",\n", + " label=\"Velocity (v) [m/s]\",\n", + " ),\n", + " ]\n", + "\n", + " linestyle_legend = [\n", + " Line2D([0], [0], color=\"black\", lw=2, linestyle=\"-\", label=\"Noise\"),\n", + " Line2D([0], [0], color=\"black\", lw=2, linestyle=\"--\", label=\"No noise\"),\n", + " ]\n", + "\n", + " plt.legend(handles=color_legend + linestyle_legend, bbox_to_anchor=(1, 1))\n", + " plt.xlabel(\"time (s)\")\n", + " plt.ylabel(\"state\")\n", + "\n", + "\n", + "def print_no_noise_plot(no_noise_simulation):\n", + " no_noise_x = [state[\"x\"] for state in no_noise_simulation.states]\n", + " no_noise_v = [state[\"v\"] for state in no_noise_simulation.states]\n", + "\n", + " plt.plot(\n", + " no_noise_simulation.times,\n", + " no_noise_x,\n", + " label=\"Position (x) [m]\",\n", + " color=\"#155d8d\",\n", + " linestyle=\"dashed\",\n", + " )\n", + " plt.plot(\n", + " no_noise_simulation.times,\n", + " no_noise_v,\n", + " label=\"Velocity (v) [m/s]\",\n", + " color=\"#d65a08\",\n", + " linestyle=\"dashed\",\n", + " )\n", + " plt.legend()\n", + "\n", + "\n", + "def print_noise_plot(simulation):\n", + " noise_x = [state[\"x\"] for state in simulation.states]\n", + " noise_v = [state[\"v\"] for state in simulation.states]\n", + " plt.plot(simulation.times, noise_x, label=\"Position (x) [m]\", color=\"tab:blue\")\n", + " plt.plot(simulation.times, noise_v, label=\"Velocity (v) [m/s]\", color=\"tab:orange\")\n", + " plt.legend()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's start with an example with no noise." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m = ThrownObject(process_noise=False)\n", + "print(\"Simulation without noise\")\n", + "simulated_results = m.simulate_to_threshold(**config)\n", + "\n", + "print_results(simulated_results)\n", + "print_no_noise_plot(simulated_results)\n", + "plt.xlabel(\"time (s)\")\n", + "plt.ylabel(\"state\")\n", + "\n", + "plt.title(\"Simulation with no noise\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, we see a clean parabola for position and linear decrease in speed. 
Exactly what we would expect for this model without noise.\n", + "\n", + "Let's save the simulated results from this example into the variable `no_noise_simulation` to use as a comparison reference to the next few examples showing simulations with noise." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "no_noise_simulation = simulated_results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here is an example of a simulation with normal (i.e., Gaussian) process noise with a standard deviation of 25 applied to every state. Even though this standard deviation is quite high, we'll notice the curves aren't dramatically different due to the small step size. At every step, noise is resampled, so the noise added on a single step may be large but cancelled out over many steps." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "process_noise = 25\n", + "\n", + "m = ThrownObject(process_noise=process_noise)\n", + "print(\"Simulation with process noise\")\n", + "simulated_results = m.simulate_to_threshold(**config)\n", + "\n", + "print_results(simulated_results)\n", + "plot_comparison(no_noise_simulation, simulated_results)\n", + "plt.title(\"Simulation with no noise vs. process noise\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note the deviation from the 'no noise' run in both states. Noise is sampled randomly, so if you were to rerun the code above again, you would see a slightly different curve.\n", + "We can also specify different amounts of noise on different states. This is an example of a simulation with more process noise on position than velocity. Here you should see a smooth curve for the velocity and a noisy curve for position." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "process_noise = {\"x\": 30, \"v\": 1}\n", + "\n", + "m = ThrownObject(process_noise=process_noise)\n", + "print(\"Simulation with more process noise on position than velocity\")\n", + "simulated_results = m.simulate_to_threshold(**config)\n", + "\n", + "print_results(simulated_results)\n", + "plot_comparison(no_noise_simulation, simulated_results)\n", + "plt.title(\n", + " \"Simulation with no noise vs. more process noise on position than velocity\", pad=10\n", + ")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also define the shape of the noise to be uniform or triangular instead of normal. The image below shows the shapes of these distributions. Users might select a different noise shape to better capture the nature and shape of the state transition uncertainty.\n", + "\n", + "![Graphs of normal, triangular, and uniform distributions](distributions.png \"Distributions\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + " This example demonstrates a uniform process noise distribution." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "process_noise = {\"x\": 30, \"v\": 1}\n", + "process_noise_dist = \"uniform\"\n", + "model_config = {\n", + " \"process_noise_dist\": process_noise_dist,\n", + " \"process_noise\": process_noise,\n", + "}\n", + "\n", + "m = ThrownObject(**model_config)\n", + "print(\"Simulation with uniform process noise distribution\")\n", + "simulated_results = m.simulate_to_threshold(**config)\n", + "\n", + "print_results(simulated_results)\n", + "plot_comparison(no_noise_simulation, simulated_results)\n", + "plt.title(\"Simulation with no noise vs. uniform process noise distribution\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The above examples all focused on process noise. We can apply measurement noise in the same way. \n", + "\n", + "Since measurement noise is applied during the estimation of the outputs from the states, in this example, we will see that the `x` outputs differ from the `x` states. Unlike the examples with process noise, the `x` states in this simulation are equal to the `x` states and outputs of a simulation without noise, as measurement noise is not applied until later.\n", + "\n", + "In the graph below, we can observe the measurement noise reflected in the `x` outputs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "measurement_noise = {\"x\": 10}\n", + "measurement_noise_dist = \"triangular\"\n", + "model_config = {\n", + " \"measurement_noise_dist\": measurement_noise_dist,\n", + " \"measurement_noise\": measurement_noise,\n", + "}\n", + "\n", + "m = ThrownObject(**model_config)\n", + "print(\"Simulation with triangular measurement noise distribution\")\n", + "simulated_results = m.simulate_to_threshold(**config)\n", + "\n", + "print_results(simulated_results)\n", + "\n", + "plt.figure(figsize=(10, 8))\n", + "\n", + "noise_x = [state[\"x\"] for state in simulated_results.states]\n", + "noise_output_x = [state[\"x\"] for state in simulated_results.outputs]\n", + "no_noise_output_x = [state[\"x\"] for state in no_noise_simulation.outputs]\n", + "\n", + "plt.plot(simulated_results.times, noise_x, label=\"With noise state\", color=\"tab:blue\")\n", + "plt.plot(\n", + " simulated_results.times,\n", + " noise_output_x,\n", + " label=\"With noise output\",\n", + " color=\"tab:purple\",\n", + ")\n", + "plt.plot(\n", + " simulated_results.times,\n", + " no_noise_output_x,\n", + " label=\"No noise output\",\n", + " color=\"black\",\n", + " linestyle=(0, (5, 4)),\n", + ")\n", + "\n", + "plt.legend()\n", + "plt.xlabel(\"time\")\n", + "plt.ylabel(\"position (m)\")\n", + "plt.title(\n", + " \"Simulation with no noise vs. triangular measurement noise distribution\", pad=10\n", + ")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In some cases, users might want to define some custom noise profile. This is especially important for complex cases where the amount of noise changes as a function of state.\n", + "To demonstrate this, we'll demonstrate a scenario where process noise on velocity is proportional to state. This could represent a case where the model is unstable for high velocities. 
In this example, we will define a helper function `apply_proportional_process_noise`, which will add noise to `v` that increases as the object moves faster.\n", + "\n", + "If we wanted to apply noise in a replicable manner, we could set the numpy random seed to a fixed value before a run, e.g., `numpy.random.seed(42)`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def apply_proportional_process_noise(self, x, dt=1):\n", + " x[\"v\"] -= dt * 0.5 * x[\"v\"]\n", + " return x\n", + "\n", + "\n", + "model_config = {\"process_noise\": apply_proportional_process_noise}\n", + "\n", + "m = ThrownObject(**model_config)\n", + "print(\"Simulation with proportional noise on velocity\")\n", + "simulated_results = m.simulate_to_threshold(**config)\n", + "\n", + "print_results(simulated_results)\n", + "plot_comparison(no_noise_simulation, simulated_results)\n", + "plt.title(\"Simulation with no noise vs. proportional noise on velocity\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Vectorized Simulation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Some models support vectorization, where multiple states are simulated in parallel. This is a more efficient simulation technique than running a separate simulation for each state. We will import the ThrownObject model and confirm that the model supports vectorization by checking the `is_vectorized` property." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models.thrown_object import ThrownObject\n", + "\n", + "m = ThrownObject()\n", + "m.is_vectorized" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, we will be using `simulate_to_threshold` with vectorized states, which are a representation of a system's current conditions. The ThrownObject model will be used to simulate multiple thrown objects. Let's start by getting the necessary imports." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from numpy import array\n", + "from matplotlib.lines import Line2D\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will also define a helper function to visualize the four throws."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def print_vectorized_sim_plots(simulated_results):\n", + " plt.figure(figsize=(10, 8))\n", + "\n", + " first_throw_x = [state[\"x\"][0] for state in simulated_results.states]\n", + " first_throw_v = [state[\"v\"][0] for state in simulated_results.states]\n", + "\n", + " second_throw_x = [state[\"x\"][1] for state in simulated_results.states]\n", + " second_throw_v = [state[\"v\"][1] for state in simulated_results.states]\n", + "\n", + " third_throw_x = [state[\"x\"][2] for state in simulated_results.states]\n", + " third_throw_v = [state[\"v\"][2] for state in simulated_results.states]\n", + "\n", + " fourth_throw_x = [state[\"x\"][3] for state in simulated_results.states]\n", + " fourth_throw_v = [state[\"v\"][3] for state in simulated_results.states]\n", + "\n", + " plt.plot(\n", + " simulated_results.times, first_throw_x, color=\"tab:blue\", linestyle=\"dashed\"\n", + " )\n", + " plt.plot(simulated_results.times, first_throw_v, color=\"tab:blue\")\n", + "\n", + " plt.plot(\n", + " simulated_results.times, second_throw_x, color=\"tab:orange\", linestyle=\"dashed\"\n", + " )\n", + " plt.plot(simulated_results.times, second_throw_v, color=\"tab:orange\")\n", + "\n", + " plt.plot(\n", + " simulated_results.times, third_throw_x, color=\"tab:cyan\", linestyle=\"dashed\"\n", + " )\n", + " plt.plot(simulated_results.times, third_throw_v, color=\"tab:cyan\")\n", + "\n", + " plt.plot(\n", + " simulated_results.times, fourth_throw_x, color=\"tab:purple\", linestyle=\"dashed\"\n", + " )\n", + " plt.plot(simulated_results.times, fourth_throw_v, color=\"tab:purple\")\n", + "\n", + " plt.xlabel(\"time (s)\")\n", + " plt.ylabel(\"state\")\n", + "\n", + " color_legend = [\n", + " Line2D(\n", + " [0],\n", + " [0],\n", + " marker=\"o\",\n", + " markersize=10,\n", + " color=\"tab:blue\",\n", + " linestyle=\"None\",\n", + " label=\"Throw 1\",\n", + " ),\n", + " Line2D(\n", + " [0],\n", + " [0],\n", + " marker=\"o\",\n", + " markersize=10,\n", + " color=\"tab:orange\",\n", + " linestyle=\"None\",\n", + " label=\"Throw 2\",\n", + " ),\n", + " Line2D(\n", + " [0],\n", + " [0],\n", + " marker=\"o\",\n", + " markersize=10,\n", + " color=\"tab:cyan\",\n", + " linestyle=\"None\",\n", + " label=\"Throw 3\",\n", + " ),\n", + " Line2D(\n", + " [0],\n", + " [0],\n", + " marker=\"o\",\n", + " markersize=10,\n", + " color=\"tab:purple\",\n", + " linestyle=\"None\",\n", + " label=\"Throw 4\",\n", + " ),\n", + " ]\n", + "\n", + " linestyle_legend = [\n", + " Line2D([0], [0], color=\"black\", lw=2, linestyle=\"-\", label=\"Position (x) [m]\"),\n", + " Line2D(\n", + " [0], [0], color=\"black\", lw=2, linestyle=\"--\", label=\"Velocity (v) [m/s]\"\n", + " ),\n", + " ]\n", + "\n", + " plt.legend(handles=color_legend + linestyle_legend, bbox_to_anchor=(1, 1))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now set up the vectorized initial state. In this example, we will define 4 throws of varying positions (x) and strengths (v) in `first_state`. We will then simulate to the threshold using this state. We should see the simulation stop once all objects hits the ground since the `event_strategy` is 'all'." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "first_state = {\"x\": array([1.75, 1.8, 1.85, 1.9]), \"v\": array([35, 39, 22, 47])}\n", + "\n", + "m = ThrownObject()\n", + "simulated_results = m.simulate_to_threshold(\n", + " x=first_state,\n", + " events=\"impact\",\n", + " event_strategy=\"all\",\n", + " print=True,\n", + " dt=0.1,\n", + " save_freq=1,\n", + ")\n", + "\n", + "print_vectorized_sim_plots(simulated_results)\n", + "plt.title(\"Vectorized simulation until all objects hit the ground\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Using a vectorized simulation is more efficient than separately simulating multiple cases. This can be useful when we need to compare multiple options or when there is a discrete set of possible starting states." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configuring Simulation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here we will explore other ways to configure simulations. These approaches can be used to further tailor a simulation to a specific use case. We will use the Battery model for these examples." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import BatteryCircuit as Battery\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's create a model object and define a piecewise future loading function." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "batt = Battery()\n", + "\n", + "\n", + "def future_loading(t, x=None):\n", + " if t < 600:\n", + " i = 2\n", + " elif t < 900:\n", + " i = 1\n", + " elif t < 1800:\n", + " i = 4\n", + " elif t < 3000:\n", + " i = 2\n", + " else:\n", + " i = 3\n", + " return batt.InputContainer({\"i\": i})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Simulating From a Known Time" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There may be cases where we want to simulate from a time other than 0. For example, we may have a future loading profile that is a function of time and need to pause our simulation midway (e.g., the results inform a decision) before continuing from where we left off.\n", + "\n", + "To do this, we can adjust `t0`. The following example shows a battery simulation that starts at 700 seconds."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"save_freq\": 100,\n", + " \"dt\": 2,\n", + " \"t0\": 700,\n", + "}\n", + "\n", + "simulated_results = batt.simulate_to_threshold(future_loading, **config)\n", + "\n", + "print(\"First timestamp in simulation :\", simulated_results.times[0])\n", + "\n", + "simulated_results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")\n", + "plt.scatter(\n", + " simulated_results.times[0], simulated_results.inputs[0][\"i\"], color=\"red\", zorder=5\n", + ")\n", + "plt.annotate(\n", + " f\"({simulated_results.times[0]}, {simulated_results.inputs[0]['i']})\",\n", + " xy=(\n", + " simulated_results.times[0],\n", + " simulated_results.inputs[0][\"i\"],\n", + " ), # Point to annotate\n", + " xytext=(simulated_results.times[0], simulated_results.inputs[0][\"i\"] + 0.05),\n", + " fontsize=10,\n", + " horizontalalignment=\"center\",\n", + " verticalalignment=\"bottom\",\n", + ")\n", + "plt.show()\n", + "\n", + "simulated_results.outputs.plot(keys=[\"v\"], xlabel=\"time (s)\", ylabel=\"voltage (V)\")\n", + "plt.scatter(\n", + " simulated_results.times[0], simulated_results.outputs[0][\"v\"], color=\"red\", zorder=5\n", + ")\n", + "plt.annotate(\n", + " f\"({simulated_results.times[0]}, {simulated_results.outputs[0]['v']})\",\n", + " xy=(\n", + " simulated_results.times[0],\n", + " simulated_results.outputs[0][\"v\"],\n", + " ), # Point to annotate\n", + " xytext=(simulated_results.times[0], simulated_results.outputs[0][\"v\"] + 0.04),\n", + " fontsize=10,\n", + " horizontalalignment=\"left\",\n", + " verticalalignment=\"top\",\n", + ")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can observe how different input current draws affect the voltage output curve. Generally, the graphs indicate that drawing a higher current leads to a lower voltage." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Integration Method" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Simulation is essentially the process of integrating a model forward with time. By default, a simple Euler integration is used to propagate the model forward. Advanced users can change the numerical integration method to affect the simulation accuracy and runtime. This is done using the `integration_method` argument in `simulate_to()`, `simulate_to_threshold()`, or the model parameters like `m.parameters['integration_method'] = 'rk4'`. Note that the integration method can only be changed for continuous models.\n", + "\n", + "Let's look at an example of simulating with both the default Euler integration method and with the Runge-Kutta fourth-order (RK4) integration method. Since RK4 is a higher-order integration method, it is more accurate than a simple Euler integration. However, it is also more complex and therefore more computationally expensive. Let's compare the results of these two techniques.\n", + "\n", + "First, we'll integrate with a step size of 1. Here, we can see that the two integration techniques are nearly identical. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"save_freq\": 10,\n", + " \"dt\": 1,\n", + "}\n", + "\n", + "rk4_config = {\"save_freq\": 10, \"dt\": 1, \"integration_method\": \"rk4\"}\n", + "\n", + "simulated_results = batt.simulate_to_threshold(future_loading, **config)\n", + "rk4_simulated_results = batt.simulate_to_threshold(future_loading, **rk4_config)\n", + "\n", + "\n", + "def plot_integration_method_comparison(simulated_results, rk4_simulated_results):\n", + " euler_v = [o[\"v\"] for o in simulated_results.outputs]\n", + " rk4_v = [o[\"v\"] for o in rk4_simulated_results.outputs]\n", + "\n", + " plt.plot(simulated_results.times, euler_v)\n", + " plt.plot(simulated_results.times, rk4_v, linestyle=\"dashed\")\n", + " plt.xlabel(\"time (s)\")\n", + " plt.ylabel(\"voltage (V)\")\n", + " plt.legend([\"Euler\", \"RK4\"])\n", + "\n", + "\n", + "plot_integration_method_comparison(simulated_results, rk4_simulated_results)\n", + "plt.title(\"Simulation with step size 1 and Euler vs. RK4 integration method\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's increase the step size to 2. Note that simulating with a larger step size results in a less accurate simulation result. In this case, the lower-accuracy Euler method is becoming unstable, but the higher-order RK4 method is still resulting in an accurate solution." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"save_freq\": 10,\n", + " \"dt\": 2,\n", + "}\n", + "\n", + "rk4_config = {\"save_freq\": 10, \"dt\": 2, \"integration_method\": \"rk4\"}\n", + "\n", + "simulated_results = batt.simulate_to_threshold(future_loading, **config)\n", + "rk4_simulated_results = batt.simulate_to_threshold(future_loading, **rk4_config)\n", + "\n", + "plot_integration_method_comparison(simulated_results, rk4_simulated_results)\n", + "plt.title(\"Simulation with step size 2 and Euler vs. RK4 integration method\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Based on the graph, we can see differences in voltage outputs between the two integration methods. We can see that the simulation using the `RK4` integration method produces a smoother and more accurate curve compared to the simulation using the `Euler` integration method. This is expected, as `RK4` is a higher-order integration method than `Euler` and is generally more accurate, albeit slower to simulate." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this notebook, we've demonstrated how to conduct simulations with prognostics models. The next notebook __[02 Parameter_Estimation](02_Parameter%20Estimation.ipynb)__ will examine how we can estimate and tune model parameters so that simulations can best match the behavior observed in some available data." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/_downloads/213762e4b3b188b80ce7be5cde9057e3/tutorial.ipynb b/docs/_downloads/213762e4b3b188b80ce7be5cde9057e3/tutorial.ipynb index 43ea593d..9a995eff 100644 --- a/docs/_downloads/213762e4b3b188b80ce7be5cde9057e3/tutorial.ipynb +++ b/docs/_downloads/213762e4b3b188b80ce7be5cde9057e3/tutorial.ipynb @@ -95,8 +95,10 @@ "metadata": {}, "outputs": [], "source": [ - "batt.parameters['qMax'] = 7856 \n", - "batt.parameters['process_noise'] = 0 # Note: by default there is some process noise- this turns it off. Noise will be explained later in the tutorial" + "batt.parameters[\"qMax\"] = 7856\n", + "batt.parameters[\"process_noise\"] = (\n", + " 0 # Note: by default there is some process noise- this turns it off. Noise will be explained later in the tutorial\n", + ")" ] }, { @@ -113,7 +115,8 @@ "outputs": [], "source": [ "from pprint import pprint\n", - "print('Model configuration:')\n", + "\n", + "print(\"Model configuration:\")\n", "pprint(batt.parameters)" ] }, @@ -131,7 +134,8 @@ "outputs": [], "source": [ "import pickle\n", - "pickle.dump(batt.parameters, open('battery123.cfg', 'wb'))" + "\n", + "pickle.dump(batt.parameters, open(\"battery123.cfg\", \"wb\"))" ] }, { @@ -147,7 +151,7 @@ "metadata": {}, "outputs": [], "source": [ - "batt.parameters = pickle.load(open('battery123.cfg', 'rb'))" + "batt.parameters = pickle.load(open(\"battery123.cfg\", \"rb\"))" ] }, { @@ -165,8 +169,8 @@ "metadata": {}, "outputs": [], "source": [ - "print('inputs: ', batt.inputs)\n", - "print('outputs: ', batt.outputs)" + "print(\"inputs: \", batt.inputs)\n", + "print(\"outputs: \", batt.outputs)" ] }, { @@ -182,7 +186,7 @@ "metadata": {}, "outputs": [], "source": [ - "print('event(s): ', batt.events)" + "print(\"event(s): \", batt.events)" ] }, { @@ -198,7 +202,7 @@ "metadata": {}, "outputs": [], "source": [ - "print('states: ', batt.states)" + "print(\"states: \", batt.states)" ] }, { @@ -224,22 +228,22 @@ "outputs": [], "source": [ "def future_loading(t, x=None):\n", - " # Variable (piece-wise) future loading scheme \n", + " # Variable (piece-wise) future loading scheme\n", " # Note: The standard interface for a future loading function is f(t, x)\n", " # State (x) is set to None by default because it is not used in this future loading scheme\n", " # This allows the function to be used without state (e.g., future_loading(t))\n", - " if (t < 600):\n", + " if t < 600:\n", " i = 2\n", - " elif (t < 900):\n", + " elif t < 900:\n", " i = 1\n", - " elif (t < 1800):\n", + " elif t < 1800:\n", " i = 4\n", - " elif (t < 3000):\n", + " elif t < 3000:\n", " i = 2\n", " else:\n", " i = 3\n", " # Since loading is an input to the model, we use the InputContainer for this model\n", - " return batt.InputContainer({'i': i})" + " return batt.InputContainer({\"i\": i})" ] }, { @@ -257,10 +261,12 @@ "source": [ "time_to_simulate_to = 200\n", "sim_config = {\n", - " 'save_freq': 20, \n", - " 'print': True # Print states - Note: is much faster without\n", + " \"save_freq\": 20,\n", + " \"print\": True, # Print states - Note: is much faster without\n", "}\n", - "(times, inputs, states, 
outputs, event_states) = batt.simulate_to(time_to_simulate_to, future_loading, **sim_config)" + "(times, inputs, states, outputs, event_states) = batt.simulate_to(\n", + " time_to_simulate_to, future_loading, **sim_config\n", + ")" ] }, { @@ -276,9 +282,9 @@ "metadata": {}, "outputs": [], "source": [ - "inputs.plot(ylabel='Current drawn (amps)')\n", - "event_states.plot(ylabel= 'SOC')\n", - "outputs.plot(ylabel= {'v': \"voltage (V)\", 't': 'temperature (°C)'}, compact= False)" + "inputs.plot(ylabel=\"Current drawn (amps)\")\n", + "event_states.plot(ylabel=\"SOC\")\n", + "outputs.plot(ylabel={\"v\": \"voltage (V)\", \"t\": \"temperature (°C)\"}, compact=False)" ] }, { @@ -296,7 +302,7 @@ "metadata": {}, "outputs": [], "source": [ - "print('monotonicity: ', event_states.monotonicity())" + "print(\"monotonicity: \", event_states.monotonicity())" ] }, { @@ -312,12 +318,12 @@ "metadata": {}, "outputs": [], "source": [ - "batt_simulation = batt.simulate_to(time_to_simulate_to, future_loading, save_freq = 20)\n", - "print('times: ', batt_simulation.times) \n", - "print('\\ninputs: ', batt_simulation.inputs)\n", - "print('\\nstates: ', batt_simulation.states)\n", - "print('\\noutputs: ', batt_simulation.outputs) \n", - "print('\\nevent states: ', batt_simulation.event_states)" + "batt_simulation = batt.simulate_to(time_to_simulate_to, future_loading, save_freq=20)\n", + "print(\"times: \", batt_simulation.times)\n", + "print(\"\\ninputs: \", batt_simulation.inputs)\n", + "print(\"\\nstates: \", batt_simulation.states)\n", + "print(\"\\noutputs: \", batt_simulation.outputs)\n", + "print(\"\\nevent states: \", batt_simulation.event_states)" ] }, { @@ -340,14 +346,16 @@ "metadata": {}, "outputs": [], "source": [ - "options = { #configuration for this sim\n", - " 'save_freq': 100, # Frequency at which results are saved (s)\n", - " 'horizon': 5000 # Maximum time to simulate (s) - This is a cutoff. The simulation will end at this time, or when a threshold has been met, whichever is first\n", - " }\n", - "(times, inputs, states, outputs, event_states) = batt.simulate_to_threshold(future_loading, **options)\n", - "inputs.plot(ylabel='Current drawn (amps)')\n", - "event_states.plot(ylabel='SOC')\n", - "outputs.plot(ylabel= {'v': \"voltage (V)\", 't': 'temperature (°C)'}, compact= False)" + "options = { # configuration for this sim\n", + " \"save_freq\": 100, # Frequency at which results are saved (s)\n", + " \"horizon\": 5000, # Maximum time to simulate (s) - This is a cutoff. 
The simulation will end at this time, or when a threshold has been met, whichever is first\n", + "}\n", + "(times, inputs, states, outputs, event_states) = batt.simulate_to_threshold(\n", + " future_loading, **options\n", + ")\n", + "inputs.plot(ylabel=\"Current drawn (amps)\")\n", + "event_states.plot(ylabel=\"SOC\")\n", + "outputs.plot(ylabel={\"v\": \"voltage (V)\", \"t\": \"temperature (°C)\"}, compact=False)" ] }, { @@ -389,30 +397,32 @@ "from progpy.models import ThrownObject\n", "\n", "# Create an instance of the thrown object model with no process noise\n", - "m = ThrownObject(process_noise = False)\n", + "m = ThrownObject(process_noise=False)\n", + "\n", "\n", "# Define future loading\n", - "def future_load(t=None, x=None): \n", + "def future_load(t=None, x=None):\n", " # The thrown object model has no inputs- you cannot load the system (i.e., effect it once it's in the air)\n", " # So we return an empty input container\n", " return m.InputContainer({})\n", "\n", + "\n", "# Define configuration for simulation\n", "config = {\n", - " 'threshold_keys': 'impact', # Simulate until the thrown object has impacted the ground\n", - " 'dt': 0.005, # Time step (s)\n", - " 'save_freq': 0.5, # Frequency at which results are saved (s)\n", + " \"threshold_keys\": \"impact\", # Simulate until the thrown object has impacted the ground\n", + " \"dt\": 0.005, # Time step (s)\n", + " \"save_freq\": 0.5, # Frequency at which results are saved (s)\n", "}\n", "\n", "# Simulate to a threshold\n", "(times, _, states, outputs, _) = m.simulate_to_threshold(future_load, **config)\n", "\n", "# Print results\n", - "print('states:')\n", - "for (t,x) in zip(times, states):\n", - " print('\\t{:.2f}s: {}'.format(t, x))\n", + "print(\"states:\")\n", + "for t, x in zip(times, states):\n", + " print(\"\\t{:.2f}s: {}\".format(t, x))\n", "\n", - "print('\\nimpact time: {:.2f}s'.format(times[-1]))\n", + "print(\"\\nimpact time: {:.2f}s\".format(times[-1]))\n", "# The simulation stopped at impact, so the last element of times is the impact time\n", "\n", "# Plot results\n", @@ -432,17 +442,17 @@ "metadata": {}, "outputs": [], "source": [ - "m = ThrownObject(process_noise = 15)\n", + "m = ThrownObject(process_noise=15)\n", "\n", "# Simulate to a threshold\n", "(times, _, states, outputs, _) = m.simulate_to_threshold(future_load, **config)\n", "\n", "# Print Results\n", - "print('states:')\n", - "for (t,x) in zip(times, states):\n", - " print('\\t{:.2f}s: {}'.format(t, x))\n", + "print(\"states:\")\n", + "for t, x in zip(times, states):\n", + " print(\"\\t{:.2f}s: {}\".format(t, x))\n", "\n", - "print('\\nimpact time: {:.2f}s'.format(times[-1]))\n", + "print(\"\\nimpact time: {:.2f}s\".format(times[-1]))\n", "\n", "# Plot results\n", "states.plot()" @@ -461,17 +471,17 @@ "metadata": {}, "outputs": [], "source": [ - "m = ThrownObject(process_noise = {'x': 50, 'v': 0})\n", + "m = ThrownObject(process_noise={\"x\": 50, \"v\": 0})\n", "\n", "# Simulate to a threshold\n", "(times, _, states, outputs, _) = m.simulate_to_threshold(future_load, **config)\n", "\n", "# Print Results\n", - "print('states:')\n", - "for (t,x) in zip(times, states):\n", - " print('\\t{:.2f}s: {}'.format(t, x))\n", + "print(\"states:\")\n", + "for t, x in zip(times, states):\n", + " print(\"\\t{:.2f}s: {}\".format(t, x))\n", "\n", - "print('\\nimpact time: {:.2f}s'.format(times[-1]))\n", + "print(\"\\nimpact time: {:.2f}s\".format(times[-1]))\n", "\n", "# Plot results\n", "states.plot()" @@ -512,47 +522,52 @@ "source": [ "from prog_models import 
PrognosticsModel\n", "\n", + "\n", "class ThrownObject(PrognosticsModel):\n", " \"\"\"\n", " Model that simulates an object thrown directly into the air (vertically) without air resistance\n", " \"\"\"\n", "\n", - " inputs = [] # no inputs, no way to control\n", + " inputs = [] # no inputs, no way to control\n", " states = [\n", - " 'x', # Vertical position (m) \n", - " 'v' # Velocity (m/s)\n", - " ]\n", - " outputs = [ # Anything we can measure\n", - " 'x' # Position (m)\n", + " \"x\", # Vertical position (m)\n", + " \"v\", # Velocity (m/s)\n", " ]\n", - " events = [ # Events that can/will occur during simulation\n", - " 'falling', # Event- object is falling\n", - " 'impact' # Event- object has impacted ground\n", + " outputs = [ # Anything we can measure\n", + " \"x\" # Position (m)\n", + " ]\n", + " events = [ # Events that can/will occur during simulation\n", + " \"falling\", # Event- object is falling\n", + " \"impact\", # Event- object has impacted ground\n", " ]\n", "\n", - " # The Default parameters for any ThrownObject. \n", + " # The Default parameters for any ThrownObject.\n", " # Overwritten by passing parameters into constructor as kwargs or by setting model.parameters\n", " default_parameters = {\n", - " 'thrower_height': 1.83, # Height of thrower (m)\n", - " 'throwing_speed': 40, # Velocity at which the ball is thrown (m/s)\n", - " 'g': -9.81, # Acceleration due to gravity (m/s^2)\n", - " 'process_noise': 0.0 # amount of noise in each step\n", - " } \n", + " \"thrower_height\": 1.83, # Height of thrower (m)\n", + " \"throwing_speed\": 40, # Velocity at which the ball is thrown (m/s)\n", + " \"g\": -9.81, # Acceleration due to gravity (m/s^2)\n", + " \"process_noise\": 0.0, # amount of noise in each step\n", + " }\n", "\n", " # First function: Initialize. This function is used to initialize the first state of the model.\n", - " # In this case we do not need input (u) or output (z) to initialize the model, \n", + " # In this case we do not need input (u) or output (z) to initialize the model,\n", " # so we set them to None, but that's not true for every model.\n", - " # u and z are Input and Output, respectively. \n", + " # u and z are Input and Output, respectively.\n", " # Values can be accessed like a dictionary (e.g., z['x']) using the keys from inputs and outputs, respectively.\n", " # or they can be accessed using the matrix (i.e., z.matrix)\n", " def initialize(self, u=None, z=None):\n", " self.max_x = 0.0\n", - " return self.StateContainer({\n", - " 'x': self.parameters['thrower_height'], # initial altitude is height of thrower\n", - " 'v': self.parameters['throwing_speed'] \n", - " })\n", - " \n", - " # Second function: state transition. \n", + " return self.StateContainer(\n", + " {\n", + " \"x\": self.parameters[\n", + " \"thrower_height\"\n", + " ], # initial altitude is height of thrower\n", + " \"v\": self.parameters[\"throwing_speed\"],\n", + " }\n", + " )\n", + "\n", + " # Second function: state transition.\n", " # State transition can be defined in one of two ways:\n", " # 1) Discrete models use next_state(x, u, dt) -> x'\n", " # 2) Continuous models (preferred) use dx(x, u) -> dx/dt\n", @@ -560,15 +575,18 @@ " # In this case we choose the continuous model, so we define dx(x, u)\n", " # This function defines the first derivative of the state with respect to time, as a function of model configuration (self.parameters), state (x) and input (u).\n", " # Here input isn't used. 
But past state and configuration are.\n", - " # \n", - " # x and u are State and Input, respectively. \n", + " #\n", + " # x and u are State and Input, respectively.\n", " # Values can be accessed like a dictionary (e.g., x['x']) using the keys from states and inputs, respectively.\n", " # or they can be accessed using the matrix (i.e., x.matrix)\n", " def dx(self, x, u):\n", - " return self.StateContainer({\n", - " 'x': x['v'], # dx/dt = v\n", - " 'v': self.parameters['g'] # Acceleration of gravity\n", - " })\n", + " return self.StateContainer(\n", + " {\n", + " \"x\": x[\"v\"], # dx/dt = v\n", + " \"v\": self.parameters[\"g\"], # Acceleration of gravity\n", + " }\n", + " )\n", + "\n", " # Equivalently, the state transition could have been defined as follows:\n", " # def next_state(self, x, u, dt):\n", " # return self.StateContainer({\n", @@ -576,14 +594,12 @@ " # 'v': x['v'] + self.parameters['g']*dt\n", " # })\n", "\n", - " # Now, we define the output equation. \n", + " # Now, we define the output equation.\n", " # This function estimates the output (i.e., measured values) given the system state (x) and system parameters (self.parameters).\n", - " # In this example, we're saying that the state 'x' can be directly measured. \n", - " # But in most cases output will have to be calculated from state. \n", + " # In this example, we're saying that the state 'x' can be directly measured.\n", + " # But in most cases output will have to be calculated from state.\n", " def output(self, x):\n", - " return self.OutputContainer({\n", - " 'x': x['x']\n", - " })\n", + " return self.OutputContainer({\"x\": x[\"x\"]})\n", "\n", " # Next, we define the event state equation\n", " # This is the first equation that actually describes the progress of a system towards the events.\n", @@ -593,11 +609,15 @@ " # Here the two event states are as follows:\n", " # 1) falling: 1 is defined as when the system is moving at the maximum speed (i.e., throwing_speed), and 0 is when velocity is negative (i.e., falling)\n", " # 2) impact: 1 is defined as the ratio of the current altitude (x) to the maximum altitude (max_x), and 0 is when the current altitude is 0 (i.e., impact)\n", - " def event_state(self, x): \n", - " self.max_x = max(self.max_x, x['x']) # Maximum altitude\n", + " def event_state(self, x):\n", + " self.max_x = max(self.max_x, x[\"x\"]) # Maximum altitude\n", " return {\n", - " 'falling': max(x['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed\n", - " 'impact': max(x['x']/self.max_x,0) # Ratio of current altitude to maximum altitude\n", + " \"falling\": max(\n", + " x[\"v\"] / self.parameters[\"throwing_speed\"], 0\n", + " ), # Throwing speed is max speed\n", + " \"impact\": max(\n", + " x[\"x\"] / self.max_x, 0\n", + " ), # Ratio of current altitude to maximum altitude\n", " }\n", "\n", " # Finally, we define the threshold equation.\n", @@ -606,10 +626,7 @@ " # However, this implementation is more efficient, so we included it\n", " # This function maps system state (x) and system parameters (self.parameters) a boolean indicating if the event has been met for each event.\n", " def threshold_met(self, x):\n", - " return {\n", - " 'falling': x['v'] < 0,\n", - " 'impact': x['x'] <= 0\n", - " }\n" + " return {\"falling\": x[\"v\"] < 0, \"impact\": x[\"x\"] <= 0}" ] }, { @@ -627,15 +644,20 @@ "source": [ "m = ThrownObject()\n", "\n", + "\n", "def future_load(t, x=None):\n", - " return m.InputContainer({}) # No loading\n", - "event = 'impact' # Simulate until impact\n", + " return 
m.InputContainer({}) # No loading\n", "\n", - "(times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1)\n", + "\n", + "event = \"impact\" # Simulate until impact\n", + "\n", + "(times, inputs, states, outputs, event_states) = m.simulate_to_threshold(\n", + " future_load, threshold_keys=[event], dt=0.005, save_freq=1\n", + ")\n", "\n", "# Plot results\n", - "event_states.plot(ylabel= ['falling', 'impact'], compact= False)\n", - "states.plot(ylabel= {'x': \"position (m)\", 'v': 'velocity (m/s)'}, compact= False)" + "event_states.plot(ylabel=[\"falling\", \"impact\"], compact=False)\n", + "states.plot(ylabel={\"x\": \"position (m)\", \"v\": \"velocity (m/s)\"}, compact=False)" ] }, { @@ -674,13 +696,14 @@ "# Step 1: Define a function for the relationship between thrower_height and throwing_speed.\n", "def update_thrown_speed(params):\n", " return {\n", - " 'throwing_speed': params['thrower_height'] * 21.85\n", + " \"throwing_speed\": params[\"thrower_height\"] * 21.85\n", " } # Assumes thrown_speed is linear function of height\n", "\n", + "\n", "# Step 2: Define the param callbacks\n", "ThrownObject.param_callbacks = {\n", - " 'thrower_height': [update_thrown_speed]\n", - " } # Tell the derived callbacks feature to call this function when thrower_height changes." + " \"thrower_height\": [update_thrown_speed]\n", + "} # Tell the derived callbacks feature to call this function when thrower_height changes." ] }, { @@ -697,12 +720,20 @@ "outputs": [], "source": [ "obj = ThrownObject()\n", - "print(\"Default Settings:\\n\\tthrower_height: {}\\n\\tthowing_speed: {}\".format(obj.parameters['thrower_height'], obj.parameters['throwing_speed']))\n", + "print(\n", + " \"Default Settings:\\n\\tthrower_height: {}\\n\\tthowing_speed: {}\".format(\n", + " obj.parameters[\"thrower_height\"], obj.parameters[\"throwing_speed\"]\n", + " )\n", + ")\n", "\n", "# Now let's change the thrower_height\n", "print(\"changing height...\")\n", - "obj.parameters['thrower_height'] = 1.75 # Our thrower is 1.75 m tall\n", - "print(\"\\nUpdated Settings:\\n\\tthrower_height: {}\\n\\tthowing_speed: {}\".format(obj.parameters['thrower_height'], obj.parameters['throwing_speed']))\n", + "obj.parameters[\"thrower_height\"] = 1.75 # Our thrower is 1.75 m tall\n", + "print(\n", + " \"\\nUpdated Settings:\\n\\tthrower_height: {}\\n\\tthowing_speed: {}\".format(\n", + " obj.parameters[\"thrower_height\"], obj.parameters[\"throwing_speed\"]\n", + " )\n", + ")\n", "print(\"Notice how speed changed automatically with height\")\n", "\n", "# Let's delete the callback so we can use the same model in the future:\n", @@ -734,12 +765,11 @@ "from numpy import inf\n", "\n", "ThrownObject.state_limits = {\n", - " # object may not go below ground\n", - " 'x': (0, inf),\n", - "\n", - " # object may not exceed the speed of light\n", - " 'v': (-299792458, 299792458)\n", - " }" + " # object may not go below ground\n", + " \"x\": (0, inf),\n", + " # object may not exceed the speed of light\n", + " \"v\": (-299792458, 299792458),\n", + "}" ] }, { @@ -757,7 +787,7 @@ "metadata": {}, "outputs": [], "source": [ - "x = {'x': -5, 'v': 3e8} # Too fast and below the ground\n", + "x = {\"x\": -5, \"v\": 3e8} # Too fast and below the ground\n", "x = obj.apply_limits(x)\n", "print(x)" ] @@ -794,17 +824,17 @@ "outputs": [], "source": [ "times = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n", - "inputs = [{}]*9\n", + "inputs = [{}] * 9\n", "outputs = [\n", - " {'x': 1.83},\n", - " {'x': 36.95},\n", - " 
{'x': 62.36},\n", - " {'x': 77.81},\n", - " {'x': 83.45},\n", - " {'x': 79.28},\n", - " {'x': 65.3},\n", - " {'x': 41.51},\n", - " {'x': 7.91},\n", + " {\"x\": 1.83},\n", + " {\"x\": 36.95},\n", + " {\"x\": 62.36},\n", + " {\"x\": 77.81},\n", + " {\"x\": 83.45},\n", + " {\"x\": 79.28},\n", + " {\"x\": 65.3},\n", + " {\"x\": 41.51},\n", + " {\"x\": 7.91},\n", "]" ] }, @@ -821,7 +851,7 @@ "metadata": {}, "outputs": [], "source": [ - "keys = ['thrower_height', 'throwing_speed']" + "keys = [\"thrower_height\", \"throwing_speed\"]" ] }, { @@ -837,7 +867,7 @@ "metadata": {}, "outputs": [], "source": [ - "m.parameters['thrower_height'] = 20" + "m.parameters[\"thrower_height\"] = 20" ] }, { @@ -853,10 +883,10 @@ "metadata": {}, "outputs": [], "source": [ - "print('Model configuration before')\n", + "print(\"Model configuration before\")\n", "for key in keys:\n", " print(\"-\", key, m.parameters[key])\n", - "print(' Error: ', m.calc_error(times, inputs, outputs, dt=1e-4))" + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=1e-4))" ] }, { @@ -877,10 +907,10 @@ "m.estimate_params([(times, inputs, outputs)], keys, dt=0.01)\n", "\n", "# Print result\n", - "print('\\nOptimized configuration')\n", + "print(\"\\nOptimized configuration\")\n", "for key in keys:\n", " print(\"-\", key, m.parameters[key])\n", - "print(' Error: ', m.calc_error(times, inputs, outputs, dt=1e-4))" + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=1e-4))" ] }, { diff --git a/docs/_downloads/2176a8ad664aae49752084c44b205627/sim_valve.py b/docs/_downloads/2176a8ad664aae49752084c44b205627/sim_valve.py index 320967d4..d869d507 100644 --- a/docs/_downloads/2176a8ad664aae49752084c44b205627/sim_valve.py +++ b/docs/_downloads/2176a8ad664aae49752084c44b205627/sim_valve.py @@ -2,74 +2,85 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a pneumatic valve being simulated until threshold is met. +Example of a pneumatic valve being simulated until threshold is met. """ from progpy.models.pneumatic_valve import PneumaticValve -def run_example(): + +def run_example(): # Create a model object - valv = PneumaticValve(process_noise= 0) + valv = PneumaticValve(process_noise=0) # Define future loading function cycle_time = 20 + def future_loading(t, x=None): - t = t % cycle_time - if t < cycle_time/2: - return valv.InputContainer({ - 'pL': 3.5e5, - 'pR': 2.0e5, + t = t % cycle_time + if t < cycle_time / 2: + return valv.InputContainer( + { + "pL": 3.5e5, + "pR": 2.0e5, # Open Valve - 'uTop': False, - 'uBot': True - }) - return valv.InputContainer({ - 'pL': 3.5e5, - 'pR': 2.0e5, + "uTop": False, + "uBot": True, + } + ) + return valv.InputContainer( + { + "pL": 3.5e5, + "pR": 2.0e5, # Close Valve - 'uTop': True, - 'uBot': False - }) + "uTop": True, + "uBot": False, + } + ) # Simulate to threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") # Configure options config = { - 'dt': 0.01, - 'horizon': 800, - 'save_freq': 60, - 'print': True, - 'progress': True, + "dt": 0.01, + "horizon": 800, + "save_freq": 60, + "print": True, + "progress": True, } # Set wear parameter for spring to 1 - valv.parameters['x0']['wk'] = 1 + valv.parameters["x0"]["wk"] = 1 # Define first measured output. 
This is needed by the simulate_to_threshold method to initialize state first_output = valv.output(valv.initialize(future_loading(0))) # Simulate - simulated_results = valv.simulate_to_threshold(future_loading, first_output, **config) + simulated_results = valv.simulate_to_threshold( + future_loading, first_output, **config + ) # Simulate to threshold again but with a different wear mode - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") # Configure options config = { - 'dt': 0.01, - 'horizon': 800, - 'save_freq': 60, - 'print': True, - 'progress': True + "dt": 0.01, + "horizon": 800, + "save_freq": 60, + "print": True, + "progress": True, } # Reset wear parameter for spring to 0, set wear parameter for friction to 1 - valv.parameters['x0']['wk'] = 0 - valv.parameters['x0']['wr'] = 1 + valv.parameters["x0"]["wk"] = 0 + valv.parameters["x0"]["wr"] = 1 # Define first measured output. This is needed by the simulate_to_threshold method to initialize state first_output = valv.output(valv.initialize(future_loading(0))) # Simulate - simulated_results = valv.simulate_to_threshold(future_loading, first_output, **config) + simulated_results = valv.simulate_to_threshold( + future_loading, first_output, **config + ) + # This allows the module to be executed directly -if __name__ == '__main__': +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/221f3e0372f568160c580dff03b83b4e/sim_battery_eol.ipynb b/docs/_downloads/221f3e0372f568160c580dff03b83b4e/sim_battery_eol.ipynb index c0cb027c..1be0ae00 100644 --- a/docs/_downloads/221f3e0372f568160c580dff03b83b4e/sim_battery_eol.ipynb +++ b/docs/_downloads/221f3e0372f568160c580dff03b83b4e/sim_battery_eol.ipynb @@ -1,54 +1,109 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample of a battery being simulated until End of Life (EOL). Battery capacity decreases with use. In this case, EOL is defined as when the battery capacity falls below some acceptable threshold (i.e., what we define as useful capacity). 
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n\nfrom progpy.models import BatteryElectroChem as Battery\n\ndef run_example(): \n # Step 1: Create a model object\n batt = Battery()\n\n # Step 2: Define future loading function \n # Here we're using a function designed to charge until 0.95, \n # then discharge until 0.05\n load = 1\n\n def future_loading(t, x=None):\n nonlocal load \n\n # Rule for loading after initialization\n if x is not None:\n # Current event state in the form {'EOD': <(0, 1)>, 'InsufficientCapacity': <(0, 1)>}\n event_state = batt.event_state(x)\n if event_state[\"EOD\"] > 0.95:\n load = 1 # Discharge\n elif event_state[\"EOD\"] < 0.05:\n load = -1 # Charge\n # Rule for loading at initialization\n return batt.InputContainer({'i': load})\n\n # Step 3: Simulate to Capacity is insufficient Threshold\n print('\\n\\n------------------------------------------------')\n print('Simulating to threshold\\n\\n')\n options = {\n 'save_freq': 1000, # Frequency at which results are saved\n 'dt': 2, # Timestep\n 'threshold_keys': ['InsufficientCapacity'], # Simulate to InsufficientCapacity\n 'print': True\n }\n simulated_results = batt.simulate_to_threshold(future_loading, **options)\n\n # Step 4: Plot Results\n simulated_results.inputs.plot(ylabel='Current drawn (amps)')\n simulated_results.event_states.plot(ylabel='Event States', labels={'EOD': 'State of Charge (SOC)', 'InsufficientCapacity': 'State of Health (SOH)'})\n plt.ylim([0, 1])\n\n plt.show()\n\n# This allows the module to be executed directly \nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample of a battery being simulated until End of Life (EOL). Battery capacity decreases with use. In this case, EOL is defined as when the battery capacity falls below some acceptable threshold (i.e., what we define as useful capacity). 
\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "\n", + "from progpy.models import BatteryElectroChem as Battery\n", + "\n", + "\n", + "def run_example():\n", + " # Step 1: Create a model object\n", + " batt = Battery()\n", + "\n", + " # Step 2: Define future loading function\n", + " # Here we're using a function designed to charge until 0.95,\n", + " # then discharge until 0.05\n", + " load = 1\n", + "\n", + " def future_loading(t, x=None):\n", + " nonlocal load\n", + "\n", + " # Rule for loading after initialization\n", + " if x is not None:\n", + " # Current event state in the form {'EOD': <(0, 1)>, 'InsufficientCapacity': <(0, 1)>}\n", + " event_state = batt.event_state(x)\n", + " if event_state[\"EOD\"] > 0.95:\n", + " load = 1 # Discharge\n", + " elif event_state[\"EOD\"] < 0.05:\n", + " load = -1 # Charge\n", + " # Rule for loading at initialization\n", + " return batt.InputContainer({\"i\": load})\n", + "\n", + " # Step 3: Simulate to Capacity is insufficient Threshold\n", + " print(\"\\n\\n------------------------------------------------\")\n", + " print(\"Simulating to threshold\\n\\n\")\n", + " options = {\n", + " \"save_freq\": 1000, # Frequency at which results are saved\n", + " \"dt\": 2, # Timestep\n", + " \"threshold_keys\": [\"InsufficientCapacity\"], # Simulate to InsufficientCapacity\n", + " \"print\": True,\n", + " }\n", + " simulated_results = batt.simulate_to_threshold(future_loading, **options)\n", + "\n", + " # Step 4: Plot Results\n", + " simulated_results.inputs.plot(ylabel=\"Current drawn (amps)\")\n", + " simulated_results.event_states.plot(\n", + " ylabel=\"Event States\",\n", + " labels={\n", + " \"EOD\": \"State of Charge (SOC)\",\n", + " \"InsufficientCapacity\": \"State of Health (SOH)\",\n", + " },\n", + " )\n", + " plt.ylim([0, 1])\n", + "\n", + " plt.show()\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/234783871ee7e6c239e46a53e2e5a1cb/benchmarking_example.py b/docs/_downloads/234783871ee7e6c239e46a53e2e5a1cb/benchmarking_example.py index ec70e269..02e614ca 100644 --- a/docs/_downloads/234783871ee7e6c239e46a53e2e5a1cb/benchmarking_example.py +++ b/docs/_downloads/234783871ee7e6c239e46a53e2e5a1cb/benchmarking_example.py @@ -1,16 +1,16 @@ # Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. """ -This example performs benchmarking for a state estimation and prediction with uncertainty given a Prognostics Model. The process and benchmarking analysis are run for various sample sizes. - +This example performs benchmarking for a state estimation and prediction with uncertainty given a Prognostics Model. The process and benchmarking analysis are run for various sample sizes. 
+ Method: An instance of the BatteryCircuit model in prog_models is created, state estimation is set up with a chosen state_estimator, and prediction is set up with a chosen predictor. - Prediction of future states (with uncertainty) is then performed for various sample sizes. - Metrics are calculated and displayed for each run. + Prediction of future states (with uncertainty) is then performed for various sample sizes. + Metrics are calculated and displayed for each run. -Results: +Results: i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction for each distinct sample size ii) Time event is predicted to occur (with uncertainty) - iii) Various prediction metrics, including alpha-lambda metric + iii) Various prediction metrics, including alpha-lambda metric """ from progpy.models import BatteryCircuit as Battery @@ -19,56 +19,63 @@ from prog_algs import * + def run_example(): # Step 1: Setup Model and Future Loading batt = Battery() + def future_loading(t, x={}): - # Variable (piece-wise) future loading scheme - if (t < 600): + # Variable (piece-wise) future loading scheme + if t < 600: i = 2 - elif (t < 900): + elif t < 900: i = 1 - elif (t < 1800): + elif t < 1800: i = 4 - elif (t < 3000): + elif t < 3000: i = 2 else: i = 3 - return batt.InputContainer({'i': i}) + return batt.InputContainer({"i": i}) - # Step 2: Setup Predictor - pred = predictors.MonteCarlo(batt, dt= 0.05) + # Step 2: Setup Predictor + pred = predictors.MonteCarlo(batt, dt=0.05) # Step 3: Estimate State x0 = batt.initialize() state_estimator = state_estimators.ParticleFilter(batt, x0) # Send in some data to estimate state - z1 = batt.OutputContainer({'t': 32.2, 'v': 3.915}) - z2 = batt.OutputContainer({'t': 32.3, 'v': 3.91}) + z1 = batt.OutputContainer({"t": 32.2, "v": 3.915}) + z2 = batt.OutputContainer({"t": 32.3, "v": 3.91}) state_estimator.estimate(0.1, future_loading(0.1), z1) state_estimator.estimate(0.2, future_loading(0.2), z2) # Step 4: Benchmark Predictions # Here we're comparing the results given different numbers of samples - print('Benchmarking...') + print("Benchmarking...") import time # For timing prediction - from progpy.metrics import samples as metrics + from progpy.metrics import samples as metrics # Perform benchmarking for each number of samples sample_counts = [1, 2, 5, 10] for sample_count in sample_counts: - print('\nRun 1 ({} samples)'.format(sample_count)) + print("\nRun 1 ({} samples)".format(sample_count)) start = time.perf_counter() - pred_results = pred.predict(state_estimator.x, future_loading, n_samples = sample_count) + pred_results = pred.predict( + state_estimator.x, future_loading, n_samples=sample_count + ) toe = pred_results.time_of_event.key("EOD") # Looking at EOD event end = time.perf_counter() - print('\tMSE: {:4.2f}s'.format(metrics.mean_square_error(toe, 3005.4))) - print('\tRMSE: {:4.2f}s'.format(metrics.root_mean_square_error(toe, 3005.4))) - print('\tRuntime: {:4.2f}s'.format(end - start)) + print("\tMSE: {:4.2f}s".format(metrics.mean_square_error(toe, 3005.4))) + print( + "\tRMSE: {:4.2f}s".format(metrics.root_mean_square_error(toe, 3005.4)) + ) + print("\tRuntime: {:4.2f}s".format(end - start)) - # This same approach can be applied for benchmarking and comparing other changes + # This same approach can be applied for benchmarking and comparing other changes # For example: different sampling methods, prediction algorithms, step sizes, models -# This allows the module to be executed directly -if __name__=='__main__': + +# This 
allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/257ac46232d6997b8582c10d2b58439a/sim_valve.py b/docs/_downloads/257ac46232d6997b8582c10d2b58439a/sim_valve.py index 6f83a17f..b2223bbc 100644 --- a/docs/_downloads/257ac46232d6997b8582c10d2b58439a/sim_valve.py +++ b/docs/_downloads/257ac46232d6997b8582c10d2b58439a/sim_valve.py @@ -2,74 +2,85 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a pneumatic valve being simulated until threshold is met. +Example of a pneumatic valve being simulated until threshold is met. """ from progpy.models.pneumatic_valve import PneumaticValve -def run_example(): + +def run_example(): # Create a model object - valv = PneumaticValve(process_noise= 0) + valv = PneumaticValve(process_noise=0) # Define future loading function cycle_time = 20 + def future_loading(t, x=None): - t = t % cycle_time - if t < cycle_time/2: - return valv.InputContainer({ - 'pL': 3.5e5, - 'pR': 2.0e5, + t = t % cycle_time + if t < cycle_time / 2: + return valv.InputContainer( + { + "pL": 3.5e5, + "pR": 2.0e5, # Open Valve - 'uTop': False, - 'uBot': True - }) - return valv.InputContainer({ - 'pL': 3.5e5, - 'pR': 2.0e5, + "uTop": False, + "uBot": True, + } + ) + return valv.InputContainer( + { + "pL": 3.5e5, + "pR": 2.0e5, # Close Valve - 'uTop': True, - 'uBot': False - }) + "uTop": True, + "uBot": False, + } + ) # Simulate to threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") # Configure options config = { - 'dt': 0.01, - 'horizon': 800, - 'save_freq': 60, - 'print': True, - 'progress': True, + "dt": 0.01, + "horizon": 800, + "save_freq": 60, + "print": True, + "progress": True, } # Set wear parameter for spring to 1 - valv.parameters['x0']['wk'] = 1 + valv.parameters["x0"]["wk"] = 1 # Define first measured output. This is needed by the simulat_to_threshold method to initialize state first_output = valv.output(valv.initialize(future_loading(0))) # Simulate - simulated_results = valv.simulate_to_threshold(future_loading, first_output, **config) + simulated_results = valv.simulate_to_threshold( + future_loading, first_output, **config + ) # Simulate to threshold again but with a different wear mode - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") # Configure options config = { - 'dt': 0.01, - 'horizon': 800, - 'save_freq': 60, - 'print': True, - 'progress': True + "dt": 0.01, + "horizon": 800, + "save_freq": 60, + "print": True, + "progress": True, } # Reset wear parameter for spring to 0, set wear parameter for friction to 1 - valv.parameters['x0']['wk'] = 0 - valv.parameters['x0']['wr'] = 1 + valv.parameters["x0"]["wk"] = 0 + valv.parameters["x0"]["wr"] = 1 # Define first measured output. 
This is needed by the simulat_to_threshold method to initialize state first_output = valv.output(valv.initialize(future_loading(0))) # Simulate - simulated_results = valv.simulate_to_threshold(future_loading, first_output, **config) + simulated_results = valv.simulate_to_threshold( + future_loading, first_output, **config + ) + # This allows the module to be executed directly -if __name__ == '__main__': +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/290f3878724971806588517e4f7917c9/dynamic_step_size.py b/docs/_downloads/290f3878724971806588517e4f7917c9/dynamic_step_size.py index 74f80696..5a8baf1f 100644 --- a/docs/_downloads/290f3878724971806588517e4f7917c9/dynamic_step_size.py +++ b/docs/_downloads/290f3878724971806588517e4f7917c9/dynamic_step_size.py @@ -2,54 +2,70 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating ways to use the dynamic step size feature. This feature allows users to define a time-step that changes with time or state. +Example demonstrating ways to use the dynamic step size feature. This feature allows users to define a time-step that changes with time or state. """ from progpy.models.thrown_object import ThrownObject + def run_example(): print("EXAMPLE 1: dt of 1 until 8 sec, then 0.5\n\nSetting up...\n") # Step 1: Create instance of model m = ThrownObject() - # Step 2: Setup for simulation + # Step 2: Setup for simulation def future_load(t, x=None): return {} # Step 3: Define dynamic step size function - # This `next_time` function will specify what the next step of the simulation should be at any state and time. + # This `next_time` function will specify what the next step of the simulation should be at any state and time. # f(x, t) -> (t, dt) def next_time(t, x): - # In this example dt is a function of time. We will use a dt of 1 for the first 8 seconds, then 0.5 + # In this example dt is a function of time. We will use a dt of 1 for the first 8 seconds, then 0.5 if t < 8: return 1 return 0.5 # Step 4: Simulate to impact # Here we're printing every time step so we can see the step size change - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') - (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, save_freq=1e-99, print=True, dt=next_time, threshold_keys=['impact']) + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") + (times, inputs, states, outputs, event_states) = m.simulate_to_threshold( + future_load, + save_freq=1e-99, + print=True, + dt=next_time, + threshold_keys=["impact"], + ) # Example 2 - print("EXAMPLE 2: dt of 1 until impact event state 0.5, then 0.25 \n\nSetting up...\n") + print( + "EXAMPLE 2: dt of 1 until impact event state 0.5, then 0.25 \n\nSetting up...\n" + ) # Step 3: Define dynamic step size function - # This `next_time` function will specify what the next step of the simulation should be at any state and time. + # This `next_time` function will specify what the next step of the simulation should be at any state and time. # f(x, t) -> (t, dt) def next_time(t, x): # In this example dt is a function of state. 
Uses a dt of 1 until impact event state 0.5, then 0.25 event_state = m.event_state(x) - if event_state['impact'] < 0.5: + if event_state["impact"] < 0.5: return 0.25 return 1 # Step 4: Simulate to impact # Here we're printing every time step so we can see the step size change - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') - (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, save_freq=1e-99, print=True, dt=next_time, threshold_keys=['impact']) + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") + (times, inputs, states, outputs, event_states) = m.simulate_to_threshold( + future_load, + save_freq=1e-99, + print=True, + dt=next_time, + threshold_keys=["impact"], + ) + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/297aa4272f11fb20694cf8bf106f9751/new_model.py b/docs/_downloads/297aa4272f11fb20694cf8bf106f9751/new_model.py index d076707a..a9952ee9 100644 --- a/docs/_downloads/297aa4272f11fb20694cf8bf106f9751/new_model.py +++ b/docs/_downloads/297aa4272f11fb20694cf8bf106f9751/new_model.py @@ -2,7 +2,7 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example defining and testing a new model. +Example defining and testing a new model. """ from prog_models import PrognosticsModel @@ -13,107 +13,131 @@ class ThrownObject(PrognosticsModel): Model that similates an object thrown into the air without air resistance """ - inputs = [] # no inputs, no way to control + inputs = [] # no inputs, no way to control states = [ - 'x', # Position (m) - 'v' # Velocity (m/s) - ] - outputs = [ # Anything we can measure - 'x' # Position (m) + "x", # Position (m) + "v", # Velocity (m/s) + ] + outputs = [ # Anything we can measure + "x" # Position (m) ] events = [ - 'falling', # Event- object is falling - 'impact' # Event- object has impacted ground + "falling", # Event- object is falling + "impact", # Event- object has impacted ground ] # The Default parameters. Overwritten by passing parameters dictionary into constructor default_parameters = { - 'thrower_height': 1.83, # m - 'throwing_speed': 40, # m/s - 'g': -9.81, # Acceleration due to gravity in m/s^2 - 'process_noise': 0.0 # amount of noise in each step + "thrower_height": 1.83, # m + "throwing_speed": 40, # m/s + "g": -9.81, # Acceleration due to gravity in m/s^2 + "process_noise": 0.0, # amount of noise in each step } def initialize(self, u, z): self.max_x = 0.0 - return self.StateContainer({ - 'x': self.parameters['thrower_height'], # Thrown, so initial altitude is height of thrower - 'v': self.parameters['throwing_speed'] # Velocity at which the ball is thrown - this guy is a professional baseball pitcher - }) - + return self.StateContainer( + { + "x": self.parameters[ + "thrower_height" + ], # Thrown, so initial altitude is height of thrower + "v": self.parameters[ + "throwing_speed" + ], # Velocity at which the ball is thrown - this guy is a professional baseball pitcher + } + ) + def dx(self, x, u): - return self.StateContainer({'x': x['v'], - 'v': self.parameters['g']}) # Acceleration of gravity + return self.StateContainer( + {"x": x["v"], "v": self.parameters["g"]} + ) # Acceleration of gravity def output(self, x): - return self.OutputContainer({'x': x['x']}) + return self.OutputContainer({"x": x["x"]}) # This is actually optional. 
Leaving thresholds_met empty will use the event state to define thresholds. # Threshold = Event State == 0. However, this implementation is more efficient, so we included it def threshold_met(self, x): - return { - 'falling': x['v'] < 0, - 'impact': x['x'] <= 0 - } + return {"falling": x["v"] < 0, "impact": x["x"] <= 0} - def event_state(self, x): - self.max_x = max(self.max_x, x['x']) # Maximum altitude + def event_state(self, x): + self.max_x = max(self.max_x, x["x"]) # Maximum altitude return { - 'falling': max(x['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed - 'impact': max(x['x']/self.max_x,0) # 1 until falling begins, then it's fraction of height + "falling": max( + x["v"] / self.parameters["throwing_speed"], 0 + ), # Throwing speed is max speed + "impact": max( + x["x"] / self.max_x, 0 + ), # 1 until falling begins, then it's fraction of height } + def run_example(): # Demo model # Step 1: Create instance of model m = ThrownObject() - # Step 2: Setup for simulation + # Step 2: Setup for simulation def future_load(t, x=None): return m.InputContainer({}) # No inputs, no way to control # Step 3: Simulate to impact - event = 'impact' - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1, print = True) - + event = "impact" + simulated_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], dt=0.005, save_freq=1, print=True + ) + # Print flight time - print('The object hit the ground in {} seconds'.format(round(simulated_results.times[-1],2))) + print( + "The object hit the ground in {} seconds".format( + round(simulated_results.times[-1], 2) + ) + ) - # OK, now lets compare performance on different heavenly bodies. + # OK, now lets compare performance on different heavenly bodies. 
# This requires that we update the cofiguration grav_moon = -1.62 # The first way to change the configuration is to pass in your desired config into construction of the model - m = ThrownObject(g = grav_moon) - simulated_moon_results = m.simulate_to_threshold(future_load, threshold_keys=[event], options={'dt':0.005, 'save_freq':1}) + m = ThrownObject(g=grav_moon) + simulated_moon_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], options={"dt": 0.005, "save_freq": 1} + ) grav_mars = -3.711 # You can also update the parameters after it's constructed - m.parameters['g'] = grav_mars - simulated_mars_results = m.simulate_to_threshold(future_load, threshold_keys=[event], options={'dt':0.005, 'save_freq':1}) + m.parameters["g"] = grav_mars + simulated_mars_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], options={"dt": 0.005, "save_freq": 1} + ) grav_venus = -8.87 - m.parameters['g'] = grav_venus - simulated_venus_results = m.simulate_to_threshold(future_load, threshold_keys=[event], options={'dt':0.005, 'save_freq':1}) + m.parameters["g"] = grav_venus + simulated_venus_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], options={"dt": 0.005, "save_freq": 1} + ) - print('Time to hit the ground: ') - print('\tvenus: {}s'.format(round(simulated_venus_results.times[-1],2))) - print('\tearth: {}s'.format(round(simulated_results.times[-1],2))) - print('\tmars: {}s'.format(round(simulated_mars_results.times[-1],2))) - print('\tmoon: {}s'.format(round(simulated_moon_results.times[-1],2))) + print("Time to hit the ground: ") + print("\tvenus: {}s".format(round(simulated_venus_results.times[-1], 2))) + print("\tearth: {}s".format(round(simulated_results.times[-1], 2))) + print("\tmars: {}s".format(round(simulated_mars_results.times[-1], 2))) + print("\tmoon: {}s".format(round(simulated_moon_results.times[-1], 2))) # We can also simulate until any event is met by neglecting the threshold_keys argument - simulated_results = m.simulate_to_threshold(future_load, options={'dt':0.005, 'save_freq':1}) + simulated_results = m.simulate_to_threshold( + future_load, options={"dt": 0.005, "save_freq": 1} + ) threshs_met = m.threshold_met(simulated_results.states[-1]) - for (key, met) in threshs_met.items(): + for key, met in threshs_met.items(): if met: event_occured = key - print('\nThis event that occured first: ', event_occured) + print("\nThis event that occured first: ", event_occured) # It falls before it hits the gorund, obviously # Metrics can be analyzed from the simulation results. For example: monotonicity - print('\nMonotonicity: ', simulated_results.event_states.monotonicity()) + print("\nMonotonicity: ", simulated_results.event_states.monotonicity()) + -# This allows the module to be executed directly -if __name__=='__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/29c5385a899974739f46017e800f4dd0/sim_dcmotor_singlephase.py b/docs/_downloads/29c5385a899974739f46017e800f4dd0/sim_dcmotor_singlephase.py new file mode 100644 index 00000000..2172c1a9 --- /dev/null +++ b/docs/_downloads/29c5385a899974739f46017e800f4dd0/sim_dcmotor_singlephase.py @@ -0,0 +1,45 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the +# National Aeronautics and Space Administration. All Rights Reserved. + +""" +Example of a DC motor being simulated for a set amount of time, using the single-phase dcmotor model. 
+""" + +import math +from progpy.models import dcmotor_singlephase + + +def run_example(): + motor = dcmotor_singlephase.DCMotorSP() + + def future_loading(t, x=None): + f = 0.5 + + # Simple load proportional to rotor speed. + # This is a typical, hyper-simplified model of a fixed-pitch propeller directly attached to the motor shaft such that the resistant torque + # becomes: Cq * omega^2, where Cq is a (assumed to be) constant depending on the propeller profile and omega is the rotor speed. + # Since there's no transmission, omega is exactly the speed of the motor shaft. + if x is None: # First load (before state is initialized) + t_l = 0.0 + else: + t_l = 1e-5 * x["v_rot"] ** 2.0 + return motor.InputContainer( + { + "v": 10.0 + + 2.0 + * math.sin( + math.tau * f * t + ), # voltage input assumed sinusoidal just to show variations in the input. No physical meaning. + "t_l": t_l, # assuming constant load (simple) + } + ) + + simulated_results = motor.simulate_to( + 2.0, future_loading, dt=1e-3, save_freq=0.1, print=True + ) + simulated_results.states.plot(compact=False) + + +if __name__ == "__main__": + print("Simulation of DC single-phase motor") + run_example() diff --git a/docs/_downloads/2c63802187a2434ad1767985cf24e857/sim_valve.ipynb b/docs/_downloads/2c63802187a2434ad1767985cf24e857/sim_valve.ipynb index 8e61112e..c9bb89f6 100644 --- a/docs/_downloads/2c63802187a2434ad1767985cf24e857/sim_valve.ipynb +++ b/docs/_downloads/2c63802187a2434ad1767985cf24e857/sim_valve.ipynb @@ -1,54 +1,132 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample of a pneumatic valve being simulated until threshold is met. \n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from progpy.models.pneumatic_valve import PneumaticValve\n\ndef run_example(): \n # Create a model object\n valv = PneumaticValve(process_noise= 0)\n\n # Define future loading function\n cycle_time = 20\n def future_loading(t, x=None):\n t = t % cycle_time\n if t < cycle_time/2:\n return valv.InputContainer({\n 'pL': 3.5e5,\n 'pR': 2.0e5,\n # Open Valve\n 'uTop': False,\n 'uBot': True\n })\n return valv.InputContainer({\n 'pL': 3.5e5,\n 'pR': 2.0e5,\n # Close Valve\n 'uTop': True,\n 'uBot': False\n })\n\n # Simulate to threshold\n print('\\n\\n------------------------------------------------')\n print('Simulating to threshold\\n\\n')\n # Configure options\n config = {\n 'dt': 0.01,\n 'horizon': 800,\n 'save_freq': 60,\n 'print': True,\n 'progress': True,\n }\n # Set wear parameter for spring to 1\n valv.parameters['x0']['wk'] = 1\n\n # Define first measured output. 
This is needed by the simulat_to_threshold method to initialize state\n first_output = valv.output(valv.initialize(future_loading(0)))\n # Simulate\n simulated_results = valv.simulate_to_threshold(future_loading, first_output, **config)\n\n # Simulate to threshold again but with a different wear mode\n print('\\n\\n------------------------------------------------')\n print('Simulating to threshold\\n\\n')\n # Configure options\n config = {\n 'dt': 0.01,\n 'horizon': 800,\n 'save_freq': 60,\n 'print': True,\n 'progress': True\n }\n # Reset wear parameter for spring to 0, set wear parameter for friction to 1\n valv.parameters['x0']['wk'] = 0\n valv.parameters['x0']['wr'] = 1\n\n # Define first measured output. This is needed by the simulat_to_threshold method to initialize state\n first_output = valv.output(valv.initialize(future_loading(0)))\n # Simulate\n simulated_results = valv.simulate_to_threshold(future_loading, first_output, **config)\n\n# This allows the module to be executed directly\nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample of a pneumatic valve being simulated until threshold is met. \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from progpy.models.pneumatic_valve import PneumaticValve\n", + "\n", + "\n", + "def run_example():\n", + " # Create a model object\n", + " valv = PneumaticValve(process_noise=0)\n", + "\n", + " # Define future loading function\n", + " cycle_time = 20\n", + "\n", + " def future_loading(t, x=None):\n", + " t = t % cycle_time\n", + " if t < cycle_time / 2:\n", + " return valv.InputContainer(\n", + " {\n", + " \"pL\": 3.5e5,\n", + " \"pR\": 2.0e5,\n", + " # Open Valve\n", + " \"uTop\": False,\n", + " \"uBot\": True,\n", + " }\n", + " )\n", + " return valv.InputContainer(\n", + " {\n", + " \"pL\": 3.5e5,\n", + " \"pR\": 2.0e5,\n", + " # Close Valve\n", + " \"uTop\": True,\n", + " \"uBot\": False,\n", + " }\n", + " )\n", + "\n", + " # Simulate to threshold\n", + " print(\"\\n\\n------------------------------------------------\")\n", + " print(\"Simulating to threshold\\n\\n\")\n", + " # Configure options\n", + " config = {\n", + " \"dt\": 0.01,\n", + " \"horizon\": 800,\n", + " \"save_freq\": 60,\n", + " \"print\": True,\n", + " \"progress\": True,\n", + " }\n", + " # Set wear parameter for spring to 1\n", + " valv.parameters[\"x0\"][\"wk\"] = 1\n", + "\n", + " # Define first measured output. 
This is needed by the simulat_to_threshold method to initialize state\n", + " first_output = valv.output(valv.initialize(future_loading(0)))\n", + " # Simulate\n", + " simulated_results = valv.simulate_to_threshold(\n", + " future_loading, first_output, **config\n", + " )\n", + "\n", + " # Simulate to threshold again but with a different wear mode\n", + " print(\"\\n\\n------------------------------------------------\")\n", + " print(\"Simulating to threshold\\n\\n\")\n", + " # Configure options\n", + " config = {\n", + " \"dt\": 0.01,\n", + " \"horizon\": 800,\n", + " \"save_freq\": 60,\n", + " \"print\": True,\n", + " \"progress\": True,\n", + " }\n", + " # Reset wear parameter for spring to 0, set wear parameter for friction to 1\n", + " valv.parameters[\"x0\"][\"wk\"] = 0\n", + " valv.parameters[\"x0\"][\"wr\"] = 1\n", + "\n", + " # Define first measured output. This is needed by the simulat_to_threshold method to initialize state\n", + " first_output = valv.output(valv.initialize(future_loading(0)))\n", + " # Simulate\n", + " simulated_results = valv.simulate_to_threshold(\n", + " future_loading, first_output, **config\n", + " )\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/2fce35805a321f89bc690688db5d368c/custom_model.ipynb b/docs/_downloads/2fce35805a321f89bc690688db5d368c/custom_model.ipynb index 3b8daec3..6dd30757 100644 --- a/docs/_downloads/2fce35805a321f89bc690688db5d368c/custom_model.ipynb +++ b/docs/_downloads/2fce35805a321f89bc690688db5d368c/custom_model.ipynb @@ -1,54 +1,177 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample building a custom model with LSTMStateTransitionModel.\n\nFor most cases, you will be able to use the standard LSTMStateTransitionModel.from_data class with configuration (see the LSTMStateTransitionModel class for more details). However, sometimes you might want to add custom layers, or other complex components. In that case, you will build a custom model and pass it into LSTMStateTransitionModel.\n\nIn this example, we generate fake data using the BatteryElectroChemEOD model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. \n\nWe build and fit a custom model using keras.layers. 
Finally, we compare performance to the standard format and the original model.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\nimport numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nfrom progpy.data_models import LSTMStateTransitionModel\nfrom progpy.models import BatteryElectroChemEOD\n\ndef run_example():\n print('Generating data...')\n batt = BatteryElectroChemEOD()\n future_loading_eqns = [lambda t, x=None: batt.InputContainer({'i': 1+1.5*load}) for load in range(6)]\n # Generate data with different loading and step sizes\n # Adding the step size as an element of the output\n training_data = []\n input_data = []\n output_data = []\n for i in range(9):\n dt = i/3+0.25\n for loading_eqn in future_loading_eqns:\n d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) \n u = np.array([np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float)\n z = d.outputs\n training_data.append((u, z))\n input_data.append(u)\n output_data.append(z)\n\n # Step 2: Build standard model\n print(\"Building standard model...\")\n m_batt = LSTMStateTransitionModel.from_data(\n inputs = input_data,\n outputs = output_data, \n window=12, \n epochs=30, \n units=64, # Additional units given the increased complexity of the system\n input_keys = ['i', 'dt'],\n output_keys = ['t', 'v']) \n\n # Step 3: Build custom model\n print('Building custom model...')\n (u_all, z_all) = LSTMStateTransitionModel.pre_process_data(training_data, window=12)\n \n # Normalize\n n_inputs = len(training_data[0][0][0])\n u_mean = np.mean(u_all[:,0,:n_inputs], axis=0)\n u_std = np.std(u_all[:,0,:n_inputs], axis=0)\n # If there's no variation- dont normalize \n u_std[u_std == 0] = 1\n z_mean = np.mean(z_all, axis=0)\n z_std = np.std(z_all, axis=0)\n # If there's no variation- dont normalize \n z_std[z_std == 0] = 1\n\n # Add output (since z_t-1 is last input)\n u_mean = np.hstack((u_mean, z_mean))\n u_std = np.hstack((u_std, z_std))\n\n u_all = (u_all - u_mean)/u_std\n z_all = (z_all - z_mean)/z_std\n\n # u_mean and u_std act on the column vector form (from inputcontainer)\n # so we need to transpose them to a column vector\n normalization = (u_mean[np.newaxis].T, u_std[np.newaxis].T, z_mean, z_std)\n\n callbacks = [\n keras.callbacks.ModelCheckpoint(\"jena_sense.keras\", save_best_only=True)\n ]\n inputs = keras.Input(shape=u_all.shape[1:])\n x = layers.Bidirectional(layers.LSTM(128))(inputs)\n x = layers.Dropout(0.1)(x)\n x = layers.Dense(z_all.shape[1] if z_all.ndim == 2 else 1)(x)\n model = keras.Model(inputs, x)\n model.compile(optimizer=\"rmsprop\", loss=\"mse\", metrics=[\"mae\"])\n model.fit(u_all, z_all, epochs=30, callbacks = callbacks, validation_split = 0.1)\n\n # Step 4: Build LSTMStateTransitionModel\n m_custom = LSTMStateTransitionModel(model, \n normalization=normalization, \n input_keys = ['i', 'dt'],\n output_keys = ['t', 'v']\n )\n\n # Step 5: Simulate\n print('Simulating...')\n t_counter = 0\n x_counter = batt.initialize()\n def future_loading(t, x=None):\n return batt.InputContainer({'i': 3})\n\n def future_loading2(t, x = None):\n nonlocal t_counter, x_counter\n z = batt.output(x_counter)\n z = m_batt.InputContainer({'i': 3, 't_t-1': z['t'], 'v_t-1': z['v'], 'dt': t - t_counter})\n x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter)\n t_counter = t\n return z\n data = batt.simulate_to_threshold(future_loading, dt=1, 
save_freq=1)\n results = m_batt.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1)\n results_custom = m_custom.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1)\n\n # Step 6: Compare performance\n print('Comparing performance...')\n data.outputs.plot(title='original model', compact=False)\n results.outputs.plot(title='generated model', compact=False)\n results_custom.outputs.plot(title='custom model', compact=False)\n plt.show()\n\nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample building a custom model with LSTMStateTransitionModel.\n\nFor most cases, you will be able to use the standard LSTMStateTransitionModel.from_data class with configuration (see the LSTMStateTransitionModel class for more details). However, sometimes you might want to add custom layers, or other complex components. In that case, you will build a custom model and pass it into LSTMStateTransitionModel.\n\nIn this example, we generate fake data using the BatteryElectroChemEOD model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. \n\nWe build and fit a custom model using keras.layers. 
Finally, we compare performance to the standard format and the original model.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "from tensorflow import keras\n", + "from tensorflow.keras import layers\n", + "\n", + "from progpy.data_models import LSTMStateTransitionModel\n", + "from progpy.models import BatteryElectroChemEOD\n", + "\n", + "\n", + "def run_example():\n", + " print(\"Generating data...\")\n", + " batt = BatteryElectroChemEOD()\n", + " future_loading_eqns = [\n", + " lambda t, x=None: batt.InputContainer({\"i\": 1 + 1.5 * load})\n", + " for load in range(6)\n", + " ]\n", + " # Generate data with different loading and step sizes\n", + " # Adding the step size as an element of the output\n", + " training_data = []\n", + " input_data = []\n", + " output_data = []\n", + " for i in range(9):\n", + " dt = i / 3 + 0.25\n", + " for loading_eqn in future_loading_eqns:\n", + " d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt)\n", + " u = np.array(\n", + " [np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float\n", + " )\n", + " z = d.outputs\n", + " training_data.append((u, z))\n", + " input_data.append(u)\n", + " output_data.append(z)\n", + "\n", + " # Step 2: Build standard model\n", + " print(\"Building standard model...\")\n", + " m_batt = LSTMStateTransitionModel.from_data(\n", + " inputs=input_data,\n", + " outputs=output_data,\n", + " window=12,\n", + " epochs=30,\n", + " units=64, # Additional units given the increased complexity of the system\n", + " input_keys=[\"i\", \"dt\"],\n", + " output_keys=[\"t\", \"v\"],\n", + " )\n", + "\n", + " # Step 3: Build custom model\n", + " print(\"Building custom model...\")\n", + " (u_all, z_all) = LSTMStateTransitionModel.pre_process_data(training_data, window=12)\n", + "\n", + " # Normalize\n", + " n_inputs = len(training_data[0][0][0])\n", + " u_mean = np.mean(u_all[:, 0, :n_inputs], axis=0)\n", + " u_std = np.std(u_all[:, 0, :n_inputs], axis=0)\n", + " # If there's no variation- dont normalize\n", + " u_std[u_std == 0] = 1\n", + " z_mean = np.mean(z_all, axis=0)\n", + " z_std = np.std(z_all, axis=0)\n", + " # If there's no variation- dont normalize\n", + " z_std[z_std == 0] = 1\n", + "\n", + " # Add output (since z_t-1 is last input)\n", + " u_mean = np.hstack((u_mean, z_mean))\n", + " u_std = np.hstack((u_std, z_std))\n", + "\n", + " u_all = (u_all - u_mean) / u_std\n", + " z_all = (z_all - z_mean) / z_std\n", + "\n", + " # u_mean and u_std act on the column vector form (from inputcontainer)\n", + " # so we need to transpose them to a column vector\n", + " normalization = (u_mean[np.newaxis].T, u_std[np.newaxis].T, z_mean, z_std)\n", + "\n", + " callbacks = [\n", + " keras.callbacks.ModelCheckpoint(\"jena_sense.keras\", save_best_only=True)\n", + " ]\n", + " inputs = keras.Input(shape=u_all.shape[1:])\n", + " x = layers.Bidirectional(layers.LSTM(128))(inputs)\n", + " x = layers.Dropout(0.1)(x)\n", + " x = layers.Dense(z_all.shape[1] if z_all.ndim == 2 else 1)(x)\n", + " model = keras.Model(inputs, x)\n", + " model.compile(optimizer=\"rmsprop\", loss=\"mse\", metrics=[\"mae\"])\n", + " model.fit(u_all, z_all, epochs=30, callbacks=callbacks, validation_split=0.1)\n", + "\n", + " # Step 4: Build LSTMStateTransitionModel\n", + " m_custom = LSTMStateTransitionModel(\n", + " model,\n", + " normalization=normalization,\n", + " input_keys=[\"i\", 
\"dt\"],\n", + " output_keys=[\"t\", \"v\"],\n", + " )\n", + "\n", + " # Step 5: Simulate\n", + " print(\"Simulating...\")\n", + " t_counter = 0\n", + " x_counter = batt.initialize()\n", + "\n", + " def future_loading(t, x=None):\n", + " return batt.InputContainer({\"i\": 3})\n", + "\n", + " def future_loading2(t, x=None):\n", + " nonlocal t_counter, x_counter\n", + " z = batt.output(x_counter)\n", + " z = m_batt.InputContainer(\n", + " {\"i\": 3, \"t_t-1\": z[\"t\"], \"v_t-1\": z[\"v\"], \"dt\": t - t_counter}\n", + " )\n", + " x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter)\n", + " t_counter = t\n", + " return z\n", + "\n", + " data = batt.simulate_to_threshold(future_loading, dt=1, save_freq=1)\n", + " results = m_batt.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1)\n", + " results_custom = m_custom.simulate_to(\n", + " data.times[-1], future_loading2, dt=1, save_freq=1\n", + " )\n", + "\n", + " # Step 6: Compare performance\n", + " print(\"Comparing performance...\")\n", + " data.outputs.plot(title=\"original model\", compact=False)\n", + " results.outputs.plot(title=\"generated model\", compact=False)\n", + " results_custom.outputs.plot(title=\"custom model\", compact=False)\n", + " plt.show()\n", + "\n", + "\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/31b6deef1d3aace088df741bfddb10f9/state_limits.py b/docs/_downloads/31b6deef1d3aace088df741bfddb10f9/state_limits.py index 0d15f409..5e8f3336 100644 --- a/docs/_downloads/31b6deef1d3aace088df741bfddb10f9/state_limits.py +++ b/docs/_downloads/31b6deef1d3aace088df741bfddb10f9/state_limits.py @@ -2,7 +2,7 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating when and how to identify model state limits. +Example demonstrating when and how to identify model state limits. In this example, state limits are defined for the ThrownObject Model. These are limits on the range of each state for a state-transition model. The use of this feature is then demonstrated. 
""" @@ -10,62 +10,75 @@ from math import inf from progpy.models.thrown_object import ThrownObject + def run_example(): # Demo model # Step 1: Create instance of model (without drag) - m = ThrownObject( cd = 0 ) + m = ThrownObject(cd=0) - # Step 2: Setup for simulation + # Step 2: Setup for simulation def future_load(t, x=None): return {} # add state limits m.state_limits = { # object may not go below ground height - 'x': (0, inf), - + "x": (0, inf), # object may not exceed the speed of light - 'v': (-299792458, 299792458) + "v": (-299792458, 299792458), } # Step 3: Simulate to impact - event = 'impact' - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1) - + event = "impact" + simulated_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], dt=0.005, save_freq=1 + ) + # Print states - print('Example 1') + print("Example 1") for i, state in enumerate(simulated_results.states): - print(f'State {i}: {state}') + print(f"State {i}: {state}") print() # Let's try setting x to a number outside of its bounds - x0 = m.initialize(u = {}, z = {}) - x0['x'] = -1 + x0 = m.initialize(u={}, z={}) + x0["x"] = -1 - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1, x = x0) + simulated_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], dt=0.005, save_freq=1, x=x0 + ) # Print states - print('Example 2') + print("Example 2") for i, state in enumerate(simulated_results.states): - print('State ', i, ': ', state) + print("State ", i, ": ", state) print() # Let's see what happens when the objects speed aproaches its limit - x0 = m.initialize(u = {}, z = {}) - x0['x'] = 1000000000 - x0['v'] = 0 - m.parameters['g'] = -50000000 - - print('Example 3') - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=0.3, x = x0, print = True, progress = False) + x0 = m.initialize(u={}, z={}) + x0["x"] = 1000000000 + x0["v"] = 0 + m.parameters["g"] = -50000000 + + print("Example 3") + simulated_results = m.simulate_to_threshold( + future_load, + threshold_keys=[event], + dt=0.005, + save_freq=0.3, + x=x0, + print=True, + progress=False, + ) # Note that the limits can also be applied manually using the apply_limits function - print('limiting states') - x = {'x': -5, 'v': 3e8} # Too fast and below the ground - print('\t Pre-limit: {}'.format(x)) + print("limiting states") + x = {"x": -5, "v": 3e8} # Too fast and below the ground + print("\t Pre-limit: {}".format(x)) x = m.apply_limits(x) - print('\t Post-limit: {}'.format(x)) + print("\t Post-limit: {}".format(x)) + -# This allows the module to be executed directly -if __name__=='__main__': - run_example() \ No newline at end of file +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/32c9784b74318f614dce1e92be7afc0c/05_Data Driven.ipynb b/docs/_downloads/32c9784b74318f614dce1e92be7afc0c/05_Data Driven.ipynb new file mode 100644 index 00000000..0554cfd6 --- /dev/null +++ b/docs/_downloads/32c9784b74318f614dce1e92be7afc0c/05_Data Driven.ipynb @@ -0,0 +1,168 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 5. Using Data-Driven Models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In addition to the physics-based modeling functionalities described so far, ProgPy also includes a framework for implementing data-driven models. 
\n", + "\n", + "A data-driven model is a model where the behavior is learned from data. In ProgPy, data-driven models derive from the parent class `progpy.data_models.DataModel`. A common example of a data-driven model is one that uses neural networks (e.g., `progpy.data_models.LSTMStateTransitionModel`). \n", + "\n", + "Some data-driven methodologies we will be exploring in this section include [Long Short-Term Memory (LSTM)](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html?highlight=surrogate#lstmstatetransitionmodel), [Dynamic Mode Decomposition (DMD)](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html?highlight=surrogate#dmdmodel), and [Polynomial Chaos Expansion (PCE)](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html?highlight=surrogate#polynomialchaosexpansion). The data-driven architecture also includes [surrogate models](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html?highlight=surrogate#from-another-prognosticsmodel-i-e-surrogate), which can be used to create models that approximate the original/higher-fidelity models, generally resulting in a less accurate model that is more computationally efficient.\n", + "\n", + "For more information, refer to the [`DataModel` documentation](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html).\n", + "\n", + "Before we get started, make sure to install the data-driven dependencies using the following command:\n", + "\n", + "`pip install 'progpy[datadriven]'`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "\n", + "* [General Use](#General-Use)\n", + " * [Building a New Model from Data](#Building-a-New-Model-from-Data)\n", + " * [Surrogate Models](#Surrogate-models)\n", + "* [Long Short-Term Memory (LSTM)](#Long-Short-Term-Memory-(LSTM))\n", + "* [Dynamic Mode Decomposition (DMD)](#Dynamic-Mode-Decomposition-(DMD))\n", + "* [Polynomial Chaos Expansion (PCE)](#Polynomial-Chaos-Expansion-(PCE))\n", + "* [Extending](#Extending)\n", + "* [Conclusion](#Conclusion)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## General Use" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Building a New Model from Data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this notebook will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Surrogate Models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this notebook will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Long Short-Term Memory (LSTM)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this notebook will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Dynamic Mode Decomposition (DMD)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this notebook will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Polynomial Chaos Expansion (PCE)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this notebook will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Extending" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this notebook 
will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this section, we were able to look at various ways to use data-driven models. The next section __[06 Combining Models](06_Combining%20Models.ipynb)__ examines how prognostics models can be combined." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.11.0 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.12.0" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/_downloads/341975e0d32cbd13267902b1f2dc291d/horizon.py b/docs/_downloads/341975e0d32cbd13267902b1f2dc291d/horizon.py index 3b94f994..eb3b000e 100644 --- a/docs/_downloads/341975e0d32cbd13267902b1f2dc291d/horizon.py +++ b/docs/_downloads/341975e0d32cbd13267902b1f2dc291d/horizon.py @@ -2,12 +2,12 @@ """ This example performs a state estimation and prediction with uncertainty given a Prognostics Model with a specific prediction horizon. This prediction horizon marks the end of the "time of interest" for the prediction. Often this represents the end of a mission or sufficiently in the future where the user is unconcerned with the events - + Method: An instance of the Thrown Object model in prog_models is created, and the prediction process is achieved in three steps: 1) State estimation of the current state is performed using a chosen state_estimator, and samples are drawn from this estimate 2) Prediction of future states (with uncertainty) and the times at which the event thresholds will be reached, within the prediction horizon. All events outside the horizon come back as None and are ignored in metrics -Results: +Results: i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction ii) Time event is predicted to occur (with uncertainty) """ @@ -16,11 +16,13 @@ from prog_algs import * from pprint import pprint + def run_example(): # Step 1: Setup model & future loading - def future_loading(t, x = None): + def future_loading(t, x=None): return {} - m = ThrownObject(process_noise = 0.2, measurement_noise = 0.1) + + m = ThrownObject(process_noise=0.2, measurement_noise=0.1) initial_state = m.initialize() # Step 2: Demonstrating state estimator @@ -28,7 +30,7 @@ def future_loading(t, x = None): # Step 2a: Setup NUM_SAMPLES = 1000 - filt = state_estimators.ParticleFilter(m, initial_state, num_particles = NUM_SAMPLES) + filt = state_estimators.ParticleFilter(m, initial_state, num_particles=NUM_SAMPLES) # VVV Uncomment this to use UKF State Estimator VVV # filt = state_estimators.UnscentedKalmanFilter(batt, initial_state) @@ -36,8 +38,8 @@ def future_loading(t, x = None): u = m.InputContainer({}) # No input for ThrownObject filt.estimate(0.1, u, m.output(initial_state)) - # Note: in a prognostic application the above state estimation - # step would be repeated each time there is new data. + # Note: in a prognostic application the above state estimation + # step would be repeated each time there is new data. 
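    # A minimal sketch of that repeated use (hedged): the measurement list below is
    # hypothetical and only reuses the filt.estimate(time, input, output) call
    # pattern from this example; in practice each z would come from real sensor data.
    hypothetical_measurements = [
        (0.2, m.output(initial_state)),
        (0.3, m.output(initial_state)),
    ]
    for t_meas, z_meas in hypothetical_measurements:
        filt.estimate(t_meas, u, z_meas)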
# Here we're doing one step to demonstrate how the state estimator is used # Step 3: Demonstrating Predictor @@ -53,19 +55,27 @@ def future_loading(t, x = None): PREDICTION_HORIZON = 7.67 samples = filt.x # Since we're using a particle filter, which is also sample-based, we can directly use the samples, without changes STEP_SIZE = 0.001 - mc_results = mc.predict(samples, future_loading, dt=STEP_SIZE, horizon = PREDICTION_HORIZON) + mc_results = mc.predict( + samples, future_loading, dt=STEP_SIZE, horizon=PREDICTION_HORIZON + ) print("\nPredicted Time of Event:") metrics = mc_results.time_of_event.metrics() pprint(metrics) # Note this takes some time - mc_results.time_of_event.plot_hist(keys = 'impact') - mc_results.time_of_event.plot_hist(keys = 'falling') + mc_results.time_of_event.plot_hist(keys="impact") + mc_results.time_of_event.plot_hist(keys="falling") + + print( + "\nSamples where impact occurs before horizon: {:.2f}%".format( + metrics["impact"]["number of samples"] / mc.parameters["n_samples"] * 100 + ) + ) - print("\nSamples where impact occurs before horizon: {:.2f}%".format(metrics['impact']['number of samples']/mc.parameters['n_samples']*100)) - # Step 4: Show all plots import matplotlib.pyplot as plt # For plotting + plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/34e35cbaee2c6ee1700a3db00fe690ff/06_Combining Models.ipynb b/docs/_downloads/34e35cbaee2c6ee1700a3db00fe690ff/06_Combining Models.ipynb new file mode 100644 index 00000000..b43ba710 --- /dev/null +++ b/docs/_downloads/34e35cbaee2c6ee1700a3db00fe690ff/06_Combining Models.ipynb @@ -0,0 +1,968 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 6. Combining Prognostic Models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This section demonstrates how prognostic models can be combined. There are two instances in which this is useful: \n", + "\n", + "1. Combining multiple models of different inter-related systems into one system-of-system model (i.e., [Composite Models](https://nasa.github.io/progpy/api_ref/prog_models/CompositeModel.html)), or\n", + "2. Combining multiple models of the same system to be simulated together and aggregated (i.e., [Ensemble Models](https://nasa.github.io/progpy/api_ref/prog_models/EnsembleModel.html) or [Mixture of Expert Models](https://nasa.github.io/progpy/api_ref/progpy/MixtureOfExperts.html)). This is generally done to improve the accuracy of prediction when you have multiple models that each represent part of the behavior or represent a distribution of different behaviors. \n", + "\n", + "These two methods for combining models are described in the following sections." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "\n", + "* [Composite Model](#Composite-Model)\n", + "* [Ensemble Model](#Ensemble-Model)\n", + "* [Mixture of Experts (MoE)](#Mixture-of-Experts-(MoE))\n", + "* [Conclusion](#Conclusion)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Composite Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A `CompositeModel` is a `PrognosticsModel` that is composed of multiple `PrognosticsModels`. This is a tool for modeling system-of-systems. 
(i.e., interconnected systems), where the behavior and state of one system affects the state of another system. The composite prognostics models are connected using defined connections between the output or state of one model, and the input of another model. The resulting `CompositeModel` behaves as a single model.\n", + "\n", + "To illustrate this, we will create a composite model of an aircraft's electric powertrain, combining the `DCMotor`, `ESC`, and `PropellerLoad` models. The Electronic Speed Controller (`ESC`) converts a commanded duty (i.e., throttle) to signals to the motor. The motor then acts on the signals from the ESC to spin the load, which enacts a torque on the motor (in this case from air resistence).\n", + "\n", + "First we will import the used models, and the `CompositeModel` class." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import DCMotor, ESC, PropellerLoad\n", + "from progpy import CompositeModel" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next we will initiate objects of the individual models that will later create the composite powertrain model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m_motor = DCMotor()\n", + "m_esc = ESC()\n", + "m_load = PropellerLoad()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next we have to define the connections between the systems. Let's first define the connections from the `DCMotor` to the propeller load. For this, we'll need to look at the `DCMotor` states and understand how they influence the `PropellerLoad` inputs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"motor states: \", m_motor.states)\n", + "print(\"load inputs: \", m_load.inputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Each of the states and inputs are described in the model documentation at [DC Motor Docs](https://nasa.github.io/progpy/api_ref/prog_models/IncludedModels.html#dc-motor) and [Propeller Docs](https://nasa.github.io/progpy/api_ref/prog_models/IncludedModels.html#propellerload). From reading the documentation we understand that the propeller's velocity is from the motor, so we can define the first connection:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "connections = [(\"DCMotor.v_rot\", \"PropellerLoad.v_rot\")]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Connections are defined as couples where the first value is the input for the second value. The connection above tells the composite model to feed the `DCMotor`'s `v_rot` into the `PropellerLoad`'s input `v_rot`.\n", + "\n", + "Next, let's look at the connections the other direction, from the load to the motor." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"load states: \", m_load.states)\n", + "print(\"motor inputs: \", m_motor.inputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We know here that the load on the motor is from the propeller load, so we can add that connection. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "connections.append((\"PropellerLoad.t_l\", \"DCMotor.t_l\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we will repeat the exercise with the `DCMotor` and `ESC`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"ESC states: \", m_esc.states)\n", + "print(\"motor inputs: \", m_motor.inputs)\n", + "connections.append((\"ESC.v_a\", \"DCMotor.v_a\"))\n", + "connections.append((\"ESC.v_b\", \"DCMotor.v_b\"))\n", + "connections.append((\"ESC.v_c\", \"DCMotor.v_c\"))\n", + "\n", + "print(\"motor states: \", m_motor.states)\n", + "print(\"ESC inputs: \", m_esc.inputs)\n", + "connections.append((\"DCMotor.theta\", \"ESC.theta\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we are ready to combine the models. We create a composite model with the inidividual models and the defined connections." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m_powertrain = CompositeModel((m_esc, m_load, m_motor), connections=connections)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The resulting model includes two inputs, `ESC` voltage (from the battery) and duty (i.e., commanded throttle). These are the only two inputs not connected internally from the original three models. The states are a combination of all the states of every system. Finally, the outputs are a combination of all the outputs from each of the individual systems. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"inputs: \", m_powertrain.inputs)\n", + "print(\"states: \", m_powertrain.states)\n", + "print(\"outputs: \", m_powertrain.outputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Frequently users only want a subset of the outputs from the original model. For example, in this case you're unlikely to be measuring the individual voltages from the ESC. Outputs can be specified when creating the composite model. 
For example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m_powertrain = CompositeModel(\n", + " (m_esc, m_load, m_motor),\n", + " connections=connections,\n", + " outputs={\"DCMotor.v_rot\", \"DCMotor.theta\"},\n", + ")\n", + "\n", + "print(\"outputs: \", m_powertrain.outputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now the outputs are only `DCMotor` angle and velocity.\n", + "\n", + "The resulting model can be used in simulation, state estimation, and prediction the same way any other model would be, as demonstrated below:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "load = m_powertrain.InputContainer(\n", + " {\n", + " \"ESC.duty\": 1, # 100% Throttle\n", + " \"ESC.v\": 23,\n", + " }\n", + ")\n", + "\n", + "\n", + "def future_loading(t, x=None):\n", + " return load\n", + "\n", + "\n", + "simulated_results = m_powertrain.simulate_to(\n", + " 1, future_loading, dt=2.5e-5, save_freq=2e-2\n", + ")\n", + "\n", + "fig = simulated_results.outputs.plot(\n", + " keys=[\"DCMotor.v_rot\"],\n", + " ylabel=\"velocity (rad/sec)\",\n", + " xlabel=\"time (s)\",\n", + " title=\"Composite model output\",\n", + ")\n", + "fig = simulated_results.states.plot(\n", + " keys=[\"DCMotor.i_b\", \"DCMotor.i_c\", \"DCMotor.i_a\"],\n", + " ylabel=\"ESC currents\",\n", + " xlabel=\"time (s)\",\n", + " title=\"Composite model states\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Parameters in composed models can be updated directly using the model_name.parameter name parameter of the composite model. Like so:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m_powertrain.parameters[\"PropellerLoad.D\"] = 1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here we updated the propeller diameter to 1, greatly increasing the load on the motor. You can see this in the updated simulation outputs (below). When compared to the original results above you will find that the maximum velocity is lower. This is expected given the larger propeller load." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "simulated_results = m_powertrain.simulate_to(\n", + " 1, future_loading, dt=2.5e-5, save_freq=2e-2\n", + ")\n", + "\n", + "fig = simulated_results.outputs.plot(\n", + " keys=[\"DCMotor.v_rot\"],\n", + " ylabel=\"velocity (rad/sec)\",\n", + " xlabel=\"time (s)\",\n", + " title=\"Composite model output with increased load\",\n", + ")\n", + "fig = simulated_results.states.plot(\n", + " keys=[\"DCMotor.i_b\", \"DCMotor.i_c\", \"DCMotor.i_a\"],\n", + " ylabel=\"ESC Currents\",\n", + " xlabel=\"time (s)\",\n", + " title=\"Composite model with increased load states\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that a function can be used to perform simple transitions between models. 
For example, if you wanted to multiply the torque by 1.1 to represent some gearing or additional load, that could be done by defining a function, as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def torque_multiplier(t_l):\n", + " return t_l * 1.1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The function is referred to as 'function' by the composite model. So we can add the function into the connections as follows. Note that the argument name is used for the input of the function and 'return' is used to signify the function's return value. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "connections = [\n", + " (\"PropellerLoad.t_l\", \"function.t_l\"),\n", + " (\"function.return\", \"DCMotor.t_l\"),\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's add back in the other connections and build the composite model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "connections.extend(\n", + " [\n", + " (\"ESC.v_a\", \"DCMotor.v_a\"),\n", + " (\"ESC.v_b\", \"DCMotor.v_b\"),\n", + " (\"ESC.v_c\", \"DCMotor.v_c\"),\n", + " (\"DCMotor.theta\", \"ESC.theta\"),\n", + " (\"DCMotor.v_rot\", \"PropellerLoad.v_rot\"),\n", + " ]\n", + ")\n", + "\n", + "m_powertrain = CompositeModel(\n", + " (m_esc, m_load, m_motor, torque_multiplier),\n", + " connections=connections,\n", + " outputs={\"DCMotor.v_rot\", \"DCMotor.theta\"},\n", + ")\n", + "\n", + "simulated_results = m_powertrain.simulate_to(\n", + " 1, future_loading, dt=2.5e-5, save_freq=2e-2\n", + ")\n", + "\n", + "fig = simulated_results.outputs.plot(\n", + " keys=[\"DCMotor.v_rot\"],\n", + " ylabel=\"velocity (rad/sec)\",\n", + " xlabel=\"time (s)\",\n", + " title=\"Complete composite model output\",\n", + ")\n", + "fig = simulated_results.states.plot(\n", + " keys=[\"DCMotor.i_b\", \"DCMotor.i_c\", \"DCMotor.i_a\"],\n", + " ylabel=\"ESC currents\",\n", + " xlabel=\"time (s)\",\n", + " title=\"Complete composite model states\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that you can also have functions with more than one argument. If you dont connect the arguments of the function to some model, it will show up in the inputs of the composite model." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Ensemble Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "An ensemble model is an approach to modeling where one or more models of the same system are simulated together and then aggregated into a single prediction. This can be multiple versions of the same model with different parameters, or different models of the same system representing different parts of the system's behavior. This is generally done to improve the accuracy of prediction when you have multiple models that each represent part of the behavior or represent a distribution of different behaviors.\n", + "\n", + "In ensemble models, aggregation occurs in two steps, at state transition and then output, event state, threshold met, or performance metric calculation. At each state transition, the states from each aggregate model are combined based on the defined aggregation method. When calling output, the resulting outputs from each aggregate model are similarily combined. 
The default method is mean, but the user can also choose to use a custom aggregator.\n", + "\n", + "![Aggregation](img/aggregation.png)\n", + "\n", + "To illustrate this, let's create an example where there we have four equivalent circuit models, each with different configuration parameters, below. These represent the range of possible configurations expected for our example system." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import BatteryCircuit\n", + "\n", + "m_circuit = BatteryCircuit()\n", + "m_circuit_2 = BatteryCircuit(qMax=7860)\n", + "m_circuit_3 = BatteryCircuit(qMax=6700, Rs=0.055)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's create an `EnsembleModel` which combines each of these." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy import EnsembleModel\n", + "\n", + "m_ensemble = EnsembleModel(models=(m_circuit, m_circuit_2, m_circuit_3))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's evaluate the performance of the combined model using real battery data from [NASA's prognostic data repository](https://nasa.github.io/progpy/api_ref/progpy/DataSets.html)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.datasets import nasa_battery\n", + "\n", + "data = nasa_battery.load_data(batt_id=8)[1]\n", + "RUN_ID = 0\n", + "test_input = [{\"i\": i} for i in data[RUN_ID][\"current\"]]\n", + "test_time = data[RUN_ID][\"relativeTime\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To evaluate the model we first create a future loading function that uses the loading from the data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def future_loading(t, x=None):\n", + " for i, mission_time in enumerate(test_time):\n", + " if mission_time > t:\n", + " return m_circuit.InputContainer(test_input[i])\n", + " return m_circuit.InputContainer(test_input[-1]) # Default - last load" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t_end = test_time.iloc[-1]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next we will simulate the ensemble model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t_end = test_time.iloc[-1]\n", + "results_ensemble = m_ensemble.simulate_to(t_end, future_loading)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we compare the voltage predicted by the ensemble model with the ground truth from dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from matplotlib import pyplot as plt\n", + "\n", + "fig = plt.plot(test_time, data[RUN_ID][\"voltage\"], color=\"green\", label=\"ground truth\")\n", + "fig = plt.plot(\n", + " results_ensemble.times,\n", + " [z[\"v\"] for z in results_ensemble.outputs],\n", + " color=\"red\",\n", + " label=\"ensemble\",\n", + ")\n", + "plt.xlabel(\"Time (s)\")\n", + "plt.ylabel(\"Voltage\")\n", + "plt.legend()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The ensemble model actually performs pretty poorly here. 
This is mostly because there's an outlier model (`m_circuit_3`). This can be resolved using a different aggregation method. By default, aggregation uses the mean. Let's update the ensemble model to use median and resimulate." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "\n", + "m_ensemble[\"aggregation_method\"] = np.median\n", + "results_ensemble_median = m_ensemble.simulate_to(t_end, future_loading)\n", + "\n", + "fig = plt.plot(\n", + " results_ensemble_median.times,\n", + " [z[\"v\"] for z in results_ensemble_median.outputs],\n", + " color=\"orange\",\n", + " label=\"ensemble -median\",\n", + ")\n", + "fig = plt.plot(test_time, data[RUN_ID][\"voltage\"], color=\"green\", label=\"ground truth\")\n", + "fig = plt.plot(\n", + " results_ensemble.times,\n", + " [z[\"v\"] for z in results_ensemble.outputs],\n", + " color=\"red\",\n", + " label=\"ensemble\",\n", + ")\n", + "plt.xlabel(\"Time (s)\")\n", + "plt.ylabel(\"Voltage\")\n", + "plt.legend()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Much better! \n", + "\n", + "The same ensemble approach can be used with a heterogeneous set of models that have different states. Here we will repeat the exercise using the battery electrochemisty and equivalent circuit models. The two models share one state in common (`tb`), but otherwise are different" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import BatteryElectroChemEOD\n", + "\n", + "m_electro = BatteryElectroChemEOD(qMobile=7800)\n", + "\n", + "print(\"Electrochem states: \", m_electro.states)\n", + "print(\"Equivalent Circuit States\", m_circuit.states)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's create an ensemble model combining these and evaluate it." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m_ensemble = EnsembleModel((m_circuit, m_electro))\n", + "results_ensemble = m_ensemble.simulate_to(t_end, future_loading)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To compare these results, let's also simulate the two models that comprise the ensemble model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results_circuit1 = m_circuit.simulate_to(t_end, future_loading)\n", + "results_electro = m_electro.simulate_to(t_end, future_loading)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The results of each of these are plotted below." 
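The aggregation_method need not be a NumPy built-in; as noted above, a custom aggregator can also be supplied. A minimal sketch follows, assuming the callable is invoked the same way as np.mean or np.median on the set of values from the aggregate models; the weighting is purely illustrative, favoring the electrochemistry model.

import numpy as np


def weighted_mean(values, axis=0):
    # Weight the second model (electrochemistry) twice as heavily as the first (circuit)
    values = np.asarray(values)
    weights = np.ones(values.shape[axis])
    weights[-1] = 2
    return np.average(values, axis=axis, weights=weights)


m_ensemble["aggregation_method"] = weighted_mean
results_ensemble_weighted = m_ensemble.simulate_to(t_end, future_loading)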
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Ensemble models can be further extended to include an aggregator that selects the best model at any given time. That feature is described in the following section." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mixture of Experts (MoE)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Mixture of Experts (`MoE`) models combine multiple models of the same system, similar to ensemble models. Unlike ensemble models, the aggregation is done by selecting the \"best\" model, that is, the model that has performed best in the past. Each model has a 'score' that is tracked in the state, and this score determines which model is best.\n", + "\n", + "To demonstrate this feature, we will repeat the example from the ensemble model section, this time with a mixture of experts model. For this example to work, the ensemble model section above must be run first.\n", + "\n", + "First, let's combine the three battery circuit models into a single mixture of experts model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy import MixtureOfExpertsModel\n", + "\n", + "m = MixtureOfExpertsModel((m_circuit_3, m_circuit_2, m_circuit))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The combined model has the same outputs and events as the circuit model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(m.outputs)\n", + "print(m.events)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Its states contain all of the states of each model, kept separate. Each individual model comprising the `MoE` model will be simulated separately, so the model keeps track of the states propagated through each model separately. The states also include a score for each model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(m.states)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `MoE` model inputs include both the input of the component models, `i` (current), and their outputs: `v` (voltage) and `t` (temperature). The component model outputs are provided to update the scores of each model when performing state transition. If they are not provided when calling `next_state`, the scores will not be updated." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(m.inputs)" + ] + },
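+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To make the input format concrete, a single `MoE` input point bundles the applied load with the measured outputs. The sketch below is purely illustrative; it uses the first sample of the dataset, and the same pattern appears again in the `next_state` example shortly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# One MoE input point: the load plus the measured outputs for that time step\n", + "u_example = m.InputContainer(\n", + "    {\n", + "        \"i\": test_input[0][\"i\"],\n", + "        \"v\": data[RUN_ID][\"voltage\"][0],\n", + "        \"t\": data[RUN_ID][\"temperature\"][0],\n", + "    }\n", + ")\n", + "print(u_example)" + ] + },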
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's evaluate the performance of the combined model using the real battery data from [NASA's prognostic data repository](https://nasa.github.io/progpy/api_ref/progpy/DataSets.html) that we loaded earlier.\n", + "\n", + "To evaluate the model, we will reuse the future loading function defined in the ensemble model section." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results_moe = m.simulate_to(t_end, future_loading)\n", + "fig = plt.plot(test_time, data[RUN_ID][\"voltage\"], color=\"green\", label=\"ground truth\")\n", + "fig = plt.plot(\n", + " results_moe.times,\n", + " [z[\"v\"] for z in results_moe.outputs],\n", + " color=\"red\",\n", + " label=\"moe\",\n", + ")\n", + "plt.xlabel(\"Time (s)\")\n", + "plt.ylabel(\"Voltage\")\n", + "plt.legend()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here the model performs pretty poorly. If we look at the state, we see that the three scores are equal. This is because we haven't provided any output information. The future load function doesn't include the output, just the input (`i`). When the three scores are equal like this, the first model is used." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Model 1 Score: \", results_moe.states[-1][\"BatteryCircuit._score\"])\n", + "print(\"Model 2 Score: \", results_moe.states[-1][\"BatteryCircuit_2._score\"])\n", + "print(\"Model 3 Score: \", results_moe.states[-1][\"BatteryCircuit_3._score\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's provide the output for a few steps." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "x0 = m.initialize()\n", + "x = m.next_state(\n", + " x=x0,\n", + " u=m.InputContainer(\n", + " {\n", + " \"i\": test_input[0][\"i\"],\n", + " \"v\": data[RUN_ID][\"voltage\"][0],\n", + " \"t\": data[RUN_ID][\"temperature\"][0],\n", + " }\n", + " ),\n", + " dt=test_time[1] - test_time[0],\n", + ")\n", + "x = m.next_state(\n", + " x=x,\n", + " u=m.InputContainer(\n", + " {\n", + " \"i\": test_input[1][\"i\"],\n", + " \"v\": data[RUN_ID][\"voltage\"][1],\n", + " \"t\": data[RUN_ID][\"temperature\"][1],\n", + " }\n", + " ),\n", + " dt=test_time[1] - test_time[0],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's take a look at the model scores again." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Model 1 Score: \", x[\"BatteryCircuit._score\"])\n", + "print(\"Model 2 Score: \", x[\"BatteryCircuit_2._score\"])\n", + "print(\"Model 3 Score: \", x[\"BatteryCircuit_3._score\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here we see that after a few steps the algorithm has determined that model 3 is the best fit of the three models. Now if we were to repeat the simulation, it would use the best model, resulting in a better fit." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results_moe = m.simulate_to(t_end, future_loading, t0=test_time[1] - test_time[0], x=x)\n", + "fig = plt.plot(\n", + " test_time[2:], data[RUN_ID][\"voltage\"][2:], color=\"green\", label=\"ground truth\"\n", + ")\n", + "fig = plt.plot(\n", + " results_moe.times[2:],\n", + " [z[\"v\"] for z in results_moe.outputs][2:],\n", + " color=\"red\",\n", + " label=\"moe\",\n", + ")\n", + "plt.xlabel(\"Time (s)\")\n", + "plt.ylabel(\"Voltage\")\n", + "plt.legend()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The fit here is much better. The `MoE` model learned which of the three models best fit the observed behavior.\n", + "\n", + "In a prognostic application, the scores will be updated each time you use a state estimator (so long as you provide the output as part of the input). When performing a prediction, the scores aren't updated, since future outputs are not known.\n", + "\n", + "An example of when this would be useful is a case where there are a few common degradation paths or \"modes\". Rather than using a single model with enough uncertainty to represent every mode, the modes can be represented by different models. Once enough of the degradation path has been observed, the observed mode will be the one reported.\n", + "\n", + "If the model fit is expected to be stable (that is, the best model is not expected to change anymore), the best model can be extracted and used directly, as demonstrated below:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "name, m_best = m.best_model(x)\n", + "print(name, \"was the best fit\")" + ] + },
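+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The extracted model is a regular progpy model, so it can be used anywhere the original models are used. As a small illustrative sketch (the variable names here are arbitrary), we can simulate it on its own with the same future loading function." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# m_best can be used directly, e.g., simulated like any other progpy model\n", + "results_best = m_best.simulate_to(t_end, future_loading)\n", + "print(\"Final voltage (best model only):\", results_best.outputs[-1][\"v\"])" + ] + },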
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results_moe = m.simulate_to(t_end, future_loading, t0=test_time[1] - test_time[0], x=x)\n", + "fig = plt.plot(\n", + " test_time[2:], data[RUN_ID][\"voltage\"][2:], color=\"green\", label=\"ground truth\"\n", + ")\n", + "fig = plt.plot(\n", + " results_moe.times[2:],\n", + " [z[\"v\"] for z in results_moe.outputs][2:],\n", + " color=\"red\",\n", + " label=\"moe\",\n", + ")\n", + "plt.xlabel(\"Time (s)\")\n", + "plt.ylabel(\"Voltage\")\n", + "plt.legend()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The fit here is much better. The `MoE` model learned which of the three models best fit the observed behavior.\n", + "\n", + "In a prognostic application, the scores will be updated each time you use a state estimator (so long as you provide the output as part of the input). Then when performing a prediction the scores aren't updated, since outputs are not known.\n", + "\n", + "An example of when this would be useful is for cases where there are three common degradation paths or \"modes\" rather than a single model with uncertainty to represent every mode, the three modes can be represented by three different models. Once enough of the degradation path has been observed the observed mode will be the one reported.\n", + "\n", + "If the model fit is expected to be stable (that is, the best model is not expected to change anymore). The best model can be extracted and used directly, as demonstrated below:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "name, m_best = m.best_model(x)\n", + "print(name, \" was the best fit\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this section we demonstrated a few methods for treating multiple models as a single model. This is of interest when there are multiple models of different systems which are interdependent (`CompositeModel`), multiple models of the same system that portray different parts of the behavior or different candidate representations (`EnsembleModel`), or multiple models of the same system that represent possible degradation modes (`MixtureOfExpertModel`).\n", + "\n", + "The next notebook __[07 State Estimation](07_State%20Estimation.ipynb)__ will be exploring state estimation, which is the process of estimating the current state of the system using sensor data and a prognostics model." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.11.0 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.0" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/_downloads/35855301c3858d0099590c7ec809888a/dataset.ipynb b/docs/_downloads/35855301c3858d0099590c7ec809888a/dataset.ipynb index fde5141d..1c19b95d 100644 --- a/docs/_downloads/35855301c3858d0099590c7ec809888a/dataset.ipynb +++ b/docs/_downloads/35855301c3858d0099590c7ec809888a/dataset.ipynb @@ -1,54 +1,121 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample downloading and using a NASA prognostics dataset.\n\nIn this example, a battery dataset is downloaded from the NASA PCoE data repository. This dataset is then accessed and plotted. \n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "DATASET_ID = 1\n\ndef run_example():\n # Step 1: Download and import the dataset for a single battery\n # Note: This may take some time\n from progpy.datasets import nasa_battery\n print('Downloading... ', end='')\n (desc, data) = nasa_battery.load_data(DATASET_ID)\n print('done')\n\n # We recommend saving the dataset to disk for future use\n # This way you dont have to download it each time\n import pickle\n pickle.dump((desc, data), open(f'dataset_{DATASET_ID}.pkl', 'wb'))\n\n # Step 2: Access the dataset description\n print(f'\\nDataset {DATASET_ID}')\n print(desc['description'])\n print(f'Procedure: {desc[\"procedure\"]}')\n\n # Step 3: Access the dataset data\n # Data is in format [run_id][time][variable]\n # For the battery the variables are \n # 0: relativeTime (since beginning of run)\n # 1: current (amps)\n # 2: voltage\n # 3: temperature (\u00b0C)\n # so that data[a][b, 3] is the temperature at time index b (relative to the start of the run) for run a\n print(f'\\nNumber of runs: {len(data)}')\n print(f'\\nAnalyzing run 4')\n print(f'number of time indices: {len(data[4])}')\n print(f\"Details of run 4: {desc['runs'][4]}\")\n\n # Plot the run\n import matplotlib.pyplot as plt\n plt.figure()\n plt.subplot(2, 1, 1)\n plt.plot(data[4][:, 0], data[4][:, 1])\n plt.ylabel('Current (A)')\n\n plt.subplot(2, 1, 2)\n plt.plot(data[4][:, 0], data[4][:, 2])\n plt.ylabel('Voltage (V)')\n plt.xlabel('Time (s)')\n plt.title('Run 4')\n\n # Graph all reference discharge profiles\n indices = [i for i, x in enumerate(desc['runs']) if 'reference discharge' in x['desc'] and 'rest' not in x['desc']]\n plt.figure()\n for i in indices:\n plt.plot(data[i][:, 0], data[i][:, 2], label=f\"Run {i}\")\n plt.title('Reference discharge profiles')\n plt.xlabel('Time (s)')\n plt.ylabel('Voltage (V)')\n plt.show()\n\n# This allows the module to be executed directly \nif __name__=='__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - 
"codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample downloading and using a NASA prognostics dataset.\n\nIn this example, a battery dataset is downloaded from the NASA PCoE data repository. This dataset is then accessed and plotted. \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "DATASET_ID = 1\n", + "\n", + "\n", + "def run_example():\n", + " # Step 1: Download and import the dataset for a single battery\n", + " # Note: This may take some time\n", + " from progpy.datasets import nasa_battery\n", + "\n", + " print(\"Downloading... \", end=\"\")\n", + " (desc, data) = nasa_battery.load_data(DATASET_ID)\n", + " print(\"done\")\n", + "\n", + " # We recommend saving the dataset to disk for future use\n", + " # This way you dont have to download it each time\n", + " import pickle\n", + "\n", + " pickle.dump((desc, data), open(f\"dataset_{DATASET_ID}.pkl\", \"wb\"))\n", + "\n", + " # Step 2: Access the dataset description\n", + " print(f\"\\nDataset {DATASET_ID}\")\n", + " print(desc[\"description\"])\n", + " print(f\"Procedure: {desc['procedure']}\")\n", + "\n", + " # Step 3: Access the dataset data\n", + " # Data is in format [run_id][time][variable]\n", + " # For the battery the variables are\n", + " # 0: relativeTime (since beginning of run)\n", + " # 1: current (amps)\n", + " # 2: voltage\n", + " # 3: temperature (°C)\n", + " # so that data[a][b, 3] is the temperature at time index b (relative to the start of the run) for run a\n", + " print(f\"\\nNumber of runs: {len(data)}\")\n", + " print(\"\\nAnalyzing run 4\")\n", + " print(f\"number of time indices: {len(data[4])}\")\n", + " print(f\"Details of run 4: {desc['runs'][4]}\")\n", + "\n", + " # Plot the run\n", + " import matplotlib.pyplot as plt\n", + "\n", + " plt.figure()\n", + " plt.subplot(2, 1, 1)\n", + " plt.plot(data[4][:, 0], data[4][:, 1])\n", + " plt.ylabel(\"Current (A)\")\n", + "\n", + " plt.subplot(2, 1, 2)\n", + " plt.plot(data[4][:, 0], data[4][:, 2])\n", + " plt.ylabel(\"Voltage (V)\")\n", + " plt.xlabel(\"Time (s)\")\n", + " plt.title(\"Run 4\")\n", + "\n", + " # Graph all reference discharge profiles\n", + " indices = [\n", + " i\n", + " for i, x in enumerate(desc[\"runs\"])\n", + " if \"reference discharge\" in x[\"desc\"] and \"rest\" not in x[\"desc\"]\n", + " ]\n", + " plt.figure()\n", + " for i in indices:\n", + " plt.plot(data[i][:, 0], data[i][:, 2], label=f\"Run {i}\")\n", + " plt.title(\"Reference discharge profiles\")\n", + " plt.xlabel(\"Time (s)\")\n", + " plt.ylabel(\"Voltage (V)\")\n", + " plt.show()\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + 
"pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/379b8420d5a1e67e122db8eacfc78796/sim_powertrain.py b/docs/_downloads/379b8420d5a1e67e122db8eacfc78796/sim_powertrain.py index d511c071..5c0e0714 100644 --- a/docs/_downloads/379b8420d5a1e67e122db8eacfc78796/sim_powertrain.py +++ b/docs/_downloads/379b8420d5a1e67e122db8eacfc78796/sim_powertrain.py @@ -2,11 +2,12 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a powertrain being simulated for a set amount of time. +Example of a powertrain being simulated for a set amount of time. """ from prog_models.models import Powertrain, ESC, DCMotor + def run_example(): # Create a model object esc = ESC() @@ -15,16 +16,16 @@ def run_example(): # Define future loading function - 100% duty all the time def future_loading(t, x=None): - return powertrain.InputContainer({ - 'duty': 1, - 'v': 23 - }) - + return powertrain.InputContainer({"duty": 1, "v": 23}) + # Simulate to threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') - simulated_results = powertrain.simulate_to(2, future_loading, dt=2e-5, save_freq=0.1, print=True) + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") + simulated_results = powertrain.simulate_to( + 2, future_loading, dt=2e-5, save_freq=0.1, print=True + ) + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/3c5224a1564fb7226c7f5deb8cdd9f3e/prog_model_template.py b/docs/_downloads/3c5224a1564fb7226c7f5deb8cdd9f3e/prog_model_template.py index 47d16ed7..68559d2b 100644 --- a/docs/_downloads/3c5224a1564fb7226c7f5deb8cdd9f3e/prog_model_template.py +++ b/docs/_downloads/3c5224a1564fb7226c7f5deb8cdd9f3e/prog_model_template.py @@ -14,15 +14,14 @@ from prog_models import PrognosticsModel + # REPLACE THIS WITH DERIVED PARAMETER CALLBACKS (IF ANY) # See examples.derived_params # # Each function defines one or more derived parameters as a function of the other parameters. 
def example_callback(params): # Return format: dict of key: new value pair for at least one derived parameter - return { - "Example Parameter 1": params["Example Parameter 2"]-3 - } + return {"Example Parameter 1": params["Example Parameter 2"] - 3} class ProgModelTemplate(PrognosticsModel): @@ -34,58 +33,50 @@ class ProgModelTemplate(PrognosticsModel): # is_vectorized = True # REPLACE THE FOLLOWING LIST WITH EVENTS BEING PREDICTED - events = [ - 'Example Event' - ] - + events = ["Example Event"] + # REPLACE THE FOLLOWING LIST WITH INPUTS (LOADING) - inputs = [ - 'Example Input 1', - 'Example Input 2' - ] + inputs = ["Example Input 1", "Example Input 2"] # REPLACE THE FOLLOWING LIST WITH STATES states = [ - 'Examples State 1', - 'Examples State 2', - 'Examples State 3', - 'Examples State 4' + "Examples State 1", + "Examples State 2", + "Examples State 3", + "Examples State 4", ] # REPLACE THE FOLLOWING LIST WITH OUTPUTS (MEASURED VALUES) - outputs = [ - 'Example Output 1', - 'Example Output 2' - ] + outputs = ["Example Output 1", "Example Output 2"] # REPLACE THE FOLLOWING LIST WITH PERFORMANCE METRICS # i.e., NON-MEASURED VALUES THAT ARE A FUNCTION OF STATE # e.g., maximum torque of a motor performance_metric_keys = [ - 'metric 1', + "metric 1", ] # REPLACE THE FOLLOWING LIST WITH CONFIGURED PARAMETERS # Note- everything required to configure the model # should be in parameters- this is to enable the serialization features default_parameters = { # Set default parameters - 'Example Parameter 1': 0, - 'Example Parameter 2': 3, - 'process_noise': 0.1, # Process noise - 'x0': { # Initial state - 'Examples State 1': 1.5, - 'Examples State 2': -935, - 'Examples State 3': 42.1, - 'Examples State 4': 0 - } + "Example Parameter 1": 0, + "Example Parameter 2": 3, + "process_noise": 0.1, # Process noise + "x0": { # Initial state + "Examples State 1": 1.5, + "Examples State 2": -935, + "Examples State 3": 42.1, + "Examples State 4": 0, + }, } # REPLACE THE FOLLOWING WITH STATE BOUNDS IF NEEDED state_limits = { # 'state': (lower_limit, upper_limit) # only specify for states with limits - 'Examples State 1': (0, inf), - 'Examples State 4': (-2, 3) + "Examples State 1": (0, inf), + "Examples State 4": (-2, 3), } # Identify callbacks used by this model @@ -95,9 +86,7 @@ class ProgModelTemplate(PrognosticsModel): # And callbacks are one or more callback functions that define parameters that are # derived from that parameter # REPLACE THIS WITH ACTUAL DERIVED PARAMETER CALLBACKS - param_callbacks = { - "Example Parameter 2": [example_callback] - } + param_callbacks = {"Example Parameter 2": [example_callback]} # UNCOMMENT THIS FUNCTION IF YOU NEED CONSTRUCTION LOGIC (E.G., INPUT VALIDATION) # def __init__(self, **kwargs): @@ -176,7 +165,7 @@ class ProgModelTemplate(PrognosticsModel): # dx : StateContainer # First derivative of state, with keys defined by model.states # e.g., dx = {'abc': 3.1, 'def': -2.003} given states = ['abc', 'def'] - # + # # Example # ------- # | m = DerivProgModel() # Replace with specific model being simulated @@ -233,7 +222,7 @@ def output(self, x): x : StateContainer state, with keys defined by model.states e.g., x = {'abc': 332.1, 'def': 221.003} given states = ['abc', 'def'] - + Returns ------- z : OutputContainer @@ -243,10 +232,7 @@ def output(self, x): # REPLACE BELOW WITH LOGIC TO CALCULATE OUTPUTS # NOTE: KEYS FOR z MATCH 'outputs' LIST ABOVE - z = self.OutputContainer({ - 'Example Output 1': 0.0, - 'Example Output 2': 0.0 - }) + z = self.OutputContainer({"Example Output 
1": 0.0, "Example Output 2": 0.0}) return z @@ -259,7 +245,7 @@ def event_state(self, x): x : StateContainer state, with keys defined by model.states e.g., x = {'abc': 332.1, 'def': 221.003} given states = ['abc', 'def'] - + Returns ------- event_state : dict @@ -269,12 +255,10 @@ def event_state(self, x): # REPLACE BELOW WITH LOGIC TO CALCULATE EVENT STATES # NOTE: KEYS FOR event_x MATCH 'events' LIST ABOVE - event_x = { - 'Example Event': 0.95 - } + event_x = {"Example Event": 0.95} return event_x - + # Note: Thresholds met equation below is not strictly necessary. # By default, threshold_met will check if event_state is ≤ 0 for each event def threshold_met(self, x): @@ -286,7 +270,7 @@ def threshold_met(self, x): x : StateContainer state, with keys defined by model.states e.g., x = {'abc': 332.1, 'def': 221.003} given states = ['abc', 'def'] - + Returns ------- thresholds_met : dict @@ -296,9 +280,7 @@ def threshold_met(self, x): # REPLACE BELOW WITH LOGIC TO CALCULATE IF THRESHOLDS ARE MET # NOTE: KEYS FOR t_met MATCH 'events' LIST ABOVE - t_met = { - 'Example Event': False - } + t_met = {"Example Event": False} return t_met @@ -311,7 +293,7 @@ def performance_metrics(self, x) -> dict: x : StateContainer state, with keys defined by model.states \n e.g., x = m.StateContainer({'abc': 332.1, 'def': 221.003}) given states = ['abc', 'def'] - + Returns ------- pm : dict @@ -329,9 +311,7 @@ def performance_metrics(self, x) -> dict: # REPLACE BELOW WITH LOGIC TO CALCULATE PERFORMANCE METRICS # NOTE: KEYS FOR p_metrics MATCH 'performance_metric_keys' LIST ABOVE - p_metrics = { - 'metric1': 23 - } + p_metrics = {"metric1": 23} return p_metrics # V UNCOMMENT THE BELOW FUNCTION FOR DIRECT FUNCTIONS V diff --git a/docs/_downloads/3cfbcbfed775f065c07dae123f7e63a1/growth.py b/docs/_downloads/3cfbcbfed775f065c07dae123f7e63a1/growth.py index 28587444..7411784a 100644 --- a/docs/_downloads/3cfbcbfed775f065c07dae123f7e63a1/growth.py +++ b/docs/_downloads/3cfbcbfed775f065c07dae123f7e63a1/growth.py @@ -2,35 +2,36 @@ # National Aeronautics and Space Administration. All Rights Reserved. 
""" - Example demonstrating the Paris Law Crack Growth Equation +Example demonstrating the Paris Law Crack Growth Equation """ -from progpy.models.paris_law import ParisLawCrackGrowth +from progpy.models.paris_law import ParisLawCrackGrowth import matplotlib.pyplot as plt import csv import os -def run_example(): + +def run_example(): # Step 1: Create a model object - m = ParisLawCrackGrowth(process_noise = 0) - - # Step 2: Define future loading function + m = ParisLawCrackGrowth(process_noise=0) + + # Step 2: Define future loading function def future_loading(t, x=None): - #variable (piece-wise) future loading scheme - #inputs are ['k_min', 'k_max'] - if (t < 500): + # variable (piece-wise) future loading scheme + # inputs are ['k_min', 'k_max'] + if t < 500: k_min = 12 k_max = 24 - elif (t < 750): + elif t < 750: k_min = 8 k_max = 32 else: k_min = 0 k_max = 28 - return m.InputContainer({'k_min': k_min, 'k_max': k_max}) + return m.InputContainer({"k_min": k_min, "k_max": k_max}) # Step 3: Estimate parameters - # We do not know the model parameters for this system, + # We do not know the model parameters for this system, # so we need to estimate it using data collected from the system # First we have to import some data from the real system # This is what we use to estimate parameters @@ -38,55 +39,60 @@ def future_loading(t, x=None): inputs = [] outputs = [] - #Finds file path - csv_dir = os.path.join(os.path.dirname(__file__), 'growth.csv') + # Finds file path + csv_dir = os.path.join(os.path.dirname(__file__), "growth.csv") - #Reads csv file + # Reads csv file try: - with open(csv_dir, newline='') as csvfile: - data = csv.reader(csvfile, delimiter=',', quotechar='|' , quoting=csv.QUOTE_NONNUMERIC) + with open(csv_dir, newline="") as csvfile: + data = csv.reader( + csvfile, delimiter=",", quotechar="|", quoting=csv.QUOTE_NONNUMERIC + ) for row in data: times.append(row[0]) - inputs.append({'k_min': row[1], 'k_max': row[2]}) - outputs.append({'c_l': row[3]}) + inputs.append({"k_min": row[1], "k_max": row[2]}) + outputs.append({"c_l": row[3]}) except FileNotFoundError: print("No data file found") # Estimates the model parameters - keys = ['c', 'm'] + keys = ["c", "m"] - print('Model configuration before') + print("Model configuration before") for key in keys: print("-", key, m.parameters[key]) - print(' Error: ', m.calc_error(times, inputs, outputs, dt=10)) + print(" Error: ", m.calc_error(times, inputs, outputs, dt=10)) m.estimate_params([(times, inputs, outputs)], keys, dt=10) - print('\nOptimized configuration') + print("\nOptimized configuration") for key in keys: print("-", key, m.parameters[key]) - print(' Error: ', m.calc_error(times, inputs, outputs, dt=10)) + print(" Error: ", m.calc_error(times, inputs, outputs, dt=10)) # Step 4: Simulate to threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") options = { - 'save_freq': 10, # Frequency at which results are saved - 'dt': 10, # Timestep - 'print': True, - 'horizon': 1e5, # Horizon + "save_freq": 10, # Frequency at which results are saved + "dt": 10, # Timestep + "print": True, + "horizon": 1e5, # Horizon } - (times, inputs, _, outputs, event_states) = m.simulate_to_threshold(future_loading, **options) + (times, inputs, _, outputs, event_states) = m.simulate_to_threshold( + future_loading, **options + ) # Step 5: Plot Results # crack length # plot event state - 
inputs.plot(ylabel='Stress Intensity') - event_states.plot(ylabel= 'CGF') - outputs.plot(ylabel= {'c_l': "Crack Length"}, compact= False) + inputs.plot(ylabel="Stress Intensity") + event_states.plot(ylabel="CGF") + outputs.plot(ylabel={"c_l": "Crack Length"}, compact=False) plt.show() -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/3d792d3f35f8f6ea97ee4e47fcc21d55/sim.ipynb b/docs/_downloads/3d792d3f35f8f6ea97ee4e47fcc21d55/sim.ipynb index 717dcfe4..90b10435 100644 --- a/docs/_downloads/3d792d3f35f8f6ea97ee4e47fcc21d55/sim.ipynb +++ b/docs/_downloads/3d792d3f35f8f6ea97ee4e47fcc21d55/sim.ipynb @@ -1,54 +1,112 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample of a battery being simulated for a set period of time and then till threshold is met.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from progpy.models import BatteryCircuit as Battery\n# VVV Uncomment this to use Electro Chemistry Model VVV\n# from progpy.models import BatteryElectroChem as Battery\n\ndef run_example(): \n # Step 1: Create a model object\n batt = Battery()\n\n # Step 2: Define future loading function \n def future_loading(t, x=None):\n # Variable (piece-wise) future loading scheme \n if (t < 600):\n i = 2\n elif (t < 900):\n i = 1\n elif (t < 1800):\n i = 4\n elif (t < 3000):\n i = 2 \n else:\n i = 3\n return batt.InputContainer({'i': i})\n # simulate for 200 seconds\n print('\\n\\n------------------------------------------------')\n print('Simulating for 200 seconds\\n\\n')\n simulated_results = batt.simulate_to(200, future_loading, print = True, progress = True)\n\n # Simulate to threshold\n print('\\n\\n------------------------------------------------')\n print('Simulating to threshold\\n\\n')\n options = {\n 'save_freq': 100, # Frequency at which results are saved\n 'dt': 2, # Timestep\n 'print': True,\n 'progress': True\n }\n simulated_results = batt.simulate_to_threshold(future_loading, **options)\n\n # Alternately, you can set a max step size and allow step size to be adjusted automatically\n options['dt'] = ('auto', 2) # set step size automatically, with a max of 2 seconds\n options['save_freq'] = 201 # Save every 201 seconds\n options['save_pts'] = [250, 772, 1023] # Special points we sould like to see reported\n simulated_results = batt.simulate_to_threshold(future_loading, **options)\n # Note that even though the step size is 2, the odd points in the save frequency are met perfectly, dt is adjusted automatically to capture the save points\n\n # You can also change the integration method. 
For example:\n options['integration_method'] = 'rk4' # Using Runge-Kutta 4th order\n simulated_results_rk4 = batt.simulate_to_threshold(future_loading, **options)\n\n# This allows the module to be executed directly \nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample of a battery being simulated for a set period of time and then till threshold is met.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from progpy.models import BatteryCircuit as Battery\n", + "# VVV Uncomment this to use Electro Chemistry Model VVV\n", + "# from progpy.models import BatteryElectroChem as Battery\n", + "\n", + "\n", + "def run_example():\n", + " # Step 1: Create a model object\n", + " batt = Battery()\n", + "\n", + " # Step 2: Define future loading function\n", + " def future_loading(t, x=None):\n", + " # Variable (piece-wise) future loading scheme\n", + " if t < 600:\n", + " i = 2\n", + " elif t < 900:\n", + " i = 1\n", + " elif t < 1800:\n", + " i = 4\n", + " elif t < 3000:\n", + " i = 2\n", + " else:\n", + " i = 3\n", + " return batt.InputContainer({\"i\": i})\n", + "\n", + " # simulate for 200 seconds\n", + " print(\"\\n\\n------------------------------------------------\")\n", + " print(\"Simulating for 200 seconds\\n\\n\")\n", + " simulated_results = batt.simulate_to(200, future_loading, print=True, progress=True)\n", + "\n", + " # Simulate to threshold\n", + " print(\"\\n\\n------------------------------------------------\")\n", + " print(\"Simulating to threshold\\n\\n\")\n", + " options = {\n", + " \"save_freq\": 100, # Frequency at which results are saved\n", + " \"dt\": 2, # Timestep\n", + " \"print\": True,\n", + " \"progress\": True,\n", + " }\n", + " simulated_results = batt.simulate_to_threshold(future_loading, **options)\n", + "\n", + " # Alternately, you can set a max step size and allow step size to be adjusted automatically\n", + " options[\"dt\"] = (\"auto\", 2) # set step size automatically, with a max of 2 seconds\n", + " options[\"save_freq\"] = 201 # Save every 201 seconds\n", + " options[\"save_pts\"] = [\n", + " 250,\n", + " 772,\n", + " 1023,\n", + " ] # Special points we sould like to see reported\n", + " simulated_results = batt.simulate_to_threshold(future_loading, **options)\n", + " # Note that even though the step size is 2, the odd points in the save frequency are met perfectly, dt is adjusted automatically to capture the save points\n", + "\n", + " # You can also change the integration method. 
For example:\n", + " options[\"integration_method\"] = \"rk4\" # Using Runge-Kutta 4th order\n", + " simulated_results_rk4 = batt.simulate_to_threshold(future_loading, **options)\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/3e4a5d9b856c25c6b0bf84730581c560/generate_surrogate.py b/docs/_downloads/3e4a5d9b856c25c6b0bf84730581c560/generate_surrogate.py index b9b9e23f..359df815 100644 --- a/docs/_downloads/3e4a5d9b856c25c6b0bf84730581c560/generate_surrogate.py +++ b/docs/_downloads/3e4a5d9b856c25c6b0bf84730581c560/generate_surrogate.py @@ -6,166 +6,199 @@ .. dropdown:: More details - In this example, an instance of a battery model is created. The DMD DataModel is used to generate a surrogate of this battery model for specific loading schemes. This surrogate can be used in place of the original model, approximating it's behavior. Frequently, surrogate models run faster than the original, at the cost of some accuracy. The performance of the two models are then compared. + In this example, an instance of a battery model is created. The DMD DataModel is used to generate a surrogate of this battery model for specific loading schemes. This surrogate can be used in place of the original model, approximating it's behavior. Frequently, surrogate models run faster than the original, at the cost of some accuracy. The performance of the two models are then compared. """ import matplotlib.pyplot as plt from prog_models.models import BatteryElectroChemEOD as Battery -def run_example(): - ### Example 1: Standard DMD Application + +def run_example(): + ### Example 1: Standard DMD Application ## Step 1: Create a model object batt = Battery() - ## Step 2: Define future loading functions for training data - # Here, we define two specific loading profiles. These could also be generated programmatically, for as many loading profiles as desired + ## Step 2: Define future loading functions for training data + # Here, we define two specific loading profiles. 
These could also be generated programmatically, for as many loading profiles as desired def future_loading_1(t, x=None): - # Variable (piece-wise) future loading scheme - if (t < 500): + # Variable (piece-wise) future loading scheme + if t < 500: i = 3 - elif (t < 1000): + elif t < 1000: i = 2 - elif (t < 1500): + elif t < 1500: i = 0.5 else: i = 4.5 - return batt.InputContainer({'i': i}) - + return batt.InputContainer({"i": i}) + def future_loading_2(t, x=None): - # Variable (piece-wise) future loading scheme - if (t < 300): + # Variable (piece-wise) future loading scheme + if t < 300: i = 2 - elif (t < 800): + elif t < 800: i = 3.5 - elif (t < 1300): + elif t < 1300: i = 4 - elif (t < 1600): + elif t < 1600: i = 1.5 else: i = 5 - return batt.InputContainer({'i': i}) - + return batt.InputContainer({"i": i}) + load_functions = [future_loading_1, future_loading_2] - ## Step 3: generate surrogate model + ## Step 3: generate surrogate model # Simulation options for training data and surrogate model generation # Note: here dt is less than save_freq. This means the model will iterate forward multiple steps per saved point. - # This is commonly done to ensure accuracy. + # This is commonly done to ensure accuracy. options_surrogate = { - 'save_freq': 1, # For DMD, this value is the time step for which the surrogate model is generated - 'dt': 0.1, # For DMD, this value is the time step of the training data - 'trim_data_to': 0.7 # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model + "save_freq": 1, # For DMD, this value is the time step for which the surrogate model is generated + "dt": 0.1, # For DMD, this value is the time step of the training data + "trim_data_to": 0.7, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model } # Set noise in Prognostics Model, default for surrogate model is also this value - batt.parameters['process_noise'] = 0 + batt.parameters["process_noise"] = 0 - # Generate surrogate model - surrogate = batt.generate_surrogate(load_functions,**options_surrogate) + # Generate surrogate model + surrogate = batt.generate_surrogate(load_functions, **options_surrogate) - ## Step 4: Use surrogate model + ## Step 4: Use surrogate model # Simulation options for implementation of surrogate model options_sim = { - 'save_freq': 1 # Frequency at which results are saved, or equivalently time step in results + "save_freq": 1 # Frequency at which results are saved, or equivalently time step in results } - # Define loading profile + # Define loading profile def future_loading(t, x=None): - if (t < 600): + if t < 600: i = 3 - elif (t < 1000): + elif t < 1000: i = 2 - elif (t < 1500): + elif t < 1500: i = 1.5 else: i = 4 - return batt.InputContainer({'i': i}) + return batt.InputContainer({"i": i}) # Simulate to threshold using DMD approximation - simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim) + simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim) # Calculate Error - MSE = batt.calc_error(simulated_results.times, simulated_results.inputs, simulated_results.outputs) - print('Example 1 MSE:',MSE) + MSE = batt.calc_error( + simulated_results.times, simulated_results.inputs, simulated_results.outputs + ) + print("Example 1 MSE:", MSE) # Not a very good approximation # Plot results - simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 1 Input') - 
simulated_results.outputs.plot(ylabel = 'Predicted Outputs (temperature and voltage)',title='Example 1 Predicted Outputs') - simulated_results.event_states.plot(ylabel = 'Predicted State of Charge', title='Example 1 Predicted SOC') + simulated_results.inputs.plot(ylabel="Current (amps)", title="Example 1 Input") + simulated_results.outputs.plot( + ylabel="Predicted Outputs (temperature and voltage)", + title="Example 1 Predicted Outputs", + ) + simulated_results.event_states.plot( + ylabel="Predicted State of Charge", title="Example 1 Predicted SOC" + ) # To visualize the accuracy of the approximation, run the high-fidelity model options_hf = { - 'dt': 0.1, - 'save_freq': 1, + "dt": 0.1, + "save_freq": 1, } - high_fidelity_results = batt.simulate_to_threshold(future_loading,**options_hf) + high_fidelity_results = batt.simulate_to_threshold(future_loading, **options_hf) # Save voltage results to compare - voltage_dmd = [simulated_results.outputs[iter1]['v'] for iter1 in range(len(simulated_results.times))] - voltage_hf = [high_fidelity_results.outputs[iter2]['v'] for iter2 in range(len(high_fidelity_results.times))] + voltage_dmd = [ + simulated_results.outputs[iter1]["v"] + for iter1 in range(len(simulated_results.times)) + ] + voltage_hf = [ + high_fidelity_results.outputs[iter2]["v"] + for iter2 in range(len(high_fidelity_results.times)) + ] plt.subplots() - plt.plot(simulated_results.times,voltage_dmd,'-b',label='DMD approximation') - plt.plot(high_fidelity_results.times, voltage_hf,'--r',label='High fidelity result') + plt.plot(simulated_results.times, voltage_dmd, "-b", label="DMD approximation") + plt.plot( + high_fidelity_results.times, voltage_hf, "--r", label="High fidelity result" + ) plt.legend() - plt.title('Comparing DMD approximation to high-fidelity model results') + plt.title("Comparing DMD approximation to high-fidelity model results") - ### Example 2: Add process_noise to the surrogate model - # Without re-generating the surrogate model, we can re-define the process_noise to be higher than the high-fidelity model (since the surrogate model is less accurate) - surrogate.parameters['process_noise'] = 1e-04 - surrogate.parameters['process_noise_dist'] = 'normal' + ### Example 2: Add process_noise to the surrogate model + # Without re-generating the surrogate model, we can re-define the process_noise to be higher than the high-fidelity model (since the surrogate model is less accurate) + surrogate.parameters["process_noise"] = 1e-04 + surrogate.parameters["process_noise_dist"] = "normal" - # Simulate to threshold using DMD approximation - simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim) + # Simulate to threshold using DMD approximation + simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim) # Plot results - simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 2 Input') - simulated_results.outputs.plot(keys=['v'],ylabel = 'Predicted Voltage (volts)', title='Example 2 Predicted Outputs') - simulated_results.event_states.plot(ylabel = 'Predicted State of Charge', title='Example 2 Predicted SOC') + simulated_results.inputs.plot(ylabel="Current (amps)", title="Example 2 Input") + simulated_results.outputs.plot( + keys=["v"], + ylabel="Predicted Voltage (volts)", + title="Example 2 Predicted Outputs", + ) + simulated_results.event_states.plot( + ylabel="Predicted State of Charge", title="Example 2 Predicted SOC" + ) ### Example 3: Generate surrogate model with a subset of internal states, inputs, and/or 
outputs - # Note: we use the same loading profiles as defined in Ex. 1 + # Note: we use the same loading profiles as defined in Ex. 1 - ## Generate surrogate model + ## Generate surrogate model # Simulation options for training data and surrogate model generation options_surrogate = { - 'save_freq': 1, # For DMD, this value is the time step for which the surrogate model is generated - 'dt': 0.1, # For DMD, this value is the time step of the training data - 'trim_data': 1, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model - 'state_keys': ['Vsn','Vsp','tb'], # Define internal states to be included in surrogate model - 'output_keys': ['v'] # Define outputs to be included in surrogate model + "save_freq": 1, # For DMD, this value is the time step for which the surrogate model is generated + "dt": 0.1, # For DMD, this value is the time step of the training data + "trim_data": 1, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model + "state_keys": [ + "Vsn", + "Vsp", + "tb", + ], # Define internal states to be included in surrogate model + "output_keys": ["v"], # Define outputs to be included in surrogate model } # Set noise in Prognostics Model, default for surrogate model is also this value - batt.parameters['process_noise'] = 0 + batt.parameters["process_noise"] = 0 - # Generate surrogate model - surrogate = batt.generate_surrogate(load_functions,**options_surrogate) + # Generate surrogate model + surrogate = batt.generate_surrogate(load_functions, **options_surrogate) - ## Use surrogate model - # The surrogate model can now be used anywhere the original model is used. It is interchangeable with the original model. - # The surrogate model results will be faster but less accurate than the original model. + ## Use surrogate model + # The surrogate model can now be used anywhere the original model is used. It is interchangeable with the original model. + # The surrogate model results will be faster but less accurate than the original model. 
# Simulation options for implementation of surrogate model options_sim = { - 'save_freq': 1 # Frequency at which results are saved, or equivalently time step in results + "save_freq": 1 # Frequency at which results are saved, or equivalently time step in results } # Simulate to threshold using DMD approximation - simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim) + simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim) # Calculate Error - MSE = batt.calc_error(simulated_results.times, simulated_results.inputs, simulated_results.outputs) - print('Example 3 MSE:',MSE) + MSE = batt.calc_error( + simulated_results.times, simulated_results.inputs, simulated_results.outputs + ) + print("Example 3 MSE:", MSE) # Plot results - simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 3 Input') - simulated_results.outputs.plot(ylabel = 'Outputs (voltage)',title='Example 3 Predicted Output') - simulated_results.event_states.plot(ylabel = 'State of Charge',title='Example 3 Predicted SOC') + simulated_results.inputs.plot(ylabel="Current (amps)", title="Example 3 Input") + simulated_results.outputs.plot( + ylabel="Outputs (voltage)", title="Example 3 Predicted Output" + ) + simulated_results.event_states.plot( + ylabel="State of Charge", title="Example 3 Predicted SOC" + ) plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/4020f035b915e34fe49e1d40b9aff42b/particle_filter_battery_example.py b/docs/_downloads/4020f035b915e34fe49e1d40b9aff42b/particle_filter_battery_example.py index 737e4f86..4d676da3 100644 --- a/docs/_downloads/4020f035b915e34fe49e1d40b9aff42b/particle_filter_battery_example.py +++ b/docs/_downloads/4020f035b915e34fe49e1d40b9aff42b/particle_filter_battery_example.py @@ -9,6 +9,7 @@ from prog_algs import * from progpy.models import BatteryElectroChemEOD + def run_example(): ## Setup # Save battery model @@ -16,24 +17,22 @@ def run_example(): dt = 1 # Process noise Q_vars = { - 'tb': 1, - 'Vo': 0.01, - 'Vsn': 0.01, - 'Vsp': 0.01, - 'qnB': 1, - 'qnS': 1, - 'qpB': 1, - 'qpS': 1 + "tb": 1, + "Vo": 0.01, + "Vsn": 0.01, + "Vsp": 0.01, + "qnB": 1, + "qnS": 1, + "qpB": 1, + "qpS": 1, } # Measurement noise - R_vars = { - 't': 2, - 'v': 0.02 - } - battery = BatteryElectroChemEOD(process_noise= Q_vars, - measurement_noise = R_vars, - dt = dt) + R_vars = {"t": 2, "v": 0.02} + battery = BatteryElectroChemEOD( + process_noise=Q_vars, measurement_noise=R_vars, dt=dt + ) load = battery.InputContainer({"i": 1}) # Optimization + def future_loading(t, x=None): return load @@ -41,33 +40,70 @@ def future_loading(t, x=None): start_u = future_loading(0) start_x = battery.initialize(start_u) start_y = battery.output(start_x) - sim_results = battery.simulate_to_threshold(future_loading, start_y, save_freq = 1) + sim_results = battery.simulate_to_threshold(future_loading, start_y, save_freq=1) # Run particle filter all_particles = [] - n_times = int(np.round(np.random.uniform(len(sim_results.times)*.25,len(sim_results.times)*.45,1)))# Random current time + n_times = int( + np.round( + np.random.uniform( + len(sim_results.times) * 0.25, len(sim_results.times) * 0.45, 1 + ) + ) + ) # Random current time for i in range(n_times): if i == 0: - batt_pf = state_estimators.ParticleFilter(model = battery, x0 = sim_results.states[i], num_particles = 250) + batt_pf = 
state_estimators.ParticleFilter( + model=battery, x0=sim_results.states[i], num_particles=250 + ) else: - batt_pf.estimate(t = sim_results.times[i], u = sim_results.inputs[i], z = sim_results.outputs[i]) + batt_pf.estimate( + t=sim_results.times[i], + u=sim_results.inputs[i], + z=sim_results.outputs[i], + ) all_particles.append(batt_pf.particles) # Mean of the particles alpha = 0.05 - states_vsn = [s['tb'] for s in sim_results.states] - pf_mean = [{key: np.mean(ps[key]) for key in battery.states} for ps in all_particles] - pf_low = [{key: np.quantile(ps[key], alpha / 2.0) for key in battery.states} for ps in all_particles] - pf_upp = [{key: np.quantile(ps[key], 1.0 - alpha / 2.0) for key in battery.states} for ps in all_particles] + states_vsn = [s["tb"] for s in sim_results.states] + pf_mean = [ + {key: np.mean(ps[key]) for key in battery.states} for ps in all_particles + ] + pf_low = [ + {key: np.quantile(ps[key], alpha / 2.0) for key in battery.states} + for ps in all_particles + ] + pf_upp = [ + {key: np.quantile(ps[key], 1.0 - alpha / 2.0) for key in battery.states} + for ps in all_particles + ] print("First State:", pf_mean[0]) print("Current State:", pf_mean[-1]) - plt.plot(sim_results.times[:n_times],[p['tb'] for p in pf_mean],linewidth=0.7,color="blue") - plt.plot(sim_results.times[:n_times], states_vsn[:n_times],"--",linewidth=0.7,color="red") - plt.fill_between(sim_results.times[:n_times],[p['tb'] for p in pf_low],[p['tb'] for p in pf_upp],alpha=0.5,color="blue") + plt.plot( + sim_results.times[:n_times], + [p["tb"] for p in pf_mean], + linewidth=0.7, + color="blue", + ) + plt.plot( + sim_results.times[:n_times], + states_vsn[:n_times], + "--", + linewidth=0.7, + color="red", + ) + plt.fill_between( + sim_results.times[:n_times], + [p["tb"] for p in pf_low], + [p["tb"] for p in pf_upp], + alpha=0.5, + color="blue", + ) plt.show() - -# This allows the module to be executed directly -if __name__ == '__main__': - run_example() \ No newline at end of file + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/40eed4cd02f1dee80e6faaa22f99843b/composite_model.py b/docs/_downloads/40eed4cd02f1dee80e6faaa22f99843b/composite_model.py index cd8772c6..bf6baa66 100644 --- a/docs/_downloads/40eed4cd02f1dee80e6faaa22f99843b/composite_model.py +++ b/docs/_downloads/40eed4cd02f1dee80e6faaa22f99843b/composite_model.py @@ -4,12 +4,13 @@ """ Example illustrating how to use the CompositeModel class to create a composite model from multiple models. -This example creates a composite model of a DC motor with an Electronic Speed Controller and a propeller load. The three composite models are interrelated. The created composite model describes the nature of these interconnections. The resulting powertrain model is then simulated forward with time and the results are plotted. +This example creates a composite model of a DC motor with an Electronic Speed Controller and a propeller load. The three composite models are interrelated. The created composite model describes the nature of these interconnections. The resulting powertrain model is then simulated forward with time and the results are plotted. 
""" from progpy.models import DCMotor, ESC, PropellerLoad from prog_models import CompositeModel + def run_example(): # First, lets define the composite models m_motor = DCMotor() @@ -19,35 +20,37 @@ def run_example(): # Now let's combine them into a single composite model describing the behavior of a powertrain # This model will then behave as a single model m_powertrain = CompositeModel( - (m_esc, m_load, m_motor), - connections = [ - ('DCMotor.theta', 'ESC.theta'), - ('ESC.v_a', 'DCMotor.v_a'), - ('ESC.v_b', 'DCMotor.v_b'), - ('ESC.v_c', 'DCMotor.v_c'), - ('PropellerLoad.t_l', 'DCMotor.t_l'), - ('DCMotor.v_rot', 'PropellerLoad.v_rot')], - outputs = {'DCMotor.v_rot', 'DCMotor.theta'}) - + (m_esc, m_load, m_motor), + connections=[ + ("DCMotor.theta", "ESC.theta"), + ("ESC.v_a", "DCMotor.v_a"), + ("ESC.v_b", "DCMotor.v_b"), + ("ESC.v_c", "DCMotor.v_c"), + ("PropellerLoad.t_l", "DCMotor.t_l"), + ("DCMotor.v_rot", "PropellerLoad.v_rot"), + ], + outputs={"DCMotor.v_rot", "DCMotor.theta"}, + ) + # Print out the inputs, states, and outputs of the composite model - print('Composite model of DCMotor, ESC, and Propeller load') - print('inputs: ', m_powertrain.inputs) - print('states: ', m_powertrain.states) - print('outputs: ', m_powertrain.outputs) + print("Composite model of DCMotor, ESC, and Propeller load") + print("inputs: ", m_powertrain.inputs) + print("states: ", m_powertrain.states) + print("outputs: ", m_powertrain.outputs) # Define future loading function - 100% duty all the time def future_loading(t, x=None): - return m_powertrain.InputContainer({ - 'ESC.duty': 1, - 'ESC.v': 23 - }) - + return m_powertrain.InputContainer({"ESC.duty": 1, "ESC.v": 23}) + # Simulate to threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') - simulated_results = m_powertrain.simulate_to(2, future_loading, dt=2e-5, save_freq=0.1, print=True) + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") + simulated_results = m_powertrain.simulate_to( + 2, future_loading, dt=2e-5, save_freq=0.1, print=True + ) simulated_results.outputs.plot() -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/412560afd93e3e54078df8279ff54887/04_New Models.ipynb b/docs/_downloads/412560afd93e3e54078df8279ff54887/04_New Models.ipynb index 4618ee3f..328e7eda 100644 --- a/docs/_downloads/412560afd93e3e54078df8279ff54887/04_New Models.ipynb +++ b/docs/_downloads/412560afd93e3e54078df8279ff54887/04_New Models.ipynb @@ -4,14 +4,39 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# 4. Defining new Physics-Based Prognostic Models" + "# 4. Defining New Physics-Based Prognostic Models" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "All of the past sections describe how to use an existing model. In this section we will describe how to create a new model. This section specifically describes creating a new physics-based model. For training and creating data-driven models see 5. Data-driven Models." + "All of the previous sections describe how to use an existing model. In this section, we will explore how to create a new physics-based model. \n", + "\n", + "A physics-based model is a model where behavior is described by the physics of the system. Physics-based models are typically parameterized, so that exact behavior of the system can be configured or learned (through parameter estimation). 
\n", + "\n", + "For training and creating data-driven models, see __[05 Data Driven](05_Data%20Driven.ipynb)__." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "* [Linear Models](#Linear-Models)\n", + "* [State Transition Models](#State-Transition-Models)\n", + "* [Direct Models](#Direct-Models)\n", + "* [Advanced Features](#Advanced-Features)\n", + " * [Derived Parameters](#Derived-Parameters)\n", + " * [Matrix Data Access](#Matrix-Data-Access)\n", + " * [State Limits](#State-Limits)\n", + " * [Custom Events](#Custom-Events)\n", + " * [Serialization](#Serialization)\n", + "* [Simplified Battery Model Example](#Simplified-Battery-Model-Example)\n", + " * [State Transition](#State-Transition)\n", + " * [Outputs](#Outputs)\n", + " * [Events](#Events)\n", + "* [Conclusion](#Conclusion)" ] }, { @@ -46,7 +71,7 @@ "\n", "$x$ is `state`, $u$ is `input`, $z$ is `output`, and $es$ is `event state`\n", "\n", - "Linear Models are defined by creating a new model class that inherits from progpy's LinearModel class and defines the following properties:\n", + "Linear models are defined by creating a new model class that inherits from progpy's `LinearModel` class and defines the following properties:\n", "* $A$: 2-D np.array[float], dimensions: n_states x n_states. The state transition matrix. It dictates how the current state affects the change in state dx/dt.\n", "* $B$: 2-D np.array[float], optional (zeros by default), dimensions: n_states x n_inputs. The input matrix. It dictates how the input affects the change in state dx/dt.\n", "* $C$: 2-D np.array[float], dimensions: n_outputs x n_states. The output matrix. It determines how the state variables contribute to the output.\n", @@ -64,25 +89,23 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We will now utilize our LinearModel to model the classical physics problem throwing an object into the air. This is a common example model, the non-linear version of which (`progpy.examples.ThrownObject`) has been used frequently throughout the examples. This version of ThrownObject will behave nearly identically to the non-linear ThrownObject, except it will not have the non-linear effects of air resistance.\n", + "We will now utilize our `LinearModel` to model the classical physics problem throwing an object into the air. This is a common example model, the non-linear version of which (`progpy.examples.ThrownObject`) has been used frequently throughout the examples. This version of ThrownObject will behave almost identically to the non-linear `ThrownObject`, except it will not have the non-linear effects of air resistance.\n", "\n", - "We can create a subclass of LinearModel which will be used to simulate an object thrown, which we will call the ThrownObject Class.\n", - "\n", - "First, some definitions for our Model:\n", + "We can create a subclass of `LinearModel` to simulate an object thrown, which we will call the `ThrownObject` class. 
Let's start with some definitions for our model:\n", "\n", "**Events**: (2)\n", - "* `falling: The object is falling`\n", - "* `impact: The object has hit the ground`\n", + "* `falling`: The object is falling\n", + "* `impact`: The object has hit the ground\n", "\n", "**Inputs/Loading**: (0)\n", "* `None`\n", "\n", "**States**: (2)\n", - "* `x: Position in space (m)`\n", - "* `v: Velocity in space (m/s)`\n", + "* `x`: Position in space (m)\n", + "* `v`: Velocity in space (m/s)\n", "\n", "**Outputs/Measurements**: (1)\n", - "* `x: Position in space (m)`" + "* `x`: Position in space (m)" ] }, { @@ -101,7 +124,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "With our definitions, we can now create the ThrownObject Model.\n", + "With our definitions, we can now create the `ThrownObject` model.\n", "\n", "First, we need to import the necessary packages." ] @@ -120,9 +143,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we'll define some features of a ThrownObject LinearModel. Recall that all LinearModels follow a set of core equations and require some specific properties (see above). In the next step, we'll define our inputs, states, outputs, and events, along with the $A$, $C$, $E$, and $F$ values.\n", + "Now we'll define some features of a `ThrownObject` `LinearModel`. Recall that all `LinearModel` classes follow a set of core equations and require some specific properties, as noted earlier. In this next step, we'll define our inputs, states, outputs, and events, along with the $A$, $C$, $E$, and $F$ values.\n", "\n", - "First, let's consider state transition. For an object thrown into the air without air resistance, velocity would decrease linearly by __-9.81__ \n", + "First, let's consider state transition. For an object thrown into the air without air resistance, velocity would decrease linearly by -9.81 \n", "$\\dfrac{m}{s^2}$ due to the effect of gravity, as described below:\n", "\n", " $$\\frac{dv}{dt} = -9.81$$\n", @@ -131,7 +154,7 @@ " \n", " $$\\frac{dx}{dt} = v$$\n", "\n", - " Note: For the above equation x is position not state. Combining these equations with the model $\\frac{dx}{dt}$ equation defined above yields the A and E matrix defined below. Note that there is no B defined because this model does not have any inputs." + "For the above equation x is position not state. Combining these equations with the model $\\frac{dx}{dt}$ equation defined above yields the $A$ and $E$ matrix defined below. There is no $B$ defined because this model does not have any inputs." ] }, { @@ -141,11 +164,11 @@ "outputs": [], "source": [ "class ThrownObject(LinearModel):\n", - " events = ['impact']\n", - " inputs = [] \n", - " states = ['x', 'v']\n", - " outputs = ['x']\n", - " \n", + " events = [\"impact\"]\n", + " inputs = []\n", + " states = [\"x\", \"v\"]\n", + " outputs = [\"x\"]\n", + "\n", " A = np.array([[0, 1], [0, 0]])\n", " C = np.array([[1, 0]])\n", " E = np.array([[0], [-9.81]])\n", @@ -156,9 +179,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Note that we defined our `A`, `C`, `E`, and `F` values to fit the dimensions that were stated at the beginning of the notebook! Since the parameter `F` is not optional, we have to explicitly set the value as __None__.\n", + "Note that we defined our $A$, $C$, $E$, and $F$ values to fit the dimensions that were stated at the beginning of the notebook! 
Since the parameter $F$ is not optional, we have to explicitly set the value as `None`.\n", "\n", - "Next, we'll define some default parameters for our ThrownObject model." + "Next, we'll define some default parameters for our `ThrownObject` model." ] }, { @@ -169,8 +192,8 @@ "source": [ "class ThrownObject(ThrownObject): # Continue the ThrownObject class\n", " default_parameters = {\n", - " 'thrower_height': 1.83,\n", - " 'throwing_speed': 40,\n", + " \"thrower_height\": 1.83,\n", + " \"throwing_speed\": 40,\n", " }" ] }, @@ -180,7 +203,7 @@ "source": [ "In the following cells, we'll define some class functions necessary to perform prognostics on the model.\n", "\n", - "The `initialize()` function sets the initial system state. Since we have defined the `x` and `v` values for our ThrownObject model to represent position and velocity in space, our initial values would be the thrower_height and throwing_speed parameters, respectively." + "The `initialize()` function sets the initial system state. Since we have defined the `x` and `v` values for our `ThrownObject` model to represent position and velocity in space, our initial values would be the `thrower_height` and `throwing_speed` parameters, respectively." ] }, { @@ -191,19 +214,18 @@ "source": [ "class ThrownObject(ThrownObject):\n", " def initialize(self, u=None, z=None):\n", - " return self.StateContainer({\n", - " 'x': self['thrower_height'],\n", - " 'v': self['throwing_speed']\n", - " })" + " return self.StateContainer(\n", + " {\"x\": self[\"thrower_height\"], \"v\": self[\"throwing_speed\"]}\n", + " )" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "For our `threshold_met()`, we define the function to return True for event 'falling' when our thrown object model has a velocity value of less than 0 (object is 'falling') and for event 'impact' when our thrown object has a distance from of the ground of less than or equal to 0 (object is on the ground, or has made 'impact').\n", + "For our `threshold_met()` equation, we will define the function to return `True` for event `falling` when our model has a velocity value less than 0 (object is 'falling') and for event `impact` when our thrown object has a distance from the ground less than or equal to 0 (object is on the ground, or has made 'impact').\n", "\n", - "`threshold_met()` returns a _dict_ of values, if each entry of the _dict_ is __True__, then our threshold has been met!" + "`threshold_met()` returns a _dict_ of values, if each entry of the _dict_ is true, then our threshold has been met." ] }, { @@ -214,17 +236,14 @@ "source": [ "class ThrownObject(ThrownObject):\n", " def threshold_met(self, x):\n", - " return {\n", - " 'falling': x['v'] < 0,\n", - " 'impact': x['x'] <= 0\n", - " }" + " return {\"falling\": x[\"v\"] < 0, \"impact\": x[\"x\"] <= 0}" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Finally, for our `event_state()`, we will calculate the measurement of progress towards the events. We normalize our values such that they are in the range of 0 to 1, where 0 means the event has occurred." + "Finally, for our `event_state()` equation, we will calculate the measurement of progress towards the events. We will normalize our values such that they are in the range of 0 to 1, where 0 means the event has occurred." 
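As a brief aside (added for clarity; it is not one of the original cells), the apex estimate used in the `event_state` implementation below follows from the constant-acceleration relation $v^2 = v_0^2 - 2 \cdot 9.81 \, \Delta x$. Setting the velocity to zero at the apex and solving for the remaining rise gives

$$x_{max} = x + \frac{v^2}{2 \cdot 9.81}$$

which is why the `impact` event state is normalized as $x / x_{max}$ once the object is falling.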
] }, { @@ -234,11 +253,11 @@ "outputs": [], "source": [ "class ThrownObject(ThrownObject):\n", - " def event_state(self, x): \n", - " x_max = x['x'] + np.square(x['v'])/(9.81*2)\n", + " def event_state(self, x):\n", + " x_max = x[\"x\"] + np.square(x[\"v\"]) / (9.81 * 2)\n", " return {\n", - " 'falling': np.maximum(x['v']/self['throwing_speed'],0),\n", - " 'impact': np.maximum(x['x']/x_max,0) if x['v'] < 0 else 1\n", + " \"falling\": np.maximum(x[\"v\"] / self[\"throwing_speed\"], 0),\n", + " \"impact\": np.maximum(x[\"x\"] / x_max, 0) if x[\"v\"] < 0 else 1,\n", " }" ] }, @@ -246,7 +265,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "With these functions created, we can now use the `simulate_to_threshold()` function to simulate the movement of the thrown object in air. For more information, see 1. Simulation." + "With these functions created, we can now use the `simulate_to_threshold()` function to simulate the movement of the thrown object in air. Let's run the simulation. For more information, see __[01 Simulation](01_Simulation.ipynb)__." ] }, { @@ -256,16 +275,18 @@ "outputs": [], "source": [ "m = ThrownObject()\n", - "save = m.simulate_to_threshold(print=True, save_freq=1, events='impact', dt=0.1)" + "simulated_results = m.simulate_to_threshold(\n", + " print=True, save_freq=1, events=\"impact\", dt=0.1\n", + ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "__Note__: Because our model takes in no inputs, we have no need to define a future loading function. However, for most models, there would be inputs, and thus a need for a future loading function. For more information on future loading functions and when to use them, please refer to the future loading section in 1. Simulation.\n", + "Since our model takes in no inputs, we have no need to define a future loading function. However, for most models, there would be inputs, and thus a need for a future loading function. For more information on future loading functions and when to use them, please refer to the future loading section in __[01 Simulation](01_Simulation.ipynb)__.\n", "\n", - "Let's take a look at the outputs of this model" + "Let's take a look at the outputs of this model." ] }, { @@ -274,44 +295,40 @@ "metadata": {}, "outputs": [], "source": [ - "fig = save.outputs.plot(title='generated model')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Notice that that plot resembles a parabola, which represents the position of the ball through space as time progresses!" + "fig = simulated_results.outputs.plot(\n", + " title=\"ThrownObject model simulation output\",\n", + " xlabel=\"time (s)\",\n", + " ylabel=\"position (m)\",\n", + ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "For more information on Linear Models, see the [Linear Model](https://nasa.github.io/progpy/api_ref/prog_models/LinearModel.html) Documentation." + "Notice that that plot resembles a parabola, which represents the position of the ball through space as time progresses. For more information on `LinearModel`, see the [LinearModel](https://nasa.github.io/progpy/api_ref/prog_models/LinearModel.html) documentation." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## New State Transition Models" + "## State Transition Models" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In the previous section, we defined a new prognostic model using the LinearModel class. This can be a powerful tool for defining models that can be described as a linear time series. 
Physics-based state transition models that cannot be described linearly are constructed by subclassing [progpy.PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel). To demonstrate this, we'll create a new model class that inherits from this class. Once constructed in this way, the analysis and simulation tools for PrognosticsModels will work on the new model.\n", + "In the previous section, we defined a new prognostic model using the `LinearModel` class. This can be a powerful tool for defining models that can be described as a linear time series. \n", "\n", - "For this example, we'll create a simple state-transition model of an object thrown upward into the air without air resistance. Note that this is the same dynamic system as the linear model example above, but formulated in a different way. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First, we'll import the necessary packages to create a general prognostics model." + "Physics-based state transition models that cannot be described linearly are constructed by subclassing `PrognosticsModel`. For more information, refer to the [progpy.PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel) documentation. \n", + "\n", + "To demonstrate this, we'll create a new model class that inherits from `PrognosticsModel`. This inheritance allows the new model to use the analysis and simulation tools from `PrognosticsModel`.\n", + "\n", + "Let's create a simple state-transition model of an object thrown upwards in the air without air resistance. Note that this is the same dynamic system as the linear model example above, but formulated differently.\n", + "\n", + "First, we'll import the necessary packages and classes." ] }, { @@ -330,7 +347,7 @@ "source": [ "Next, we'll define our model class. PrognosticsModels require defining [inputs](https://nasa.github.io/progpy/glossary.html#term-input), [states](https://nasa.github.io/progpy/glossary.html#term-state), [outputs](https://nasa.github.io/progpy/glossary.html#term-output), and [event](https://nasa.github.io/progpy/glossary.html#term-event) keys. As in the above example, the states include position (`x`) and velocity(`v`) of the object, the output is position (`x`), and the events are `falling` and `impact`. \n", "\n", - "Note that we define this class as `ThrownObject_ST` to distinguish it as a state-transition model compared to the previous linear model class. " + "We will define this new class as `ThrownObject_ST` to distinguish it as a state-transition model compared to the previous linear model class. 
" ] }, { @@ -344,17 +361,17 @@ " Model that simulates an object thrown into the air without air resistance\n", " \"\"\"\n", "\n", - " inputs = [] # no inputs, no way to control\n", + " inputs = [] # no inputs, no way to control\n", " states = [\n", - " 'x', # Position (m) \n", - " 'v' # Velocity (m/s)\n", - " ]\n", - " outputs = [ # Anything we can measure\n", - " 'x' # Position (m)\n", + " \"x\", # Position (m)\n", + " \"v\", # Velocity (m/s)\n", + " ]\n", + " outputs = [ # Anything we can measure\n", + " \"x\" # Position (m)\n", " ]\n", " events = [\n", - " 'falling', # Event- object is falling\n", - " 'impact' # Event- object has impacted ground\n", + " \"falling\", # Event- object is falling\n", + " \"impact\", # Event- object has impacted ground\n", " ]" ] }, @@ -372,11 +389,10 @@ "outputs": [], "source": [ "class ThrownObject_ST(ThrownObject_ST):\n", - "\n", " default_parameters = {\n", - " 'thrower_height': 1.83, # default height \n", - " 'throwing_speed': 40, # default speed\n", - " 'g': -9.81, # Acceleration due to gravity (m/s^2)\n", + " \"thrower_height\": 1.83, # default height\n", + " \"throwing_speed\": 40, # default speed\n", + " \"g\": -9.81, # Acceleration due to gravity (m/s^2)\n", " }" ] }, @@ -384,25 +400,18 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "All prognostics models require some specific class functions. We'll define those next. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First, we'll need to add the functionality to set the initial state of the system. There are two ways to provide the logic to initialize model state. \n", + "All prognostics models require some specific class functions. We'll define those next.\n", "\n", - "1. Provide the initial state in `parameters['x0']`, or \n", - "2. Provide an `initialize` function \n", + "We'll first add the functionality to set the initial state of the system. There are two ways to provide the logic to initialize model state. \n", "\n", - "The first method here is preferred. If `parameters['x0']` are defined, there is no need to explicitly define an initialize method, and these parameter values will be used as the initial state. \n", + "1. Provide the initial state in `parameters['x0']`\n", + "2. Provide an `initialize` function \n", "\n", - "However, there are some cases where the initial state is a function of the input (`u`) or output (`z`) (e.g. a use-case where the input is also a state). In this case, an explicitly defined `initialize` method is required. \n", + "The first method is preferred since defining `parameters['x0']` means we don't need to explicitly define an `initialize` method as these parameter values will already be used as the initial state.\n", "\n", - "Here, we'll set our initial state by defining an `initialize` function. In the code below, note that the function can take arguments for both input `u` and output `z`, though these are optional. \n", + "However, there are some cases where the initial state is a function of the input (`u`) or output (`z`) (e.g. a use-case where the input is also a state), so an explicitly defined `initialize` method is required. \n", "\n", - "Note that for this example, defining initialize in this way is not necessary. We could have simply defined `parameters['x0']`. However, we choose to use this method for ease when using the `derived_params` feature, discussed in the next section. " + "In this example, we could set our initial state by simply defining `parameters['x0']`. 
However, we will use an `initialize` function for ease when using the `derived_params` feature, which will be discussed in the next section. In the code below, note that the function can take arguments for both input `u` and output `z`, though these are optional." ] }, { @@ -412,21 +421,24 @@ "outputs": [], "source": [ "class ThrownObject_ST(ThrownObject_ST):\n", - "\n", " def initialize(self, u=None, z=None):\n", - " return self.StateContainer({\n", - " 'x': self['thrower_height'], # Thrown, so initial altitude is height of thrower\n", - " 'v': self['throwing_speed'] # Velocity at which the ball is thrown - this guy is a professional baseball pitcher\n", - " })" + " return self.StateContainer(\n", + " {\n", + " \"x\": self[\n", + " \"thrower_height\"\n", + " ], # Initial height from which the ball is thrown\n", + " \"v\": self[\"throwing_speed\"], # Velocity at which the ball is thrown\n", + " }\n", + " )" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Next, the PrognosticsModel class requires that we define how the state transitions throughout time. For continuous models, this is defined with the method `dx`, which calculates the first derivative of the state at a specific time. For discrete systems, this is defined with the method `next_state`, using the state transition equation for the system. When possible, it is recommended to use the continuous (`dx`) form, as some algorithms will only work on continuous models.\n", + "The `PrognosticsModel` class requires that we define how the state transitions throughout time. For continuous models, this is defined with the method `dx`, which calculates the first derivative of the state at a specific time. For discrete systems, this is defined with the method `next_state`, using the state transition equation for the system. When possible, it is recommended to use the continuous (`dx`) form, as some algorithms will only work on continuous models.\n", "\n", - "Here, we use the equations for the derivatives of our system (i.e., the continuous form)." + "Here, we will use the equations for the derivatives of our system (i.e., the continuous form)." ] }, { @@ -436,11 +448,10 @@ "outputs": [], "source": [ "class ThrownObject_ST(ThrownObject_ST):\n", - "\n", " def dx(self, x, u):\n", - " return self.StateContainer({\n", - " 'x': x['v'], \n", - " 'v': self['g']}) # Acceleration of gravity" + " return self.StateContainer(\n", + " {\"x\": x[\"v\"], \"v\": self[\"g\"]}\n", + " ) # Acceleration of gravity" ] }, { @@ -457,9 +468,8 @@ "outputs": [], "source": [ "class ThrownObject_ST(ThrownObject_ST):\n", - " \n", " def output(self, x):\n", - " return self.OutputContainer({'x': x['x']})" + " return self.OutputContainer({\"x\": x[\"x\"]})" ] }, { @@ -467,7 +477,7 @@ "metadata": {}, "source": [ "The next method we define is [`event_state`](https://nasa.github.io/progpy/glossary.html#term-event-state). As before, \n", - "`event_state` calculates the progress towards the events. Normalized to be between 0 and 1, 1 means there is no progress towards the event and 0 means the event has occurred. " + "`event_state` calculates the progress towards the events. This is normalized to be between 0 and 1, where 1 means there is no progress towards the event and 0 means the event has occurred. 
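As a quick, illustrative sanity check (not part of the notebook; it simply mirrors the formulas in the next cell using the default parameter values assumed above), the normalization can be evaluated by hand at the moment of release:

```python
import numpy as np

# Default parameters assumed from the cells above: thrower_height, throwing_speed, g
x, v, g, throwing_speed = 1.83, 40.0, -9.81, 40.0

x_max = x + np.square(v) / (-g * 2)   # estimated apex height: ~83.4 m
x_max = np.where(v > 0, x, x_max)     # still rising, so use the current position instead
falling = max(v / throwing_speed, 0)  # 1.0 -> no progress toward 'falling' yet
impact = max(x / x_max, 0)            # 1.0 -> no progress toward 'impact' yet
print(falling, impact)                # both event states are 1.0 at release
```

As the object rises and then falls, both values shrink toward 0, reaching 0 exactly when the corresponding event occurs.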
" ] }, { @@ -477,15 +487,18 @@ "outputs": [], "source": [ "class ThrownObject_ST(ThrownObject_ST):\n", - " \n", - " def event_state(self, x): \n", + " def event_state(self, x):\n", " # Use speed and position to estimate maximum height\n", - " x_max = x['x'] + np.square(x['v'])/(-self['g']*2)\n", + " x_max = x[\"x\"] + np.square(x[\"v\"]) / (-self[\"g\"] * 2)\n", " # 1 until falling begins\n", - " x_max = np.where(x['v'] > 0, x['x'], x_max)\n", + " x_max = np.where(x[\"v\"] > 0, x[\"x\"], x_max)\n", " return {\n", - " 'falling': max(x['v']/self['throwing_speed'],0), # Throwing speed is max speed\n", - " 'impact': max(x['x']/x_max,0) # 1 until falling begins, then it's fraction of height\n", + " \"falling\": max(\n", + " x[\"v\"] / self[\"throwing_speed\"], 0\n", + " ), # Throwing speed is max speed\n", + " \"impact\": max(\n", + " x[\"x\"] / x_max, 0\n", + " ), # 1 until falling begins, then it's fraction of height\n", " }" ] }, @@ -493,16 +506,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "At this point, we have defined all necessary information for the PrognosticsModel to be complete. There are other methods that can additionally be defined (see the [PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html) documentation for more information) to provide further configuration for new models. We'll highlight some of these in the following sections. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As an example of one of these, we additionally define a `threshold_met` equation. Note that this is optional. Leaving `threshold_met` empty will use the event state to define thresholds (threshold = event state == 0). However, this implementation is more efficient, so we include it. \n", + "At this point, we have defined all necessary information for our new model to be complete. There are other methods that can be defined to provide additional configuration, and we'll highlight some of them in the following sections. We can also refer to the [PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html) documentation for more information.\n", + "\n", + "For example, we can optionally include a `threshold_met` equation. Without an explicit definition, `threshold_met` will use the event state to define thresholds (threshold = event state == 0). However, this implementation is more efficient, so we will include it.\n", "\n", - "Here, we define `threshold_met` in the same way as our linear model example. `threshold_met` will return a _dict_ of values, one for each event. Threshold is met when all dictionary entries are __True__. " + "We will define `threshold_met` in the same way as our linear model example. `threshold_met` will return a _dict_ of values, one for each event. The threshold will be met when all dictionary entries are `True`. " ] }, { @@ -512,11 +520,11 @@ "outputs": [], "source": [ "class ThrownObject_ST(ThrownObject_ST):\n", - "\n", " def threshold_met(self, x):\n", " return {\n", - " 'falling': x['v'] < 0, # Falling occurs when velocity becomes negative\n", - " 'impact': x['x'] <= 0 # Impact occurs when the object hits the ground, i.e. position is <= 0\n", + " \"falling\": x[\"v\"] < 0, # Falling occurs when velocity becomes negative\n", + " \"impact\": x[\"x\"]\n", + " <= 0, # Impact occurs when the object hits the ground, i.e. 
position is <= 0\n", " }" ] }, @@ -524,14 +532,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "With that, we have created a new ThrownObject state-transition model. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's can test our model through simulation. First, we'll create an instance of the model." + "We have now created a new `ThrownObject_ST` model. Let's now test our model through simulation. \n", + "\n", + "First, we'll create an instance of the model." ] }, { @@ -547,7 +550,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We'll start by simulating to impact. We'll specify the `events` to specifically indicate we are interested in impact. For more information on simulation, see 1. Simulation. " + "We'll then simulate to impact, as specified in `events`. For more information on how simulation works, refer to __[01 Simulation](01_Simulation.ipynb)__. " ] }, { @@ -557,67 +560,87 @@ "outputs": [], "source": [ "# Simulate to impact\n", - "event = 'impact'\n", - "simulated_results = m_st.simulate_to_threshold(events=event, dt=0.005, save_freq=1, print = True)\n", + "event = \"impact\"\n", + "simulated_results = m_st.simulate_to_threshold(\n", + " events=event, dt=0.005, save_freq=1, print=True\n", + ")\n", "\n", - "# Print result: \n", - "print('The object hit the ground in {} seconds'.format(round(simulated_results.times[-1],2)))" + "# Print result:\n", + "print(\n", + " \"The object hit the ground in {} seconds\".format(\n", + " round(simulated_results.times[-1], 2)\n", + " )\n", + ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To summarize this section, we have illustrated how to construct new physics-based models by subclassing from [progpy.PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel). Some elements (e.g. inputs, states, outputs, events keys; methods for initialization, dx/next_state, output, and event_state) are required. Models can be additionally configured with additional methods and parameters.\n", - "\n", - "Note that in this example, we defined each part one piece at a time, recursively subclassing the partially defined class. This was done to illustrate the parts of the model. In reality, all methods and properties would be defined together in a single class definition. " + "Let's now plot the results." ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "## Derived Parameters" + "fig = simulated_results.outputs.plot(\n", + " title=\"ThrownObject_ST model simulation output\",\n", + " xlabel=\"time (s)\",\n", + " ylabel=\"position (m)\",\n", + ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In the previous section, we constructed a new model from scratch by subclassing from [progpy.PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel) and specifying all of the necessary model components. An additional optional feature of PrognosticsModels is derived parameters, illustrated below. \n", + "We can see the parabolic shape of the object being thrown in the air.\n", "\n", - "A derived parameter is a parameter (see parameter section in 1. Simulation) that is a function of another parameter. For example, in the case of a thrown object, one could assume that throwing speed is a function of thrower height, with taller throwing height resulting in faster throwing speeds. 
In the electrochemistry battery model (see 3. Included Models), there are parameters for the maximum and minimum charge at the surface and bulk, and these are dependent on the capacity of the battery (i.e. another parameter, qMax). When such derived parameters exist, they must be updated whenever the parameters they depend on are updated. In PrognosticsModels, this is achieved with the `derived_params` feature. \n", + "We have so far illustrated how to construct new physics-based models by subclassing from [progpy.PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel). Some elements (e.g. `inputs`, `states`, `outputs`, events keys, methods for initialization, `dx` or `next_state`, `output`, and `event_state`) are required. Models can be additionally configured with additional methods and parameters.\n", "\n", - "This feature can also be used to cache combinations of parameters that are used frequently in state transition or other model methods. Creating lumped parameters using `derived_params` causes them to be calculated once when configuring, instead of each time step in simulation or prediction. " + "In the previous example, we defined each part one piece at a time, recursively subclassing the partially defined class. This was done to illustrate the parts of the model. In actual usage, all methods and properties should be defined together in a single class definition." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "For this example, we will use the `ThrownObject_ST` model created in the previous section. We will extend this model to include a derived parameter, namely `throwing_speed` will be dependent on `thrower_height`.\n", - "\n", - "To implement this, we must first define a function for the relationship between the two parameters. We'll assume that `throwing_speed` is a linear function of `thrower_height`. " + "## Direct Models" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "def update_thrown_speed(params):\n", - " return {\n", - " 'throwing_speed': params['thrower_height'] * 21.85\n", - " } \n", - " # Note: one or more parameters can be changed in these functions, whatever parameters are changed are returned in the dictionary" + "In the previous sections, we illustrated how to create and use state-transition models, or models that use state transition differential equations to propagate the state forward. In this example, we'll explore another type of model implemented within ProgPy: direct models. \n", + "\n", + "Direct models estimate the time of event directly from the system state and future load, rather than through state transitions. This approach is particularly useful for physics-based models where the differential equations of state transitions can be explicitly solved, or for data-driven models that map sensor data directly to the time of an event. When applicable, using a direct model approach provides a more efficient way to estimate the time of an event, especially for events that occur later in the simulation.\n", + "\n", + "To illustrate this concept, we will extend the state-transition model, `ThrownObject_ST`, defined above, to create a new model class, `DirectThrownObject`. The dynamics of a thrown object lend easily to a direct model, since we can solve the differential equations explicitly to estimate the time at which the events occur. 
\n",
+    "\n",
+    "Recall that our physical system is described by the following differential equations: \n",
+    "\\begin{align*}\n",
+    "\\frac{dx}{dt} &= v \\\\ \\\\\n",
+    "\\frac{dv}{dt} &= -g \n",
+    "\\end{align*}\n",
+    "\n",
+    "This can be solved explicitly given initial position $x_0$ and initial velocity $v_0$:\n",
+    "\\begin{align*}\n",
+    "x(t) &= -\\frac{1}{2} gt^2 + v_0 t + x_0 \\\\ \\\\ \n",
+    "v(t) &= -gt + v_0\n",
+    "\\end{align*}\n",
+    "\n",
+    "Setting these equations to 0 and solving for time, we get the time at which the object hits the ground and begins falling, respectively. "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "Next, we'll define the parameter callbacks, so that `throwing_speed` is updated appropriately any time that `thrower_height` changes. The following effectively tells the derived callbacks feature to call the `update_thrown_speed` function whenever the `thrower_height` changes. "
+    "To construct our direct model, we'll extend the `ThrownObject_ST` model to additionally include the method [time_of_event](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel.time_of_event). This method will calculate the time at which each event occurs (i.e., time when the event threshold is met), based on the equations above. `time_of_event` must be implemented by any direct model. "
   ]
  },
  {
@@ -626,25 +649,33 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "class ThrownObject_ST(ThrownObject_ST):\n",
+    "class DirectThrownObject(ThrownObject_ST):\n",
+    "    def time_of_event(self, x, *args, **kwargs):\n",
+    "        # calculate time when object hits ground given x['x'] and x['v']\n",
+    "        # 0 = x0 + v0*t - 0.5*g*t^2\n",
+    "        g = self[\"g\"]\n",
+    "        t_impact = -(x[\"v\"] + np.sqrt(x[\"v\"] * x[\"v\"] - 2 * g * x[\"x\"])) / g\n",
     "\n",
-    "    param_callbacks = {\n",
-    "        'thrower_height': [update_thrown_speed]\n",
-    "    }"
+    "        # 0 = v0 - g*t\n",
+    "        t_falling = -x[\"v\"] / g\n",
+    "\n",
+    "        return {\"falling\": t_falling, \"impact\": t_impact}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "You can also have more than one function be called when a single parameter is changed. You would do this by adding the additional callbacks to the list (e.g., 'thrower_height': [update_thrown_speed, other_fcn])"
+    "With this, our direct model is created. Note that adding `*args` and `**kwargs` is optional. Having these arguments makes the function interchangeable with other models which may have arguments or keyword arguments. "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "We have now added the capability for `throwing_speed` to be a derived parameter. Let's try it out. First, we'll create an instance of our class and print out the default parameters. "
+    "Now let's test out this capability. To do so, we'll use the `time` package to compare the direct model to our original timeseries model. \n",
+    "\n",
+    "Let's start by creating an instance of our timeseries model, calculating the time of event, and timing this computation. Note that for a state transition model, `time_of_event` still returns the time at which `threshold_met` returns true for each event, but this is calculated by simulating to threshold."
   ]
  },
  {
@@ -653,15 +684,27 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "obj = ThrownObject_ST()\n",
-    "print(\"Default Settings:\\n\\tthrower_height: {}\\n\\tthrowing_speed: {}\".format(obj['thrower_height'], obj['throwing_speed']))"
+    "import time\n",
+    "\n",
+    "m_timeseries = ThrownObject_ST()\n",
+    "x = m_timeseries.initialize()\n",
+    "print(\n",
+    "    m_timeseries.__class__.__name__,\n",
+    "    \"(Direct Model)\" if m_timeseries.is_direct else \"(Timeseries Model)\",\n",
+    ")\n",
+    "tic = time.perf_counter()\n",
+    "print(\"Time of event: \", m_timeseries.time_of_event(x, dt=0.05))\n",
+    "toc = time.perf_counter()\n",
+    "print(f\"execution: {(toc - tic) * 1000:0.4f} milliseconds\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "Now, let's change the thrower height. If our derived parameters work correctly, the thrower speed should change accordingly. "
+    "Now let's do the same using our direct model implementation. In this case, when `time_of_event` is called, the event time will be estimated directly from the state, instead of through simulation to threshold. \n",
+    "\n",
+    "Note that a limitation of a direct model is that you cannot get intermediate states (i.e., `save_pts` or `save_freq`) since the time of event is calculated directly. "
   ]
  },
  {
@@ -670,66 +713,60 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "obj['thrower_height'] = 1.75 # Our thrower is 1.75 m tall\n",
-    "print(\"\\nUpdated Settings:\\n\\tthrower_height: {}\\n\\tthowing_speed: {}\".format(obj['thrower_height'], obj['throwing_speed']))"
+    "m_direct = DirectThrownObject()\n",
+    "x = m_direct.initialize()  # Using Initial state\n",
+    "# Now instead of simulating to threshold, we can estimate it directly from the state, like so\n",
+    "print(\n",
+    "    \"\\n\",\n",
+    "    m_direct.__class__.__name__,\n",
+    "    \"(Direct Model)\" if m_direct.is_direct else \"(Timeseries Model)\",\n",
+    ")\n",
+    "tic = time.perf_counter()\n",
+    "print(\"Time of event: \", m_direct.time_of_event(x))\n",
+    "toc = time.perf_counter()\n",
+    "print(f\"execution: {(toc - tic) * 1000:0.4f} milliseconds\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "As we can see, when the thrower height was changed, the throwing speed was re-calculated too. \n",
+    "Notice that execution is significantly faster for the direct model. Furthermore, the result is actually more accurate, since it's not limited by the timestep (see dt section in __[01 Simulation](01_Simulation.ipynb)__). These observations will be even more pronounced for events that occur later in the simulation. \n",
     "\n",
-    "In this example, we illustrated how to use the `derived_params` feature, which allows a parameter to be a function of another parameter. "
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Direct Models"
+    "It's important to note that this is a very simple example, as there are no inputs. For models with inputs, future loading must be provided to `time_of_event` (see the future loading section in __[01 Simulation](01_Simulation.ipynb)__). In these cases, most direct models will encode or discretize the future loading profile to use it in a direct estimation of time of event.\n",
+    "\n",
+    "In the example provided, we have illustrated how to use a direct model. Direct models are a powerful tool for estimating the time of an event directly from the system state. By avoiding the process of state transitions, direct models can provide more efficient event time estimates. 
Additionally, the direct model approach is not limited to physics-based models. It can also be applied to data-driven models that can map sensor data directly to the time of an event. \n", + "\n", + "In conclusion, direct models offer an efficient and versatile approach for prognostics modeling, enabling faster and more direct estimations of event times. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In the previous sections, we illustrated how to create and use state-transition models, or models that use state transition differential equations to propagate the state forward. In this example, we'll explore another type of model implemented within ProgPy - Direct Models. " + "## Advanced Features" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Direct models estimate the time of event directly from the system state and future load, rather than through state transitions. This approach is particularly useful for physics-based models where the differential equations of state transitions can be explicitly solved, or for data-driven models that map sensor data directly to the time of an event. When applicable, using a direct model approach provides a more efficient way to estimate the time of an event, especially for events that occur later in the simulation. " + "### Derived Parameters" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To illustrate this concept, we will extend the state-transition model, `ThrownObject_ST`, defined above, to create a new model class, `DirectThrownObject`. The dynamics of a thrown object lend easily to a direct model, since we can solve the differential equations explicitly to estimate the time at which the events occur. \n", + "In the previous section, we constructed a new model from scratch by subclassing from [progpy.PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel) and specifying all of the necessary model components. An additional optional feature of `PrognosticsModel` is derived parameters, illustrated below. \n", "\n", - "Recall that our physical system is described by the following differential equations: \n", - "\\begin{align*}\n", - "\\frac{dx}{dt} &= v \\\\ \\\\\n", - "\\frac{dv}{dt} &= -g \n", - "\\end{align*}\n", + "A derived parameter is a parameter that is a function of another parameter. For example, in the case of a thrown object, one could assume that throwing speed is a function of thrower height, with taller throwing height resulting in faster throwing speeds. In the electrochemistry battery model (see __[03 Included Models](03_Existing%20Models.ipynb)__), there are parameters for the maximum and minimum charge at the surface and bulk, and these are dependent on the capacity of the battery (i.e. another parameter, `qMax`). When such derived parameters exist, they must be updated whenever the parameters they depend on are updated. In `PrognosticsModels`, this is achieved with the `derived_params` feature. \n", "\n", - "which can be solved explicity, given initial position $x_0$ and initial velocity $v_0$, to get:\n", - "\\begin{align*}\n", - "x(t) &= -\\frac{1}{2} gt^2 + v_0 t + x_0 \\\\ \\\\ \n", - "v(t) &= -gt + v_0\n", - "\\end{align*}\n", + "This feature can also be used to cache combinations of parameters that are used frequently in state transition or other model methods. 
Creating lumped parameters using `derived_params` causes them to be calculated once when configuring, instead of each time step in simulation or prediction. \n", "\n", - "Setting these equations to 0 and solving for time, we get the time at which the object hits the ground and begins falling, respectively. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To construct our direct model, we'll extend the `ThrownObject_ST` model to additionally include the method [time_to_event](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel.time_of_event). This method will calculate the time at which each event occurs (i.e., time when the event threshold is met), based on the equations above. `time_of_event` must be implemented by any direct model. " + "For this example, we will use the `ThrownObject_ST` model created in a previous section. We will extend this model to include a derived parameter, namely `throwing_speed` to be dependent on `thrower_height`.\n", + "\n", + "To implement this, we must first define a function for the relationship between the two parameters. We'll assume that `throwing_speed` is a linear function of `thrower_height`. " ] }, { @@ -738,34 +775,16 @@ "metadata": {}, "outputs": [], "source": [ - "class DirectThrownObject(ThrownObject_ST):\n", - " \n", - " def time_of_event(self, x, *args, **kwargs):\n", - " # calculate time when object hits ground given x['x'] and x['v']\n", - " # 0 = x0 + v0*t - 0.5*g*t^2\n", - " g = self['g']\n", - " t_impact = -(x['v'] + np.sqrt(x['v']*x['v'] - 2*g*x['x']))/g\n", - "\n", - " # 0 = v0 - g*t\n", - " t_falling = -x['v']/g\n", - " \n", - " return {'falling': t_falling, 'impact': t_impact}\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With this, our direct model is created. Note that adding `*args` and `**kwargs` is optional. Having these arguments makes the function interchangeable with other models which may have arguments or keyword arguments. " + "def update_thrown_speed(params):\n", + " return {\"throwing_speed\": params[\"thrower_height\"] * 21.85}\n", + " # One or more parameters can be changed in these functions, and parameters that are changed are returned in the dictionary" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Now let's test out this capability. To do so, we'll use the `time` package to compare the direct model to our original timeseries model. \n", - "\n", - "Let's start by creating an instance of our timeseries model, calculating the time of event, and timing this computation. Note that for a state transition model, `time_of_event` still returns the time at which `threshold_met` returns true for each event, but this is calculated by simulating to threshold." + "Next, we'll define the parameter callbacks, so that `throwing_speed` is updated appropriately any time that `thrower_height` changes. The following effectively tells the derived callbacks feature to call the `update_thrown_speed` function whenever the `thrower_height` changes. 
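Conceptually, the callback wiring described above behaves like the plain-Python sketch below. This is illustrative only; progpy performs this bookkeeping internally whenever the parameter is assigned:

```python
# Plain-dict illustration of what the derived-parameters feature does for us
params = {"thrower_height": 1.83, "throwing_speed": 40}

params["thrower_height"] = 1.75             # a user updates the base parameter...
params.update(update_thrown_speed(params))  # ...and the registered callback re-derives the rest

print(params["throwing_speed"])             # ~38.2, i.e. 1.75 * 21.85
```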
" ] }, { @@ -774,24 +793,17 @@ "metadata": {}, "outputs": [], "source": [ - "import time \n", - "\n", - "m_timeseries = ThrownObject_ST()\n", - "x = m_timeseries.initialize()\n", - "print(m_timeseries.__class__.__name__, \"(Direct Model)\" if m_timeseries.is_direct else \"(Timeseries Model)\")\n", - "tic = time.perf_counter()\n", - "print('Time of event: ', m_timeseries.time_of_event(x, dt = 0.05))\n", - "toc = time.perf_counter()\n", - "print(f'execution: {(toc-tic)*1000:0.4f} milliseconds')" + "class ThrownObject_ST(ThrownObject_ST):\n", + " param_callbacks = {\"thrower_height\": [update_thrown_speed]}" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Now let's do the same using our direct model implementation. In this case, when `time_to_event` is called, the event time will be estimated directly from the state, instead of through simulation to threshold. \n", + "We can also have more than one function be called when a single parameter is changed. We could do this by adding the additional callbacks to the list (e.g., `thrower_height`: [`update_thrown_speed`, `other_fcn`])\n", "\n", - "Note that a limitation of a direct model is that you cannot get intermediate states (i.e., save_pts or save_freq) since the time of event is calculated directly. " + "We have now added the capability for `throwing_speed` to be a derived parameter. Let's try it out. First, we'll create an instance of our class and print out the default parameters. " ] }, { @@ -800,61 +812,61 @@ "metadata": {}, "outputs": [], "source": [ - "m_direct = DirectThrownObject()\n", - "x = m_direct.initialize() # Using Initial state\n", - "# Now instead of simulating to threshold, we can estimate it directly from the state, like so\n", - "print('\\n', m_direct.__class__.__name__, \"(Direct Model)\" if m_direct.is_direct else \"(Timeseries Model)\")\n", - "tic = time.perf_counter()\n", - "print('Time of event: ', m_direct.time_of_event(x))\n", - "toc = time.perf_counter()\n", - "print(f'execution: {(toc-tic)*1000:0.4f} milliseconds')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Notice that execution is significantly faster for the direct model. Furthermore, the result is actually more accurate, since it's not limited by the timestep (see dt section in 1. Simulation). These observations will be even more pronounced for events that occur later in the simulation. \n", - "\n", - "It's important to note that this is a very simple example, as there are no inputs. For models with inputs, future loading must be provided to `time_of_event` (see the Future Loading section in 1. Simulation). In these cases, most direct models will encode or discretize the future loading profile to use it in a direct estimation of time of event." + "obj = ThrownObject_ST()\n", + "print(\n", + " \"Default Settings:\\n\\tthrower_height: {}\\n\\tthrowing_speed: {}\".format(\n", + " obj[\"thrower_height\"], obj[\"throwing_speed\"]\n", + " )\n", + ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In the example provided, we have illustrated how to use a direct model. Direct models are a powerful tool for estimating the time of an event directly from the system state. By avoiding the process of state transitions, direct models can provide more efficient event time estimates. Additionally, the direct model approach is not limited to physics-based models. It can also be applied to data-driven models that can map sensor data directly to the time of an event. 
\n",
+    "\n",
+    "In conclusion, direct models offer an efficient and versatile approach for prognostics modeling, enabling faster and more direct estimations of event times. "
   ]
  },
  {
-   "cell_type": "markdown",
+   "cell_type": "code",
+   "execution_count": null,
   "metadata": {},
+   "outputs": [],
   "source": [
-    "## Matrix Data Access Feature"
+    "obj[\"thrower_height\"] = 1.75  # Our thrower is 1.75 m tall\n",
+    "print(\n",
+    "    \"\\nUpdated Settings:\\n\\tthrower_height: {}\\n\\tthrowing_speed: {}\".format(\n",
+    "        obj[\"thrower_height\"], obj[\"throwing_speed\"]\n",
+    "    )\n",
+    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "In the above models, we have used dictionaries to represent the states. For example, in the implementation of `ThrownObject_ST` above, see how `dx` is defined with a StateContainer dictionary. While all models can be constructed using dictionaries in this way, some dynamical systems allow for the state of the system to be represented with a matrix. For such use-cases, ProgPy has an advanced *matrix data access feature* that provides a more efficient way to define these models.\n",
+    "As we can see, when the thrower height was changed, the throwing speed was re-calculated too. \n",
     "\n",
-    "In ProgPy's implementation, the provided model.StateContainer, InputContainer, and OutputContainers can be treated as dictionaries but use an underlying matrix. This is important for some applications like surrogate and machine-learned models where the state is represented by a tensor. ProgPy's *matrix data access feature* allows the matrices to be used directly. Simulation functions propagate the state using the matrix form, preventing the inefficiency of having to convert to and from dictionaries. Additionally, this implementation is faster than recreating the StateContainer each time, especially when updating inplace."
+    "In this example, we illustrated how to use the `derived_params` feature, which allows a parameter to be a function of another parameter. "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "In this example, we'll illustrate how to use the matrix data access feature. We'll continue with our ThrownObject system, and create a model to simulate this using matrix notation (instead of dictionary notation as in the standard model, seen above in `ThrownObject_ST`). The implementation of the model is comparable to a standard model, except that it uses matrix operations within each function, as seen below. "
+    "### Matrix Data Access"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
+    "In the above models, we have used dictionaries to represent the states. For example, in the implementation of `ThrownObject_ST` above, see how `dx` is defined with a `StateContainer` dictionary. While all models can be constructed using dictionaries in this way, some dynamic systems allow for the state of the system to be represented with a matrix. For such use-cases, ProgPy has an advanced matrix data access feature that provides a more efficient way to define these models.\n",
     "\n",
+    "In ProgPy's implementation, the provided `model.StateContainer`, `InputContainer`, and `OutputContainer` can be treated as dictionaries but use an underlying matrix. This is important for some applications like surrogate and machine-learned models where the state is represented by a tensor. 
ProgPy's matrix data access feature allows the matrices to be used directly. Simulation functions propagate the state using the matrix form, preventing the inefficiency of having to convert to and from dictionaries. Additionally, this implementation is faster than recreating the `StateContainer` each time, especially when updating in place.\n", + "\n", + "In this example, we'll illustrate how to use the matrix data access feature. We'll continue with our `ThrownObject` system, and create a model to simulate this using matrix notation (instead of dictionary notation as in the standard model, seen above in `ThrownObject_ST`). The implementation of the model is comparable to a standard model, except that it uses matrix operations within each function, as seen below. \n", + "\n", "First, the necessary imports." ] }, @@ -865,7 +877,7 @@ "outputs": [], "source": [ "import numpy as np\n", - "from progpy import PrognosticsModel" + "import time" ] }, { @@ -876,7 +888,7 @@ "\n", "To use the matrix data access feature, we'll use matrices to define how the state transitions. Since we are working with a discrete version of the system now, we'll define the `next_state` method, and this will override the `dx` method in the parent class. \n", "\n", - "In the following, we will use the matrix version for each variable, accessed with `.matrix`. We implement this within `next_state`, but this feature can also be used in other functions. Here, both `x.matrix` and `u.matrix` are column vectors, and `u.matrix` is in the same order as model.inputs." + "In the following, we will use the matrix version for each variable, accessed with `.matrix`. We implement this within `next_state`, but this feature can also be used in other functions. Here, both `x.matrix` and `u.matrix` are column vectors, and `u.matrix` is in the same order as `model.inputs`." ] }, { @@ -886,11 +898,9 @@ "outputs": [], "source": [ "class ThrownObject_MM(ThrownObject_ST):\n", - "\n", " def next_state(self, x, u, dt):\n", - "\n", " A = np.array([[0, 1], [0, 0]]) # State transition matrix\n", - " B = np.array([[0], [self['g']]]) # Acceleration due to gravity\n", + " B = np.array([[0], [self[\"g\"]]]) # Acceleration due to gravity\n", " x.matrix += (np.matmul(A, x.matrix) + B) * dt\n", "\n", " return x" @@ -900,14 +910,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Our model is now specified. Let's try simulating with it." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First, we'll create an instance of the model." + "Our model is now specified. Let's try simulating with it.\n", + "\n", + "Let's create an instance of the model." 
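As a small aside (illustrative only, relying just on the `StateContainer` behavior described above), the dictionary view and the matrix view of a state refer to the same underlying data:

```python
m = ThrownObject_MM()
x = m.initialize()

print(x["x"], x["v"])  # dictionary-style access: 1.83 40
print(x.matrix)        # the same values as a column vector, ordered like m.states
```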
] }, { @@ -932,15 +937,11 @@ "metadata": {}, "outputs": [], "source": [ - "import time \n", - "\n", "tic_matrix = time.perf_counter()\n", - "# Simulate to threshold \n", - "m_matrix.simulate_to_threshold(\n", - " print = True, \n", - " events = 'impact', \n", - " dt = 0.1, \n", - " save_freq = 1)\n", + "\n", + "# Simulate to threshold\n", + "m_matrix.simulate_to_threshold(print=True, events=\"impact\", dt=0.1, save_freq=1)\n", + "\n", "toc_matrix = time.perf_counter()" ] }, @@ -961,28 +962,19 @@ "outputs": [], "source": [ "tic_st = time.perf_counter()\n", - "m_st.simulate_to_threshold(\n", - " print = True, \n", - " events = 'impact', \n", - " dt = 0.1, \n", - " save_freq = 1)\n", + "m_st.simulate_to_threshold(print=True, events=\"impact\", dt=0.1, save_freq=1)\n", "toc_st = time.perf_counter()\n", "\n", - "print(f'Matrix execution: {(toc_matrix-tic_matrix)*1000:0.4f} milliseconds')\n", - "print(f'Non-matrix execution: {(toc_st-tic_st)*1000:0.4f} milliseconds')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, for this system, using the matrix data access feature is computationally faster than a standard state-transition matrix that uses dictionaries. " + "print(f\"Matrix execution: {(toc_matrix - tic_matrix) * 1000:0.4f} milliseconds\")\n", + "print(f\"Non-matrix execution: {(toc_st - tic_st) * 1000:0.4f} milliseconds\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ + "As we can see, for this system, using the matrix data access feature is computationally faster than a standard state-transition matrix that uses dictionaries.\n", + "\n", "As illustrated here, the matrix data access feature is an advanced capability that represents the state of a system using matrices. This can provide efficiency for use-cases where the state is easily represented by a tensor and operations are defined by matrices." ] }, @@ -990,20 +982,15 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## State Limits" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In real-world physical systems, there are often constraints on what values the states can take. For example, in the case of a thrown object, if we define our reference frame with the ground at a position of $x=0$, then the position of the object should only be greater than or equal to 0, and should never take on negative values. In ProgPy, we can enforce constraints on the range of each state for a state-transition model using the [state limits](https://nasa.github.io/progpy/prog_models_guide.html#state-limits) attribute. " + "### State Limits" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ + "In real-world physical systems, there are often constraints on what values the states can take. For example, in the case of a thrown object, if we define our reference frame with the ground at a position of $x=0$, then the position of the object should only be greater than or equal to 0, and should never take on negative values. In ProgPy, we can enforce constraints on the range of each state for a state-transition model using the [state limits](https://nasa.github.io/progpy/prog_models_guide.html#state-limits) attribute. \n", + "\n", "To illustrate the use of `state_limits`, we'll use our thrown object model `ThrownObject_ST`, created in an above section. 
" ] }, @@ -1029,12 +1016,12 @@ "metadata": {}, "outputs": [], "source": [ - "event = 'impact'\n", + "event = \"impact\"\n", "simulated_results = m_limits.simulate_to_threshold(events=event, dt=0.005, save_freq=1)\n", "\n", - "print('Example: No State Limits')\n", + "print(\"Example: No State Limits\")\n", "for i, state in enumerate(simulated_results.states):\n", - " print(f'State {i}: {state}')\n", + " print(f\"State {i}: {state}\")\n", "print()" ] }, @@ -1042,13 +1029,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Notice that at the end of the simulation, the object's position (`x`) is negative. This doesn't make sense physically, since the object cannot fall below ground level (at $x=0$)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ + "Notice that at the end of the simulation, the object's position (`x`) is negative. This doesn't make sense physically, since the object cannot fall below ground level (at $x=0$).\n", + "\n", "To avoid this, and keep the state in a realistic range, we can change the `state_limits` attribute of the model. The `state_limits` attribute is a dictionary that contains the state limits for each state. The keys of the dictionary are the state names, and the values are tuples that contain the lower and upper limits of the state. \n", "\n", "In our Thrown Object model, our states are position, which can range from 0 to infinity, and velocity, which we'll limit to not exceed the speed of light." @@ -1065,10 +1047,9 @@ "\n", "m_limits.state_limits = {\n", " # object position may not go below ground height\n", - " 'x': (0, inf),\n", - "\n", + " \"x\": (0, inf),\n", " # object velocity may not exceed the speed of light\n", - " 'v': (-299792458, 299792458)\n", + " \"v\": (-299792458, 299792458),\n", "}" ] }, @@ -1085,12 +1066,12 @@ "metadata": {}, "outputs": [], "source": [ - "event = 'impact'\n", + "event = \"impact\"\n", "simulated_results = m_limits.simulate_to_threshold(events=event, dt=0.005, save_freq=1)\n", "\n", - "print('Example: With State Limits')\n", + "print(\"Example: With State Limits\")\n", "for i, state in enumerate(simulated_results.states):\n", - " print(f'State {i}: {state}')\n", + " print(f\"State {i}: {state}\")\n", "print()" ] }, @@ -1114,15 +1095,17 @@ "metadata": {}, "outputs": [], "source": [ - "x0 = m_limits.initialize(u = {}, z = {})\n", - "x0['x'] = -1 # Initial position value set to an unrealistic value of -1\n", + "x0 = m_limits.initialize(u={}, z={})\n", + "x0[\"x\"] = -1 # Initial position value set to an unrealistic value of -1\n", "\n", - "simulated_results = m_limits.simulate_to_threshold(events=event, dt=0.005, save_freq=1, x = x0)\n", + "simulated_results = m_limits.simulate_to_threshold(\n", + " events=event, dt=0.005, save_freq=1, x=x0\n", + ")\n", "\n", "# Print states\n", - "print('Example 2: With -1 as initial x value')\n", + "print(\"Example 2: With -1 as initial x value\")\n", "for i, state in enumerate(simulated_results.states):\n", - " print('State ', i, ': ', state)\n", + " print(\"State \", i, \": \", state)\n", "print()" ] }, @@ -1146,11 +1129,11 @@ "metadata": {}, "outputs": [], "source": [ - "x = {'x': -5, 'v': 3e8} # Too fast and below the ground\n", - "print('\\t Pre-limit: {}'.format(x))\n", + "x = {\"x\": -5, \"v\": 3e8} # Too fast and below the ground\n", + "print(\"\\t Pre-limit: {}\".format(x))\n", "\n", "x = m_limits.apply_limits(x)\n", - "print('\\t Post-limit: {}'.format(x))" + "print(\"\\t Post-limit: {}\".format(x))" ] }, { @@ -1159,36 +1142,26 @@ "source": [ "In 
conclusion, setting appropriate [state limits](https://nasa.github.io/progpy/prog_models_guide.html#state-limits) is crucial in creating realistic and accurate state-transition models. It ensures that the model's behavior stays within the constraints of the physical system. The limits should be set based on the physical or practical constraints of the system being modeled. \n", "\n", - "As a final note, state limits are especially important for state estimation (to be discussed in the State Estimation section), as it will force the state estimator to only consider states that are possible or feasible. State estimation will be described in more detail in section 08. State Estimation. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom Events" + "As a final note, state limits are especially important for state estimation (to be discussed in the State Estimation section), as it will force the state estimator to only consider states that are possible or feasible. State estimation will be described in more detail in section __[07 State Estimation](07_State%20Estimation.ipynb)__. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In the examples above, we have focused on the simple event of a thrown object hitting the ground or reaching `impact`. In this section, we highlight additional uses of ProgPy's generalizable concept of `events`. " + "### Custom Events" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The term [events](https://nasa.github.io/progpy/prog_models_guide.html#events) is used to describe something to be predicted. Generally in the PHM community, these are referred to as End of Life (EOL). However, they can be much more. \n", + "In the examples above, we have focused on the simple event of a thrown object hitting the ground or reaching `impact`. In this section, we highlight additional uses of ProgPy's generalizable concept of `events`. \n", + "\n", + "The term [events](https://nasa.github.io/progpy/prog_models_guide.html#events) is used to describe something to be predicted. Generally in the PHM community, these are referred to as End of Life (`EOL`). However, they can be much more. \n", + "\n", + "In ProgPy, events can be anything that needs to be predicted. Systems will often have multiple failure modes, and each of these modes can be represented by a separate event. Additionally, events can also be used to predict other events of interest other than failure, such as special system states or warning thresholds. Thus, `events` in ProgPy can represent End of Life (`EOL`), End of Mission (`EOM`), warning thresholds, or any Event of Interest (`EOI`). \n", "\n", - "In ProgPy, events can be anything that needs to be predicted. Systems will often have multiple failure modes, and each of these modes can be represented by a separate event. Additionally, events can also be used to predict other events of interest other than failure, such as special system states or warning thresholds. Thus, `events` in ProgPy can represent End of Life (EOL), End of Mission (EOM), warning thresholds, or any Event of Interest (EOI). " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ "There are a few components of the model that must be specified in order to define events:\n", "\n", "1. The `events` property defines the expected events \n", @@ -1197,27 +1170,12 @@ "\n", "3. 
The `event_state` method returns an estimate of progress towards the threshold \n", "\n", - "Note that because of the interconnected relationship between `threshold_met` and `event_state`, it is only required to define one of these. However, it is generally beneficial to specify both. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To illustrate this concept, we will use the `BatteryElectroChemEOD` model (see section 03. Included Models). In the standard implementation of this model, the defined event is `EOD` or End of Discharge. This occurs when the voltage drops below a pre-defined threshold value. The State-of-Charge (SOC) of the battery is the event state for the EOD event. Recall that event states (and therefore SOC) vary between 0 and 1, where 1 is healthy and 0 signifies the event has occurred. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Suppose we have the requirement that our battery must not fall below 5% State-of-Charge. This would correspond to an `EOD` event state of 0.05. Additionally, let's add events for two warning thresholds, a $\\text{\\textcolor{yellow}{yellow}}$ threshold at 15% SOC and a $\\text{\\textcolor{red}{red}}$ threshold at 10% SOC. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ + "Note that because of the interconnected relationship between `threshold_met` and `event_state`, it is only required to define one of these. However, there are frequently computational advantages to specifying both. \n", + "\n", + "To illustrate this concept, we will use the `BatteryElectroChemEOD` model (see section 03. Included Models). In the standard implementation of this model, the defined event is `EOD` or End of Discharge. This occurs when the voltage drops below a pre-defined threshold value. The State-of-Charge (SOC) of the battery is the event state for the EOD event. Recall that event states (and therefore SOC) vary between 0 and 1, where 1 is healthy and 0 signifies the event has occurred. \n", + "\n", + "Suppose we have the requirement that our battery must not fall below 5% State-of-Charge. This would correspond to an `EOD` event state of 0.05. Additionally, let's add events for two warning thresholds, a $\\text{\\textcolor{yellow}{yellow}}$ threshold at 15% SOC and a $\\text{\\textcolor{red}{red}}$ threshold at 10% SOC. \n", + "\n", "To define the model, we'll start with the necessary imports." 
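For reference, each of the new event states defined below is a linear rescaling of the existing `EOD` event state so that it stays 1 at full charge and reaches 0 exactly at its threshold $T$ (this matches the `event_state` implementation that follows):

$$ES_{T}(k) = \frac{ES_{EOD}(k) - T}{1 - T}, \qquad T \in \{0.15,\; 0.10,\; 0.05\}$$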
] }, @@ -1245,9 +1203,9 @@ "metadata": {}, "outputs": [], "source": [ - "YELLOW_THRESH = 0.15 # 15% SOC\n", - "RED_THRESH = 0.1 # 10% SOC\n", - "THRESHOLD = 0.05 # 5% SOC" + "YELLOW_THRESH = 0.15 # 15% SOC\n", + "RED_THRESH = 0.1 # 10% SOC\n", + "THRESHOLD = 0.05 # 5% SOC" ] }, { @@ -1264,7 +1222,11 @@ "outputs": [], "source": [ "class BattNewEvent(BatteryElectroChemEOD):\n", - " events = BatteryElectroChemEOD.events + ['EOD_warn_yellow', 'EOD_warn_red', 'EOD_requirement_threshold']\n" + " events = BatteryElectroChemEOD.events + [\n", + " \"EOD_warn_yellow\",\n", + " \"EOD_warn_red\",\n", + " \"EOD_requirement_threshold\",\n", + " ]" ] }, { @@ -1281,17 +1243,21 @@ "outputs": [], "source": [ "class BattNewEvent(BattNewEvent):\n", - " \n", " def event_state(self, state):\n", " # Get event state from parent\n", " event_state = super().event_state(state)\n", "\n", " # Add yellow, red, and failure states by scaling EOD state\n", - " event_state['EOD_warn_yellow'] = (event_state['EOD']-YELLOW_THRESH)/(1-YELLOW_THRESH) \n", - " event_state['EOD_warn_red'] = (event_state['EOD']-RED_THRESH)/(1-RED_THRESH)\n", - " event_state['EOD_requirement_threshold'] = (event_state['EOD']-THRESHOLD)/(1-THRESHOLD)\n", + " event_state[\"EOD_warn_yellow\"] = (event_state[\"EOD\"] - YELLOW_THRESH) / (\n", + " 1 - YELLOW_THRESH\n", + " )\n", + " event_state[\"EOD_warn_red\"] = (event_state[\"EOD\"] - RED_THRESH) / (\n", + " 1 - RED_THRESH\n", + " )\n", + " event_state[\"EOD_requirement_threshold\"] = (event_state[\"EOD\"] - THRESHOLD) / (\n", + " 1 - THRESHOLD\n", + " )\n", "\n", - " # Return\n", " return event_state" ] }, @@ -1315,9 +1281,11 @@ "\n", " # Add yell and red states from event_state\n", " event_state = self.event_state(x)\n", - " t_met['EOD_warn_yellow'] = event_state['EOD_warn_yellow'] <= 0\n", - " t_met['EOD_warn_red'] = event_state['EOD_warn_red'] <= 0\n", - " t_met['EOD_requirement_threshold'] = event_state['EOD_requirement_threshold'] <= 0\n", + " t_met[\"EOD_warn_yellow\"] = event_state[\"EOD_warn_yellow\"] <= 0\n", + " t_met[\"EOD_warn_red\"] = event_state[\"EOD_warn_red\"] <= 0\n", + " t_met[\"EOD_requirement_threshold\"] = (\n", + " event_state[\"EOD_requirement_threshold\"] <= 0\n", + " )\n", "\n", " return t_met" ] @@ -1355,9 +1323,8 @@ "source": [ "# Variable (piecewise) future loading scheme\n", "future_loading = Piecewise(\n", - " m.InputContainer,\n", - " [600, 900, 1800, 3000],\n", - " {'i': [2, 1, 4, 2, 3]})" + " m.InputContainer, [600, 900, 1800, 3000], {\"i\": [2, 1, 4, 2, 3]}\n", + ")" ] }, { @@ -1370,26 +1337,32 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [], "source": [ - "simulated_results = m.simulate_to_threshold(future_loading, events='EOD', print = True)\n", - "\n", - "simulated_results.event_states.plot()\n", - "plt.show()" + "simulated_results = m.simulate_to_threshold(future_loading, events=\"EOD\", print=True)" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "Here, we can see the SOC plotted for the different events throughout time. The yellow warning (15% SOC) reaches threshold first, followed by the red warning (10% SOC), new EOD threshold (5% SOC), and finally the original EOD value. 
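As a quick sanity check (an illustrative snippet using the `m` and `simulated_results` objects defined in the cells above), you can call `threshold_met` on the final simulated state; since the simulation ran all the way to `EOD`, the yellow, red, and requirement thresholds should all report `True`:

```python
# Illustrative check using objects defined above (m, simulated_results)
final_state = simulated_results.states[-1]
print(m.threshold_met(final_state))
# Expect EOD_warn_yellow, EOD_warn_red, and EOD_requirement_threshold to be True
```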
" + "simulated_results.event_states.plot(\n", + " xlabel=\"time (s)\", title=\"BattNewEvent model simulation EOD event state\"\n", + ")\n", + "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ + "Here, we can see the SOC plotted for the different events throughout time. The yellow warning (15% SOC) reaches threshold first, followed by the red warning (10% SOC), new EOD threshold (5% SOC), and finally the original EOD value. \n", + "\n", "In this section, we have illustrated how to define custom [events](https://nasa.github.io/progpy/prog_models_guide.html#events) for prognostics models. Events can be used to define anything that a user is interested in predicting, including common values like Remaining Useful Life (RUL) and End of Discharge (EOD), as well as other values like special intermediate states or warning thresholds. " ] }, @@ -1397,7 +1370,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Serialization " + "### Serialization " ] }, { @@ -1408,13 +1381,8 @@ "\n", "Model serialization has a variety of purposes. For example, serialization allows us to save a specific model or model configuration to a file to be loaded later, or can aid us in sending a model to another machine over a network connection. Some users maintain a directory or repository of configured models representing specific systems in their stock.\n", "\n", - "In this section, we'll show how to serialize and deserialize model objects using `pickle` and `JSON` methods. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ + "In this section, we'll show how to serialize and deserialize model objects using `pickle` and `JSON` methods. \n", + "\n", "First, we'll import the necessary modules." ] }, @@ -1435,7 +1403,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "For this example, we'll use the BatteryElectroChemEOD model. We'll start by creating a model object. " + "For this example, we'll use the `BatteryElectroChemEOD` model. We'll start by creating a model object. " ] }, { @@ -1451,13 +1419,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "First, we'll serialize the model in two different ways using 1) `pickle` and 2) `JSON`. Then, we'll plot the results from simulating the deserialized models to show equivalence of the methods. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ + "First, we'll serialize the model in two different ways using 1) `pickle` and 2) `JSON`. Then, we'll plot the results from simulating the deserialized models to show equivalence of the methods. \n", + "\n", "To save using the `pickle` package, we'll serialize the model using the `dump` method. Once saved, we can then deserialize using the `load` method. In practice, deserializing will likely occur in a different file or in a later use-case, but here we deserialize to show equivalence of the saved model. 
" ] }, @@ -1467,8 +1430,8 @@ "metadata": {}, "outputs": [], "source": [ - "pickle.dump(batt, open('save_pkl.pkl', 'wb')) # Serialize model\n", - "load_pkl = pickle.load(open('save_pkl.pkl', 'rb')) # Deserialize model " + "pickle.dump(batt, open(\"save_pkl.pkl\", \"wb\")) # Serialize model\n", + "load_pkl = pickle.load(open(\"save_pkl.pkl\", \"rb\")) # Deserialize model" ] }, { @@ -1484,8 +1447,8 @@ "metadata": {}, "outputs": [], "source": [ - "save_json = batt.to_json() # Serialize model\n", - "json_1 = BatteryElectroChemEOD.from_json(save_json) # Deserialize model" + "save_json = batt.to_json() # Serialize model\n", + "json_1 = BatteryElectroChemEOD.from_json(save_json) # Deserialize model" ] }, { @@ -1505,7 +1468,7 @@ "txtFile.write(save_json)\n", "txtFile.close()\n", "\n", - "with open('save_json.txt') as infile: \n", + "with open(\"save_json.txt\") as infile:\n", " load_json = infile.read()\n", "\n", "json_2 = BatteryElectroChemEOD.from_json(load_json)" @@ -1528,9 +1491,8 @@ "source": [ "# Variable (piecewise) future loading scheme\n", "future_loading = Piecewise(\n", - " batt.InputContainer,\n", - " [600, 1000, 1500, 3000],\n", - " {'i': [3, 2, 1.5, 4]})" + " batt.InputContainer, [600, 1000, 1500, 3000], {\"i\": [3, 2, 1.5, 4]}\n", + ")" ] }, { @@ -1546,13 +1508,13 @@ "metadata": {}, "outputs": [], "source": [ - "# Original model \n", - "results_orig = batt.simulate_to_threshold(future_loading, save_freq = 1)\n", - "# Pickled version \n", - "results_pkl = load_pkl.simulate_to_threshold(future_loading, save_freq = 1)\n", + "# Original model\n", + "results_orig = batt.simulate_to_threshold(future_loading, save_freq=1)\n", + "# Pickled version\n", + "results_pkl = load_pkl.simulate_to_threshold(future_loading, save_freq=1)\n", "# JSON versions\n", - "results_json_1 = json_1.simulate_to_threshold(future_loading, save_freq = 1)\n", - "results_json_2 = json_2.simulate_to_threshold(future_loading, save_freq = 1)\n" + "results_json_1 = json_1.simulate_to_threshold(future_loading, save_freq=1)\n", + "results_json_2 = json_2.simulate_to_threshold(future_loading, save_freq=1)" ] }, { @@ -1568,18 +1530,32 @@ "metadata": {}, "outputs": [], "source": [ - "voltage_orig = [results_orig.outputs[iter]['v'] for iter in range(len(results_orig.times))]\n", - "voltage_pkl = [results_pkl.outputs[iter]['v'] for iter in range(len(results_pkl.times))]\n", - "voltage_json_1 = [results_json_1.outputs[iter]['v'] for iter in range(len(results_json_1.times))]\n", - "voltage_json_2 = [results_json_2.outputs[iter]['v'] for iter in range(len(results_json_2.times))]\n", - "\n", - "plt.plot(results_orig.times,voltage_orig,'-b',label='Original surrogate') \n", - "plt.plot(results_pkl.times,voltage_pkl,'--r',label='Pickled serialized surrogate') \n", - "plt.plot(results_json_1.times,voltage_json_1,'-.g',label='First JSON serialized surrogate') \n", - "plt.plot(results_json_2.times, voltage_json_2, '--y', label='Second JSON serialized surrogate')\n", + "voltage_orig = [\n", + " results_orig.outputs[iter][\"v\"] for iter in range(len(results_orig.times))\n", + "]\n", + "voltage_pkl = [results_pkl.outputs[iter][\"v\"] for iter in range(len(results_pkl.times))]\n", + "voltage_json_1 = [\n", + " results_json_1.outputs[iter][\"v\"] for iter in range(len(results_json_1.times))\n", + "]\n", + "voltage_json_2 = [\n", + " results_json_2.outputs[iter][\"v\"] for iter in range(len(results_json_2.times))\n", + "]\n", + "\n", + "plt.plot(results_orig.times, voltage_orig, \"-b\", label=\"Original surrogate\")\n", + 
"plt.plot(results_pkl.times, voltage_pkl, \"--r\", label=\"Pickled serialized surrogate\")\n", + "plt.plot(\n", + " results_json_1.times, voltage_json_1, \"-.g\", label=\"First JSON serialized surrogate\"\n", + ")\n", + "plt.plot(\n", + " results_json_2.times,\n", + " voltage_json_2,\n", + " \"--y\",\n", + " label=\"Second JSON serialized surrogate\",\n", + ")\n", "plt.legend()\n", - "plt.xlabel('Time (sec)')\n", - "plt.ylabel('Voltage (volts)')" + "plt.title(\"Serialized model simulation outputs\")\n", + "plt.xlabel(\"time (s)\")\n", + "plt.ylabel(\"voltage (V)\")" ] }, { @@ -1597,14 +1573,16 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", - "\n", "# Check if the arrays are the same\n", - "are_arrays_same = np.array_equal(voltage_orig, voltage_pkl) and \\\n", - " np.array_equal(voltage_orig, voltage_json_1) and \\\n", - " np.array_equal(voltage_orig, voltage_json_2)\n", + "are_arrays_same = (\n", + " np.array_equal(voltage_orig, voltage_pkl)\n", + " and np.array_equal(voltage_orig, voltage_json_1)\n", + " and np.array_equal(voltage_orig, voltage_json_2)\n", + ")\n", "\n", - "print(f\"The simulated results from the original and serialized models are {'identical. This means that our serialization works!' if are_arrays_same else 'not identical. This means that our serialization does not work.'}\")" + "print(\n", + " f\"The simulated results from the original and serialized models are {'identical. This means that our serialization works!' if are_arrays_same else 'not identical. This means that our serialization does not work.'}\"\n", + ")" ] }, { @@ -1618,7 +1596,311 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Conclusions" + "## Simplified Battery Model Example" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is an example of a somewhat more complicated model, in this case a battery. We will be implementing the simplified battery model introduced by [Gina Sierra, et. al.](https://www.sciencedirect.com/science/article/pii/S0951832018301406)\n", + "\n", + "First, we will import `PrognosticsModel`, which the parent class for all ProgPy models." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy import PrognosticsModel" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### State Transition" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The first step to creating a physics-based model is implementing state transition. From the paper we see one state (SOC) and one state transition equation:\n", + "\n", + "$$SOC(k+1) = SOC(k) - P(k)*\\Delta t * E_{crit}^{-1} + w_2(k)$$\n", + "\n", + "where $k$ is discrete time. The $w$ term is process noise. This can be omitted, since it's handled by ProgPy. \n", + "\n", + "In this equation we see one input ($P$, power). Note that the previous battery model uses current, where this uses power. With this information, we can start defining our model. First, we start by declaring our inputs and states:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(PrognosticsModel):\n", + " inputs = [\"P\"]\n", + " states = [\"SOC\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next we define parameters. In this case the parameters are the initial `SOC` state (1) and the `E_crit` (Internal Total Energy). 
We get the value for $E_{crit}$ from the paper.\n", + "\n", + "***Note:** This won't actually subclass in practice, but it's used to break apart model definition into chunks.*" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " default_parameters = {\n", + " \"E_crit\": 202426.858, # Internal Total Energy\n", + " \"x0\": {\n", + " \"SOC\": 1, # State of Charge\n", + " },\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We know that SOC will always be between 0 and 1, so we can specify that explicitly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " state_limits = {\n", + " \"SOC\": (0.0, 1.0),\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we define the state transition equation. There are two methods for doing this: `dx` (for continuous) and `next_state` (for discrete). We will use the `dx` function since the model is continuous." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " def dx(self, x, u):\n", + " return self.StateContainer({\"SOC\": -u[\"P\"] / self[\"E_crit\"]})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Outputs" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that state transition is defined, the next step is to define the outputs of the function. From the paper we have the following output equations:\n", + "\n", + "$$v(k) = v_{oc}(k) - i(k) * R_{int} + \\eta (k)$$\n", + "\n", + "$$v_{oc}(k) = v_L - \\lambda ^ {\\gamma * SOC(k)} - \\mu * e ^ {-\\beta * \\sqrt{SOC(k)}}$$\n", + "\n", + "$$i(k) = \\frac{v_{oc}(k) - \\sqrt{v_{oc}(k)^2 - 4 * R_{int} * P(k)}}{2 * R_{int}(k)}$$" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There is one output here (v, voltage), the same one input (P, Power), and a few lumped parameters: $v_L$, $\\lambda$, $\\gamma$, $\\mu$, $\\beta$, and $R_{int}$. The default parameters are found in the paper.\n", + "\n", + "$\\eta$ is the measurement noise, which ProgPy handles, so that's omitted from the equation below.\n", + "\n", + "***Note**: There is a typo in the paper where the sign of the second term in the $v_{oc}$ term. It should be negative (like above), but is reported as positive in the paper.*\n", + "\n", + "We can update the definition of the model to include this output and parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " outputs = [\"v\"]\n", + "\n", + " default_parameters = {\n", + " \"E_crit\": 202426.858,\n", + " \"v_L\": 11.148,\n", + " \"lambda\": 0.046,\n", + " \"gamma\": 3.355,\n", + " \"mu\": 2.759,\n", + " \"beta\": 8.482,\n", + " \"R_int\": 0.027,\n", + " \"x0\": {\n", + " \"SOC\": 1,\n", + " },\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The input ($P(k)$) is also used in the output equation, which means it's part of the state of the system. We will update the states in `next_state`. 
\n", + "\n", + "Remember that in the earlier example, we defined the state transition with ProgPy's `dx` method because the model was continuous. Here, with the addition of power, the model becomes discrete, so we must now use ProgPy's `next_state` method to define state transition." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " states = [\"SOC\", \"P\"]\n", + "\n", + " def next_state(self, x, u, dt):\n", + " x[\"SOC\"] = x[\"SOC\"] - u[\"P\"] * dt / self[\"E_crit\"]\n", + " x[\"P\"] = u[\"P\"]\n", + "\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will also add a default `P` state." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " default_parameters = {\n", + " \"E_crit\": 202426.858,\n", + " \"v_L\": 11.148,\n", + " \"lambda\": 0.046,\n", + " \"gamma\": 3.355,\n", + " \"mu\": 2.759,\n", + " \"beta\": 8.482,\n", + " \"R_int\": 0.027,\n", + " \"x0\": {\n", + " \"SOC\": 1,\n", + " \"P\": 0.01, # Added P\n", + " },\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we're ready to define the output equations." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import math\n", + "\n", + "\n", + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " def output(self, x):\n", + " v_oc = (\n", + " self[\"v_L\"]\n", + " - self[\"lambda\"] ** (self[\"gamma\"] * x[\"SOC\"])\n", + " - self[\"mu\"] * math.exp(-self[\"beta\"] * math.sqrt(x[\"SOC\"]))\n", + " )\n", + " i = (v_oc - math.sqrt(v_oc**2 - 4 * self[\"R_int\"] * x[\"P\"])) / (\n", + " 2 * self[\"R_int\"]\n", + " )\n", + " v = v_oc - i * self[\"R_int\"]\n", + " return self.OutputContainer({\"v\": v})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Events\n", + "Finally we can define events. This is an easy case because our event state (`SOC`) is part of the model state. So we will simply define a single event (`EOD`: End of Discharge), where `SOC` is progress towards that event." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " events = [\"EOD\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then for our event state, we simply extract the relevant state" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " def event_state(self, x):\n", + " return {\"EOD\": x[\"SOC\"]}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The threshold of the event is defined as the state where the event state (`EOD`) is 0.\n", + "\n", + "We've now defined a complete model. Now it's ready to be used for state estimation or prognostics, like any model distributed with ProgPy.\n", + "\n", + "Note that this model can be extended by changing the parameters `ecrit` and `r` to steady states. This will help the model account for the effects of aging, since they will be estimated with each state estimation step." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion" ] }, { @@ -1627,7 +1909,7 @@ "source": [ "In these examples, we have described how to create new physics-based models. We have illustrated how to construct a generic physics-based model, as well as highlighted some specific types of models including linear models and direct models. We highlighted the matrix data access feature for using matrix operations more efficiently. Additionally, we discussed a few important components of any prognostics model including derived parameters, state limits, and events. \n", "\n", - "With these tools, users are well-equipped to build their own prognostics models for their specific physics-based use-cases. In the next example, we'll discuss how to create data-driven models." + "With these tools, users are well-equipped to build their own prognostics models for their specific physics-based use-cases. In the next notebook __[05 Data Driven](05_Data%20Driven.ipynb)__, we'll discuss how to create data-driven models." ] } ], @@ -1647,7 +1929,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.0" + "version": "3.13.0" }, "orig_nbformat": 4, "vscode": { diff --git a/docs/_downloads/43457c5023c173fd9899433adc3e4b5b/sim_battery_eol.py b/docs/_downloads/43457c5023c173fd9899433adc3e4b5b/sim_battery_eol.py index 6f775e8d..5f1df322 100644 --- a/docs/_downloads/43457c5023c173fd9899433adc3e4b5b/sim_battery_eol.py +++ b/docs/_downloads/43457c5023c173fd9899433adc3e4b5b/sim_battery_eol.py @@ -2,24 +2,25 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a battery being simulated until End of Life (EOL). Battery capacity decreases with use. In this case, EOL is defined as when the battery capacity falls below some acceptable threshold (i.e., what we define as useful capacity). +Example of a battery being simulated until End of Life (EOL). Battery capacity decreases with use. In this case, EOL is defined as when the battery capacity falls below some acceptable threshold (i.e., what we define as useful capacity). 
""" import matplotlib.pyplot as plt from progpy.models import BatteryElectroChem as Battery -def run_example(): + +def run_example(): # Step 1: Create a model object batt = Battery() - # Step 2: Define future loading function - # Here we're using a function designed to charge until 0.95, + # Step 2: Define future loading function + # Here we're using a function designed to charge until 0.95, # then discharge until 0.05 load = 1 def future_loading(t, x=None): - nonlocal load + nonlocal load # Rule for loading after initialization if x is not None: @@ -30,26 +31,33 @@ def future_loading(t, x=None): elif event_state["EOD"] < 0.05: load = -1 # Charge # Rule for loading at initialization - return batt.InputContainer({'i': load}) + return batt.InputContainer({"i": load}) # Step 3: Simulate to Capacity is insufficient Threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") options = { - 'save_freq': 1000, # Frequency at which results are saved - 'dt': 2, # Timestep - 'threshold_keys': ['InsufficientCapacity'], # Simulate to InsufficientCapacity - 'print': True + "save_freq": 1000, # Frequency at which results are saved + "dt": 2, # Timestep + "threshold_keys": ["InsufficientCapacity"], # Simulate to InsufficientCapacity + "print": True, } simulated_results = batt.simulate_to_threshold(future_loading, **options) # Step 4: Plot Results - simulated_results.inputs.plot(ylabel='Current drawn (amps)') - simulated_results.event_states.plot(ylabel='Event States', labels={'EOD': 'State of Charge (SOC)', 'InsufficientCapacity': 'State of Health (SOH)'}) + simulated_results.inputs.plot(ylabel="Current drawn (amps)") + simulated_results.event_states.plot( + ylabel="Event States", + labels={ + "EOD": "State of Charge (SOC)", + "InsufficientCapacity": "State of Health (SOH)", + }, + ) plt.ylim([0, 1]) plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/445cf7bb5872bc3fb4468614273223ce/thrown_object_example.py b/docs/_downloads/445cf7bb5872bc3fb4468614273223ce/thrown_object_example.py index 5f6a29a7..4689ed1b 100644 --- a/docs/_downloads/445cf7bb5872bc3fb4468614273223ce/thrown_object_example.py +++ b/docs/_downloads/445cf7bb5872bc3fb4468614273223ce/thrown_object_example.py @@ -2,11 +2,11 @@ """ This example performs a state estimation and prediction with uncertainty given a Prognostics Model. Unlike basic_example, this example uses a model with multiple events (ThrownObject). 
Prediction only ends when all events are met - + Method: An instance of the Thrown Object model in prog_models is created, and the prediction process is achieved in three steps: 1) State estimation of the current state is performed using a chosen state_estimator, and samples are drawn from this estimate 2) Prediction of future states (with uncertainty) and the times at which the event thresholds will be reached -Results: +Results: i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction ii) Time event is predicted to occur (with uncertainty) """ @@ -19,11 +19,13 @@ from pprint import pprint + def run_example(): # Step 1: Setup model & future loading - m = ThrownObject(process_noise = 0.25, measurement_noise = 0.2) + m = ThrownObject(process_noise=0.25, measurement_noise=0.2) initial_state = m.initialize() - def future_loading(t, x = None): + + def future_loading(t, x=None): return m.InputContainer({}) # Step 2: Demonstrating state estimator @@ -31,13 +33,15 @@ def future_loading(t, x = None): # Step 2a: Setup NUM_SAMPLES = 1000 - filt = state_estimators.ParticleFilter(m, initial_state, num_particles = NUM_SAMPLES) + filt = state_estimators.ParticleFilter(m, initial_state, num_particles=NUM_SAMPLES) # VVV Uncomment this to use UKF State Estimator VVV # filt = state_estimators.UnscentedKalmanFilter(batt, initial_state) # Step 2b: Print & Plot Prior State u = m.InputContainer({}) # No input for ThrownObject - z = m.output(initial_state) # Measured output (here as an example we use the model output instead) + z = m.output( + initial_state + ) # Measured output (here as an example we use the model output instead) # Note: In practice, replace this with actual measured data filt.estimate(0.1, u, z) @@ -56,13 +60,15 @@ def future_loading(t, x = None): mc_results = mc.predict(samples, future_loading, dt=STEP_SIZE, horizon=8) print("\nPredicted Time of Event:") pprint(mc_results.time_of_event.metrics()) # Note this takes some time - mc_results.time_of_event.plot_hist(keys = 'impact') - mc_results.time_of_event.plot_hist(keys = 'falling') - + mc_results.time_of_event.plot_hist(keys="impact") + mc_results.time_of_event.plot_hist(keys="falling") + # Step 4: Show all plots import matplotlib.pyplot as plt # For plotting + plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/44de953994033adbab05302cdc8c05e4/horizon.py b/docs/_downloads/44de953994033adbab05302cdc8c05e4/horizon.py new file mode 100644 index 00000000..74d1ec24 --- /dev/null +++ b/docs/_downloads/44de953994033adbab05302cdc8c05e4/horizon.py @@ -0,0 +1,70 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. + +""" +This example performs a state estimation and prediction with uncertainty given a Prognostics Model with a specific prediction horizon. This prediction horizon marks the end of the "time of interest" for the prediction. 
Often this represents the end of a mission or sufficiently in the future where the user is unconcerned with the events + +Method: An instance of the Thrown Object model in progpy is created, and the prediction process is achieved in three steps: + 1) State estimation of the current state is performed using a chosen state_estimator, and samples are drawn from this estimate + 2) Prediction of future states (with uncertainty) and the times at which the event thresholds will be reached, within the prediction horizon. All events outside the horizon come back as None and are ignored in metrics + +Results: + i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction + ii) Time event is predicted to occur (with uncertainty) +""" + +import numpy as np +from progpy.models.thrown_object import ThrownObject +from progpy.predictors import MonteCarlo +from progpy.uncertain_data import MultivariateNormalDist +from pprint import pprint + + +def run_example(): + # Step 1: Setup model & future loading + m = ThrownObject(process_noise=0.5, measurement_noise=0.15) + initial_state = m.initialize() + + NUM_SAMPLES = 1000 + x = MultivariateNormalDist( + initial_state.keys(), + initial_state.values(), + np.diag([x_i * 0.01 for x_i in initial_state.values()]), + ) + + # Step 2: Demonstrating Predictor + print("\nPerforming Prediction Step...") + + # Step 2a: Setup Predictor + mc = MonteCarlo(m) + + # Step 2b: Perform a prediction + # THIS IS WHERE WE DIVERGE FROM THE THROWN_OBJECT_EXAMPLE + # Here we set a prediction horizon + # We're saying we are not interested in any events that occur after this time + PREDICTION_HORIZON = 7.7 + STEP_SIZE = 0.01 + mc_results = mc.predict( + x, n_samples=NUM_SAMPLES, dt=STEP_SIZE, horizon=PREDICTION_HORIZON + ) + + print("\nPredicted Time of Event:") + metrics = mc_results.time_of_event.metrics() + pprint(metrics) # Note this takes some time + mc_results.time_of_event.plot_hist(keys="impact") + mc_results.time_of_event.plot_hist(keys="falling") + + print( + "\nSamples where impact occurs before horizon: {:.2f}%".format( + metrics["impact"]["number of samples"] / NUM_SAMPLES * 100 + ) + ) + + # Step 4: Show all plots + import matplotlib.pyplot as plt # For plotting + + plt.show() + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/4608d3a7496861ab8290f250931dec05/matrix_model.py b/docs/_downloads/4608d3a7496861ab8290f250931dec05/matrix_model.py index 5ec088b4..6c6f10d4 100644 --- a/docs/_downloads/4608d3a7496861ab8290f250931dec05/matrix_model.py +++ b/docs/_downloads/4608d3a7496861ab8290f250931dec05/matrix_model.py @@ -7,6 +7,7 @@ In this example, a model is designed to simulate a thrown object using matrix notation (instead of dictionary notation as in the standard model). The implementation of the model is comparable to a standard model, except that it uses the x.matrix, u.matrix, and z.matirx to compute matrix operations within each function. 
""" + def run_example(): from prog_models import PrognosticsModel import numpy as np @@ -17,33 +18,36 @@ class ThrownObject(PrognosticsModel): inputs = [] # no inputs, no way to control states = [ - 'x', # Position (m) - 'v' # Velocity (m/s) - ] + "x", # Position (m) + "v", # Velocity (m/s) + ] outputs = [ - 'x' # Position (m) + "x" # Position (m) ] events = [ - 'falling', # Event- object is falling - 'impact' # Event- object has impacted ground + "falling", # Event- object is falling + "impact", # Event- object has impacted ground ] is_vectorized = True # The Default parameters. Overwritten by passing parameters dictionary into constructor default_parameters = { - 'thrower_height': 1.83, # m - 'throwing_speed': 40, # m/s - 'g': -9.81, # Acceleration due to gravity in m/s^2 - 'process_noise': 0.0 # amount of noise in each step + "thrower_height": 1.83, # m + "throwing_speed": 40, # m/s + "g": -9.81, # Acceleration due to gravity in m/s^2 + "process_noise": 0.0, # amount of noise in each step } # Define the model equations - def initialize(self, u = None, z = None): + def initialize(self, u=None, z=None): # Note: states are returned using StateContainer - return self.StateContainer({ - 'x': self.parameters['thrower_height'], - 'v': self.parameters['throwing_speed']}) + return self.StateContainer( + { + "x": self.parameters["thrower_height"], + "v": self.parameters["throwing_speed"], + } + ) def next_state(self, x, u, dt): # Here we will use the matrix version for each variable @@ -52,29 +56,30 @@ def next_state(self, x, u, dt): # and u.matrix is in the order of model.inputs, above A = np.array([[0, 1], [0, 0]]) # State transition matrix - B = np.array([[0], [self.parameters['g']]]) # Acceleration due to gravity + B = np.array([[0], [self.parameters["g"]]]) # Acceleration due to gravity x.matrix += (np.matmul(A, x.matrix) + B) * dt return x - + def output(self, x): # Note- states can still be accessed a dictionary - return self.OutputContainer({'x': x['x']}) + return self.OutputContainer({"x": x["x"]}) # This is actually optional. Leaving thresholds_met empty will use the event state to define thresholds. # Threshold = Event State == 0. 
However, this implementation is more efficient, so we included it def threshold_met(self, x): - return { - 'falling': x['v'] < 0, - 'impact': x['x'] <= 0 - } + return {"falling": x["v"] < 0, "impact": x["x"] <= 0} - def event_state(self, x): - x_max = x['x'] + np.square(x['v'])/(-self.parameters['g']*2) # Use speed and position to estimate maximum height - x_max = np.where(x['v'] > 0, x['x'], x_max) # 1 until falling begins + def event_state(self, x): + x_max = x["x"] + np.square(x["v"]) / ( + -self.parameters["g"] * 2 + ) # Use speed and position to estimate maximum height + x_max = np.where(x["v"] > 0, x["x"], x_max) # 1 until falling begins return { - 'falling': np.maximum(x['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed - 'impact': np.maximum(x['x']/x_max,0) # then it's fraction of height + "falling": np.maximum( + x["v"] / self.parameters["throwing_speed"], 0 + ), # Throwing speed is max speed + "impact": np.maximum(x["x"] / x_max, 0), # then it's fraction of height } # Now we can use the model @@ -83,23 +88,21 @@ def event_state(self, x): # Use the model x = thrown_object.initialize() - print('State at 0.1 seconds: ', thrown_object.next_state(x, {}, 0.1)) + print("State at 0.1 seconds: ", thrown_object.next_state(x, {}, 0.1)) # But you can also initialize state directly, like so: - x = thrown_object.StateContainer({'x': 1.93, 'v': 40}) - print('State at 0.1 seconds: ', thrown_object.next_state(x, None, 0.1)) + x = thrown_object.StateContainer({"x": 1.93, "v": 40}) + print("State at 0.1 seconds: ", thrown_object.next_state(x, None, 0.1)) # Now lets use it for simulation. def future_loading(t, x=None): return thrown_object.InputContainer({}) thrown_object.simulate_to_threshold( - future_loading, - print = True, - threshold_keys = 'impact', - dt = 0.1, - save_freq = 1) - -# This allows the module to be executed directly -if __name__ == '__main__': + future_loading, print=True, threshold_keys="impact", dt=0.1, save_freq=1 + ) + + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/4665ebe5ec48228f3eb06ed7cb37fbac/param_est.ipynb b/docs/_downloads/4665ebe5ec48228f3eb06ed7cb37fbac/param_est.ipynb index 9d56072d..eb5f4c3a 100644 --- a/docs/_downloads/4665ebe5ec48228f3eb06ed7cb37fbac/param_est.ipynb +++ b/docs/_downloads/4665ebe5ec48228f3eb06ed7cb37fbac/param_est.ipynb @@ -54,16 +54,16 @@ "outputs": [], "source": [ "times = [0, 1, 2, 3, 4, 5, 6, 7]\n", - "inputs = [{}]*8\n", + "inputs = [{}] * 8\n", "outputs = [\n", - " {'x': 1.83},\n", - " {'x': 36.5091999066245},\n", - " {'x': 60.05364349596605},\n", - " {'x': 73.23733081022635},\n", - " {'x': 76.47528104941956},\n", - " {'x': 69.9146810161441},\n", - " {'x': 53.74272753819968},\n", - " {'x': 28.39355725512131},\n", + " {\"x\": 1.83},\n", + " {\"x\": 36.5091999066245},\n", + " {\"x\": 60.05364349596605},\n", + " {\"x\": 73.23733081022635},\n", + " {\"x\": 76.47528104941956},\n", + " {\"x\": 69.9146810161441},\n", + " {\"x\": 53.74272753819968},\n", + " {\"x\": 28.39355725512131},\n", "]" ] }, @@ -119,7 +119,7 @@ "metadata": {}, "outputs": [], "source": [ - "keys = ['thrower_height', 'throwing_speed', 'g']" + "keys = [\"thrower_height\", \"throwing_speed\", \"g\"]" ] }, { @@ -137,10 +137,10 @@ "outputs": [], "source": [ "# Printing state before\n", - "print('Model configuration before')\n", + "print(\"Model configuration before\")\n", "for key in keys:\n", " print(\"-\", key, m.parameters[key])\n", - "print(' Error: ', m.calc_error(times, inputs, 
outputs, dt=0.1))" + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=0.1))" ] }, { @@ -159,7 +159,7 @@ "metadata": {}, "outputs": [], "source": [ - "m.estimate_params(times = times, inputs = inputs, outputs = outputs, keys = keys, dt=0.1)" + "m.estimate_params(times=times, inputs=inputs, outputs=outputs, keys=keys, dt=0.1)" ] }, { @@ -176,10 +176,10 @@ "metadata": {}, "outputs": [], "source": [ - "print('\\nOptimized configuration')\n", + "print(\"\\nOptimized configuration\")\n", "for key in keys:\n", " print(\"-\", key, m.parameters[key])\n", - "print(' Error: ', m.calc_error(times, inputs, outputs, dt=0.1))" + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=0.1))" ] }, { @@ -222,11 +222,13 @@ "outputs": [], "source": [ "m = ThrownObject(thrower_height=20, throwing_speed=3.1, g=15)\n", - "m.estimate_params(times = times, inputs = inputs, outputs = outputs, keys = keys, dt=0.1, tol=1e-6)\n", - "print('\\nOptimized configuration')\n", + "m.estimate_params(\n", + " times=times, inputs=inputs, outputs=outputs, keys=keys, dt=0.1, tol=1e-6\n", + ")\n", + "print(\"\\nOptimized configuration\")\n", "for key in keys:\n", " print(\"-\", key, m.parameters[key])\n", - "print(' Error: ', m.calc_error(times, inputs, outputs, dt=0.1))" + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=0.1))" ] }, { @@ -262,15 +264,23 @@ "metadata": {}, "outputs": [], "source": [ - "m.parameters['thrower_height'] = 3.1\n", - "m.parameters['throwing_speed'] = 29\n", + "m.parameters[\"thrower_height\"] = 3.1\n", + "m.parameters[\"throwing_speed\"] = 29\n", "\n", "# Using MAE, or Mean Absolute Error instead of the default Mean Squared Error.\n", - "m.estimate_params(times = times, inputs = inputs, outputs = outputs, keys = keys, dt=0.1, tol=1e-9, error_method='MAX_E')\n", - "print('\\nOptimized configuration')\n", + "m.estimate_params(\n", + " times=times,\n", + " inputs=inputs,\n", + " outputs=outputs,\n", + " keys=keys,\n", + " dt=0.1,\n", + " tol=1e-9,\n", + " error_method=\"MAX_E\",\n", + ")\n", + "print(\"\\nOptimized configuration\")\n", "for key in keys:\n", " print(\"-\", key, m.parameters[key])\n", - "print(' Error: ', m.calc_error(times, inputs, outputs, dt=0.1, method='MAX_E'))" + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=0.1, method=\"MAX_E\"))" ] }, { @@ -307,14 +317,14 @@ "metadata": {}, "outputs": [], "source": [ - "m = ThrownObject(process_noise = 1)\n", - "results = m.simulate_to_threshold(save_freq=0.5, dt=('auto', 0.1))\n", + "m = ThrownObject(process_noise=1)\n", + "results = m.simulate_to_threshold(save_freq=0.5, dt=(\"auto\", 0.1))\n", "\n", "# Resetting parameters to their incorrectly set values.\n", - "m.parameters['thrower_height'] = 20\n", - "m.parameters['throwing_speed'] = 3.1\n", - "m.parameters['g'] = 15\n", - "keys = ['thrower_height', 'throwing_speed', 'g']" + "m.parameters[\"thrower_height\"] = 20\n", + "m.parameters[\"throwing_speed\"] = 3.1\n", + "m.parameters[\"g\"] = 15\n", + "keys = [\"thrower_height\", \"throwing_speed\", \"g\"]" ] }, { @@ -323,11 +333,13 @@ "metadata": {}, "outputs": [], "source": [ - "m.estimate_params(times = results.times, inputs = results.inputs, outputs = results.outputs, keys = keys)\n", - "print('\\nOptimized configuration')\n", + "m.estimate_params(\n", + " times=results.times, inputs=results.inputs, outputs=results.outputs, keys=keys\n", + ")\n", + "print(\"\\nOptimized configuration\")\n", "for key in keys:\n", " print(\"-\", key, m.parameters[key])\n", - "print(' Error: ', 
m.calc_error(results.times, results.inputs, results.outputs))" + "print(\" Error: \", m.calc_error(results.times, results.inputs, results.outputs))" ] }, { @@ -351,6 +363,7 @@ "# Creating a new model with the original parameters to compare to the model with noise.\n", "true_Values = ThrownObject()\n", "\n", + "\n", "# Function to determine the Absolute Mean Error (AME) of the model parameters.\n", "def AME(m, keys):\n", " error = 0\n", @@ -391,17 +404,23 @@ "outputs": [], "source": [ "for count in range(10):\n", - " m = ThrownObject(process_noise = 1)\n", - " results = m.simulate_to_threshold(save_freq=0.5, dt=('auto', 0.1))\n", - " \n", + " m = ThrownObject(process_noise=1)\n", + " results = m.simulate_to_threshold(save_freq=0.5, dt=(\"auto\", 0.1))\n", + "\n", " # Resetting parameters to their originally incorrectly set values.\n", - " m.parameters['thrower_height'] = 20\n", - " m.parameters['throwing_speed'] = 3.1\n", - " m.parameters['g'] = 15\n", + " m.parameters[\"thrower_height\"] = 20\n", + " m.parameters[\"throwing_speed\"] = 3.1\n", + " m.parameters[\"g\"] = 15\n", "\n", - " m.estimate_params(times = results.times, inputs = results.inputs, outputs = results.outputs, keys = keys, dt=0.1)\n", - " error = AME(m, ['thrower_height', 'throwing_speed', 'g'])\n", - " print(f'Estimate Call Number {count} - AME Error {error}')" + " m.estimate_params(\n", + " times=results.times,\n", + " inputs=results.inputs,\n", + " outputs=results.outputs,\n", + " keys=keys,\n", + " dt=0.1,\n", + " )\n", + " error = AME(m, [\"thrower_height\", \"throwing_speed\", \"g\"])\n", + " print(f\"Estimate Call Number {count} - AME Error {error}\")" ] }, { @@ -421,7 +440,7 @@ "times, inputs, outputs = [], [], []\n", "m = ThrownObject(process_noise=1)\n", "for count in range(20):\n", - " results = m.simulate_to_threshold(save_freq=0.5, dt=('auto', 0.1))\n", + " results = m.simulate_to_threshold(save_freq=0.5, dt=(\"auto\", 0.1))\n", " times.append(results.times)\n", " inputs.append(results.inputs)\n", " outputs.append(results.outputs)" @@ -441,9 +460,9 @@ "metadata": {}, "outputs": [], "source": [ - "m.parameters['thrower_height'] = 20\n", - "m.parameters['throwing_speed'] = 3.1\n", - "m.parameters['g'] = 15" + "m.parameters[\"thrower_height\"] = 20\n", + "m.parameters[\"throwing_speed\"] = 3.1\n", + "m.parameters[\"g\"] = 15" ] }, { @@ -461,11 +480,11 @@ "outputs": [], "source": [ "m.estimate_params(times=times, inputs=inputs, outputs=outputs, keys=keys, dt=0.1)\n", - "print('\\nOptimized configuration')\n", + "print(\"\\nOptimized configuration\")\n", "for key in keys:\n", " print(\"-\", key, m.parameters[key])\n", - "error = AME(m, ['thrower_height', 'throwing_speed', 'g'])\n", - "print('AME Error: ', error)" + "error = AME(m, [\"thrower_height\", \"throwing_speed\", \"g\"])\n", + "print(\"AME Error: \", error)" ] }, { diff --git a/docs/_downloads/46d1f3f0ff1be63e0a4a42caa85a65f5/ensemble.py b/docs/_downloads/46d1f3f0ff1be63e0a4a42caa85a65f5/ensemble.py index ee0d9c8b..3591ee2d 100644 --- a/docs/_downloads/46d1f3f0ff1be63e0a4a42caa85a65f5/ensemble.py +++ b/docs/_downloads/46d1f3f0ff1be63e0a4a42caa85a65f5/ensemble.py @@ -6,13 +6,13 @@ .. dropdown:: More details - Ensemble model is an approach to modeling where one or more different models are simulated together and then aggregated into a single prediction. This is generally done to improve the accuracy of prediction when you have multiple models that each represent part of the behavior or represent a distribution of different behaviors. 
+ Ensemble model is an approach to modeling where one or more different models are simulated together and then aggregated into a single prediction. This is generally done to improve the accuracy of prediction when you have multiple models that each represent part of the behavior or represent a distribution of different behaviors. - In this example, 4 different equivalent circuit models are setup with different configuration parameters. They are each simulated individually. Then an ensemble model is created for the 4 models, and that is simulated individually. The results are plotted. + In this example, 4 different equivalent circuit models are setup with different configuration parameters. They are each simulated individually. Then an ensemble model is created for the 4 models, and that is simulated individually. The results are plotted. The results are partially skewed by a poorly configured model, so we change the aggregation method to account for that. and resimulate, showing the results - Finally, an ensemble model is created for two different models with different states. That model is simulated with time and the results are plotted. + Finally, an ensemble model is created for two different models with different states. That model is simulated with time and the results are plotted. """ from matplotlib import pyplot as plt @@ -21,48 +21,65 @@ from progpy.datasets import nasa_battery from progpy.models import BatteryElectroChemEOD, BatteryCircuit + def run_example(): # Example 1: Different model configurations # Download data - print('downloading data (this may take a while)...') + print("downloading data (this may take a while)...") data = nasa_battery.load_data(8)[1] # Prepare data RUN_ID = 0 - test_input = [{'i': i} for i in data[RUN_ID]['current']] - test_time = data[RUN_ID]['relativeTime'] + test_input = [{"i": i} for i in data[RUN_ID]["current"]] + test_time = data[RUN_ID]["relativeTime"] # Setup models - # In this case, we have some uncertainty on the parameters of the model, + # In this case, we have some uncertainty on the parameters of the model, # so we're setting up a few versions of the circuit model with different parameters. 
- print('Setting up models...') - m_circuit = BatteryCircuit(process_noise = 0, measurement_noise = 0) - m_circuit_2 = BatteryCircuit(process_noise = 0, measurement_noise = 0, qMax = 7860) - m_circuit_3 = BatteryCircuit(process_noise = 0, measurement_noise = 0, qMax = 6700, Rs = 0.055) - m_ensemble = EnsembleModel((m_circuit, m_circuit_2, m_circuit_3), process_noise = 0, measurement_noise = 0) + print("Setting up models...") + m_circuit = BatteryCircuit(process_noise=0, measurement_noise=0) + m_circuit_2 = BatteryCircuit(process_noise=0, measurement_noise=0, qMax=7860) + m_circuit_3 = BatteryCircuit( + process_noise=0, measurement_noise=0, qMax=6700, Rs=0.055 + ) + m_ensemble = EnsembleModel( + (m_circuit, m_circuit_2, m_circuit_3), process_noise=0, measurement_noise=0 + ) # Evaluate models - print('Evaluating models...') + print("Evaluating models...") + def future_loading(t, x=None): for i, mission_time in enumerate(test_time): if mission_time > t: return m_circuit.InputContainer(test_input[i]) + results_ensemble = m_ensemble.simulate_to(test_time.iloc[-1], future_loading) # Plot results - print('Producing figures...') - plt.plot(test_time, data[RUN_ID]['voltage'], color='green', label='ground truth') - plt.plot(results_ensemble.times, [z['v'] for z in results_ensemble.outputs], color='red', label='ensemble') + print("Producing figures...") + plt.plot(test_time, data[RUN_ID]["voltage"], color="green", label="ground truth") + plt.plot( + results_ensemble.times, + [z["v"] for z in results_ensemble.outputs], + color="red", + label="ensemble", + ) plt.legend() # Note: This is a very poor performing model # there was an outlier model (m_circuit_3), which effected the quality of the model prediction # This can be resolved by using a different aggregation_method. 
For example, median # In a real scenario, you would likely remove this model, this is just to illustrate outlier elimination - print('Updating with Median ') - m_ensemble.parameters['aggregation_method'] = np.median + print("Updating with Median ") + m_ensemble.parameters["aggregation_method"] = np.median results_ensemble = m_ensemble.simulate_to(test_time.iloc[-1], future_loading) - plt.plot(results_ensemble.times, [z['v'] for z in results_ensemble.outputs], color='orange', label='ensemble - median') + plt.plot( + results_ensemble.times, + [z["v"] for z in results_ensemble.outputs], + color="orange", + label="ensemble - median", + ) plt.legend() # Example 2: Different Models @@ -71,31 +88,49 @@ def future_loading(t, x=None): # These two models share one state, but besides that they have different states # Setup Model - print('Setting up models...') - m_electro = BatteryElectroChemEOD(process_noise = 0, measurement_noise = 0) - m_ensemble = EnsembleModel((m_circuit, m_electro), process_noise = 0, measurement_noise=0) + print("Setting up models...") + m_electro = BatteryElectroChemEOD(process_noise=0, measurement_noise=0) + m_ensemble = EnsembleModel( + (m_circuit, m_electro), process_noise=0, measurement_noise=0 + ) # Evaluate models - print('Evaluating models...') - print('\tEnsemble') + print("Evaluating models...") + print("\tEnsemble") results_ensemble = m_ensemble.simulate_to(test_time.iloc[-1], future_loading) - print('\tCircuit 1') + print("\tCircuit 1") results_circuit1 = m_circuit.simulate_to(test_time.iloc[-1], future_loading) - print('\tElectroChem') + print("\tElectroChem") results_electro = m_electro.simulate_to(test_time.iloc[-1], future_loading) # Plot results - print('Producing figures...') + print("Producing figures...") plt.figure() - plt.plot(test_time, data[RUN_ID]['voltage'], color='green', label='ground truth') - plt.plot(results_circuit1.times, [z['v'] for z in results_circuit1.outputs], color='blue', label='circuit') - plt.plot(results_electro.times, [z['v'] for z in results_electro.outputs], color='red', label='electro chemistry') - plt.plot(results_ensemble.times, [z['v'] for z in results_ensemble.outputs], color='yellow', label='ensemble') + plt.plot(test_time, data[RUN_ID]["voltage"], color="green", label="ground truth") + plt.plot( + results_circuit1.times, + [z["v"] for z in results_circuit1.outputs], + color="blue", + label="circuit", + ) + plt.plot( + results_electro.times, + [z["v"] for z in results_electro.outputs], + color="red", + label="electro chemistry", + ) + plt.plot( + results_ensemble.times, + [z["v"] for z in results_ensemble.outputs], + color="yellow", + label="ensemble", + ) plt.legend() - - # Note that the result may not be exactly between the other two models. + + # Note that the result may not be exactly between the other two models. 
# This is because of aggregation is done in 2 steps: at state transition and then at output calculation -# This allows the module to be executed directly -if __name__=='__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/48e4811fdf247ee4b7a7e76d23becfe3/noise.py b/docs/_downloads/48e4811fdf247ee4b7a7e76d23becfe3/noise.py index 87c2ecf3..b7d356d6 100644 --- a/docs/_downloads/48e4811fdf247ee4b7a7e76d23becfe3/noise.py +++ b/docs/_downloads/48e4811fdf247ee4b7a7e76d23becfe3/noise.py @@ -9,106 +9,124 @@ from progpy.models.thrown_object import ThrownObject + def run_example(): # Define future loading - def future_load(t=None, x=None): + def future_load(t=None, x=None): # The thrown object model has no inputs- you cannot load the system (i.e., affect it once it's in the air) # So we return an empty input container return m.InputContainer({}) # Define configuration for simulation config = { - 'threshold_keys': 'impact', # Simulate until the thrown object has impacted the ground - 'dt': 0.005, # Time step (s) - 'save_freq': 0.5, # Frequency at which results are saved (s) + "threshold_keys": "impact", # Simulate until the thrown object has impacted the ground + "dt": 0.005, # Time step (s) + "save_freq": 0.5, # Frequency at which results are saved (s) } # Define a function to print the results - will be used later def print_results(simulated_results): # Print results - print('states:') - for (t,x) in zip(simulated_results.times, simulated_results.states): - print('\t{:.2f}s: {}'.format(t, x)) + print("states:") + for t, x in zip(simulated_results.times, simulated_results.states): + print("\t{:.2f}s: {}".format(t, x)) - print('outputs:') - for (t,x) in zip(simulated_results.times, simulated_results.outputs): - print('\t{:.2f}s: {}'.format(t, x)) + print("outputs:") + for t, x in zip(simulated_results.times, simulated_results.outputs): + print("\t{:.2f}s: {}".format(t, x)) - print('\nimpact time: {:.2f}s'.format(simulated_results.times[-1])) + print("\nimpact time: {:.2f}s".format(simulated_results.times[-1])) # The simulation stopped at impact, so the last element of times is the impact time # Plot results simulated_results.states.plot() # Ex1: No noise - m = ThrownObject(process_noise = False) + m = ThrownObject(process_noise=False) simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex1: No noise') + plt.title("Ex1: No noise") # Ex2: with noise - same noise applied to every state process_noise = 15 - m = ThrownObject(process_noise = process_noise) # Noise with a std of 0.5 to every state - print('\nExample without same noise for every state') + m = ThrownObject( + process_noise=process_noise + ) # Noise with a std of 0.5 to every state + print("\nExample without same noise for every state") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex2: Basic Noise') + plt.title("Ex2: Basic Noise") # Ex3: noise- more noise on position than velocity - process_noise = {'x': 30, 'v': 1} - m = ThrownObject(process_noise = process_noise) - print('\nExample with more noise on position than velocity') + process_noise = {"x": 30, "v": 1} + m = ThrownObject(process_noise=process_noise) + print("\nExample with more noise on position than velocity") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex3: More noise on position') + plt.title("Ex3: More 
noise on position") # Ex4: noise- Ex3 but uniform - process_noise_dist = 'uniform' - model_config = {'process_noise_dist': process_noise_dist, 'process_noise': process_noise} - m = ThrownObject(**model_config) - print('\nExample with more uniform noise') + process_noise_dist = "uniform" + model_config = { + "process_noise_dist": process_noise_dist, + "process_noise": process_noise, + } + m = ThrownObject(**model_config) + print("\nExample with more uniform noise") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex4: Ex3 with uniform dist') + plt.title("Ex4: Ex3 with uniform dist") # Ex5: noise- Ex3 but triangle - process_noise_dist = 'triangular' - model_config = {'process_noise_dist': process_noise_dist, 'process_noise': process_noise} - m = ThrownObject(**model_config) - print('\nExample with triangular process noise') + process_noise_dist = "triangular" + model_config = { + "process_noise_dist": process_noise_dist, + "process_noise": process_noise, + } + m = ThrownObject(**model_config) + print("\nExample with triangular process noise") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex5: Ex3 with triangular dist') + plt.title("Ex5: Ex3 with triangular dist") # Ex6: Measurement noise # Everything we've done with process noise, we can also do with measurement noise. - # Just use 'measurement_noise' and 'measurement_noise_dist' - measurement_noise = {'x': 20} # For each output - measurement_noise_dist = 'uniform' - model_config = {'measurement_noise_dist': measurement_noise_dist, 'measurement_noise': measurement_noise} - m = ThrownObject(**model_config) - print('\nExample with measurement noise') - print('- Note: outputs are different than state- this is the application of measurement noise') + # Just use 'measurement_noise' and 'measurement_noise_dist' + measurement_noise = {"x": 20} # For each output + measurement_noise_dist = "uniform" + model_config = { + "measurement_noise_dist": measurement_noise_dist, + "measurement_noise": measurement_noise, + } + m = ThrownObject(**model_config) + print("\nExample with measurement noise") + print( + "- Note: outputs are different than state- this is the application of measurement noise" + ) simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex6: Measurement noise') + plt.title("Ex6: Measurement noise") # Ex7: OK, now for something a little more complicated. 
Let's try proportional noise on v only (more variation when it's going faster) # This can be used to do custom or more complex noise distributions - def apply_proportional_process_noise(self, x, dt = 1): - x['v'] -= dt*0.5*x['v'] + def apply_proportional_process_noise(self, x, dt=1): + x["v"] -= dt * 0.5 * x["v"] return x - model_config = {'process_noise': apply_proportional_process_noise} + + model_config = {"process_noise": apply_proportional_process_noise} m = ThrownObject(**model_config) - print('\nExample with proportional noise on velocity') + print("\nExample with proportional noise on velocity") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex7: Proportional noise on velocity') + plt.title("Ex7: Proportional noise on velocity") - print('\nNote: If you would like noise to be applied in a repeatable manner, set the numpy random seed to a fixed value') - print('e.g., numpy.random.seed(42)') + print( + "\nNote: If you would like noise to be applied in a repeatable manner, set the numpy random seed to a fixed value" + ) + print("e.g., numpy.random.seed(42)") plt.show() -# This allows the module to be executed directly -if __name__=='__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/4b1eefadd9a943cb2d49442a984fa187/linear_model.py b/docs/_downloads/4b1eefadd9a943cb2d49442a984fa187/linear_model.py index d798fd65..4f269f5a 100644 --- a/docs/_downloads/4b1eefadd9a943cb2d49442a984fa187/linear_model.py +++ b/docs/_downloads/4b1eefadd9a943cb2d49442a984fa187/linear_model.py @@ -2,7 +2,7 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -This example shows the use of the LinearModel class, a subclass of PrognosticsModel for models that can be described as a linear time series. +This example shows the use of the LinearModel class, a subclass of PrognosticsModel for models that can be described as a linear time series. The model is used in a simulation, and the state is printed every second """ @@ -10,6 +10,7 @@ from prog_models import LinearModel import numpy as np + class ThrownObject(LinearModel): """ Model that similates an object thrown into the air without air resistance @@ -30,8 +31,8 @@ class ThrownObject(LinearModel): Keyword Args ------------ process_noise : Optional, float or Dict[Srt, float] - Process noise (applied at dx/next_state). - Can be number (e.g., .2) applied to every state, a dictionary of values for each + Process noise (applied at dx/next_state). + Can be number (e.g., .2) applied to every state, a dictionary of values for each state (e.g., {'x1': 0.2, 'x2': 0.3}), or a function (x) -> x process_noise_dist : Optional, String distribution for process noise (e.g., normal, uniform, triangular) @@ -51,60 +52,75 @@ class ThrownObject(LinearModel): inputs = [] # no inputs, no way to control states = [ - 'x', # Position (m) - 'v' # Velocity (m/s) - ] + "x", # Position (m) + "v", # Velocity (m/s) + ] outputs = [ - 'x' # Position (m) + "x" # Position (m) ] events = [ - 'impact' # Event- object has impacted ground + "impact" # Event- object has impacted ground ] - # These are the core of the linear model. + # These are the core of the linear model. 
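As a quick standalone check (not part of the example file), the linear form defined just below can be evaluated directly with numpy. The A and E values here are copied from the matrices that follow; the state vector is an assumed example built from the model's default thrower height and throwing speed.

import numpy as np

A = np.array([[0, 1], [0, 0]])   # same A as defined below
E = np.array([[0], [-9.81]])     # same E (gravity term) as defined below
x = np.array([[1.83], [40.0]])   # example state column vector: [position; velocity]

# dx/dt = A x + B u + E; with no inputs, the B u term drops out
dxdt = A @ x + E
print(dxdt)  # roughly [[40.0], [-9.81]]: position changes at v, velocity changes at g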
# Linear models defined by the following equations: # * dx/dt = Ax + Bu + E # * z = Cx + D # * event states = Fx + G - A = np.array([[0, 1], [0, 0]]) # dx/dt = Ax + Bu + E - E = np.array([[0], [-9.81]]) # Acceleration due to gravity (m/s^2) - C = np.array([[1, 0]]) # z = Cx + D - F = None # Will override method + A = np.array([[0, 1], [0, 0]]) # dx/dt = Ax + Bu + E + E = np.array([[0], [-9.81]]) # Acceleration due to gravity (m/s^2) + C = np.array([[1, 0]]) # z = Cx + D + F = None # Will override method # The Default parameters. Overwritten by passing parameters dictionary into constructor default_parameters = { - 'thrower_height': 1.83, # m - 'throwing_speed': 40, # m/s - 'g': -9.81 # Acceleration due to gravity in m/s^2 + "thrower_height": 1.83, # m + "throwing_speed": 40, # m/s + "g": -9.81, # Acceleration due to gravity in m/s^2 } def initialize(self, u=None, z=None): - return self.StateContainer({ - 'x': self.parameters['thrower_height'], # Thrown, so initial altitude is height of thrower - 'v': self.parameters['throwing_speed'] # Velocity at which the ball is thrown - this guy is a professional baseball pitcher - }) - + return self.StateContainer( + { + "x": self.parameters[ + "thrower_height" + ], # Thrown, so initial altitude is height of thrower + "v": self.parameters[ + "throwing_speed" + ], # Velocity at which the ball is thrown - this guy is a professional baseball pitcher + } + ) + # This is actually optional. Leaving thresholds_met empty will use the event state to define thresholds. # Threshold = Event State == 0. However, this implementation is more efficient, so we included it def threshold_met(self, x): - return { - 'falling': x['v'] < 0, - 'impact': x['x'] <= 0 - } + return {"falling": x["v"] < 0, "impact": x["x"] <= 0} - def event_state(self, x): - x_max = x['x'] + np.square(x['v'])/(-self.parameters['g']*2) # Use speed and position to estimate maximum height + def event_state(self, x): + x_max = x["x"] + np.square(x["v"]) / ( + -self.parameters["g"] * 2 + ) # Use speed and position to estimate maximum height return { - 'falling': np.maximum(x['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed - 'impact': np.maximum(x['x']/x_max,0) if x['v'] < 0 else 1 # 1 until falling begins, then it's fraction of height + "falling": np.maximum( + x["v"] / self.parameters["throwing_speed"], 0 + ), # Throwing speed is max speed + "impact": np.maximum(x["x"] / x_max, 0) + if x["v"] < 0 + else 1, # 1 until falling begins, then it's fraction of height } + def run_example(): m = ThrownObject() + def future_loading(t, x=None): - return m.InputContainer({}) # No loading - m.simulate_to_threshold(future_loading, print = True, save_freq=1, threshold_keys='impact', dt=0.1) + return m.InputContainer({}) # No loading + + m.simulate_to_threshold( + future_loading, print=True, save_freq=1, threshold_keys="impact", dt=0.1 + ) + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/4dfd843259a83464e8e48a02a37a07b0/02_Parameter Estimation.ipynb b/docs/_downloads/4dfd843259a83464e8e48a02a37a07b0/02_Parameter Estimation.ipynb index dc60b0e2..d203256b 100644 --- a/docs/_downloads/4dfd843259a83464e8e48a02a37a07b0/02_Parameter Estimation.ipynb +++ b/docs/_downloads/4dfd843259a83464e8e48a02a37a07b0/02_Parameter Estimation.ipynb @@ -13,20 +13,38 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Parameter estimation is used to tune the 
parameters of a general model so its behavior matches the behavior of a specific system. For example, parameters of the battery model can be tuned to configure the model to describe the behavior of a specific battery.\n", + "Parameter estimation is used to tune the parameters of a general model so that its behavior matches that of a specific system. For example, the parameters of a battery model can be tuned to configure the model to more accurately describe the behavior of a specific battery.\n", "\n", - "Generally, parameter estimation is done by tuning the parameters of the model so that simulation (see 1. Simulation) best matches the behavior observed in some available data. In ProgPy, this is done using the progpy.PrognosticsModel.estimate_params() method. This method takes input and output data from one or more runs, and uses scipy.optimize.minimize function to estimate the parameters of the model. For more information, refer to our Documentation [here](https://nasa.github.io/progpy/prog_models_guide.html#parameter-estimation)\n", + "Generally, parameter estimation is done by tuning the parameters of the model so that the simulation (see __[01 Simulation](01_Simulation.ipynb)__) best matches the behavior observed in some available data. This is done using a mixture of data, knowledge (e.g., from system specs), and intuition. For large, complex models, it can be VERY difficult and computationally expensive.\n", + "\n", + "In ProgPy, parameter estimation is done using the `progpy.PrognosticsModel.estimate_params()` method. This method takes input and output data from one or more runs, and uses `scipy.optimize.minimize` function to estimate the parameters of the model. For more information, refer to the documentation [here](https://nasa.github.io/progpy/prog_models_guide.html#parameter-estimation).\n", "\n", "A few definitions:\n", + "\n", "* __`keys`__ `(list[str])`: Parameter keys to optimize\n", "* __`times`__ `(list[float])`: Array of times for each run\n", "* __`inputs`__ `(list[InputContainer])`: Array of input containers where inputs[x] corresponds to times[x]\n", "* __`outputs`__ `(list[OutputContainer])`: Array of output containers where outputs[x] corresponds to times[x]\n", - "* __`method`__ `(str, optional)`: Optimization method- see scipy.optimize.minimize for options\n", + "* __`method`__ `(str, optional)`: Optimization method. See `scipy.optimize.minimize`\n", "* __`tol`__ `(int, optional)`: Tolerance for termination. Depending on the provided minimization method, specifying tolerance sets solver-specific options to tol\n", - "* __`error_method`__ `(str, optional)`: Method to use in calculating error. See calc_error for options\n", + "* __`error_method`__ `(str, optional)`: Method to use in calculating error. See [`calc_error`](https://nasa.github.io/progpy/api_ref/progpy/PrognosticModel.html?highlight=calc_error#progpy.PrognosticsModel.calc_error) for options\n", "* __`bounds`__ `(tuple or dict, optional)`: Bounds for optimization in format ((lower1, upper1), (lower2, upper2), ...) or {key1: (lower1, upper1), key2: (lower2, upper2), ...}\n", - "* __`options`__ `(dict, optional)`: Options passed to optimizer. See scipy.optimize.minimize for options" + "* __`options`__ `(dict, optional)`: Options passed to optimizer. 
See `scipy.optimize.minimize` for options" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "* [Simple Example](#Simple-Example)\n", + "* [Using Tol](#Using-Tol)\n", + "* [Handling Noise with Multiple Runs](#Handling-Noise-with-Multiple-Runs)\n", + "* [Simplified Battery](#Simplified-Battery)\n", + " * [Data Prep](#Data-Prep)\n", + " * [Set Up Model](#Set-Up-Model)\n", + " * [Parameter Estimation](#Parameter-Estimation)\n", + "* [Conclusion](#Conclusion)" ] }, { @@ -34,7 +52,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### Example 1) Simple Example" + "## Simple Example" ] }, { @@ -42,7 +60,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we will show an example demonstrating model parameter estimation. In this example, we estimate the model parameters from data. In general, the data will usually be collected from the physical system or from a different model (model surrogacy). In this case, we will use the example data, below:" + "In this example, we estimate the model parameters from data. In general, the data will usually be collected from the physical system or from a different model (model surrogacy). In this case, we will use example data." ] }, { @@ -52,16 +70,16 @@ "outputs": [], "source": [ "times = [0, 1, 2, 3, 4, 5, 6, 7]\n", - "inputs = [{}]*8\n", + "inputs = [{}] * 8\n", "outputs = [\n", - " {'x': 1.83},\n", - " {'x': 36.5091999066245},\n", - " {'x': 60.05364349596605},\n", - " {'x': 73.23733081022635},\n", - " {'x': 76.47528104941956},\n", - " {'x': 69.9146810161441},\n", - " {'x': 53.74272753819968},\n", - " {'x': 28.39355725512131},\n", + " {\"x\": 1.83},\n", + " {\"x\": 36.5091999066245},\n", + " {\"x\": 60.05364349596605},\n", + " {\"x\": 73.23733081022635},\n", + " {\"x\": 76.47528104941956},\n", + " {\"x\": 69.9146810161441},\n", + " {\"x\": 53.74272753819968},\n", + " {\"x\": 28.39355725512131},\n", "]" ] }, @@ -70,7 +88,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "First, we will import a model from the ProgPy Package. For this example we're using the simple ThrownObject model." + "First, we will import a model from the ProgPy Package. For this example, we will be using the simple ThrownObject model." ] }, { @@ -87,9 +105,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we can build a model with a best guess for the parameters.\n", - "\n", - "We will use a guess that our thrower is 20 meters tall, has a throwing speed of 3.1 m/s, and that acceleration due to gravity is 15 m/s^2. However, given our times, inputs, and outputs, we can clearly tell this is not true! Let's see if parameter estimation can fix this!" + "We can now build a model with a best guess for the parameters. We will guess that our thrower is 20 meters tall, has a throwing speed of 3.1 $m/s$, and that acceleration due to gravity is 15 $m/s^2$. However, given our times, inputs, and outputs, we can clearly tell this is not true! Let's see if parameter estimation can fix this." ] }, { @@ -106,9 +122,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "For this example, we will define specific parameters that we want to estimate.\n", - "\n", - "We can pass the desired parameters to our __keys__ keyword argument." + "Next we will define specific parameters that we want to estimate. We can pass the desired parameters to our __keys__ keyword argument." 
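As a hedged aside (not a cell from this notebook): the `bounds` argument described above accepts either a tuple of (lower, upper) pairs ordered like `keys`, or a dict keyed by parameter name. The variable names and numeric ranges below are illustrative guesses only.

# Tuple form: one (lower, upper) pair per entry in keys, in the same order as keys
bounds_as_tuple = ((0, 4), (20, 60), (-20, -5))

# Dict form: keyed by parameter name, so ordering no longer matters
bounds_as_dict = {"thrower_height": (0, 4), "throwing_speed": (20, 60), "g": (-20, -5)}

# Either form can then be passed straight through, e.g.
# m.estimate_params(times=times, inputs=inputs, outputs=outputs, keys=keys, dt=0.1, bounds=bounds_as_dict)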
] }, { @@ -117,7 +131,7 @@ "metadata": {}, "outputs": [], "source": [ - "keys = ['thrower_height', 'throwing_speed', 'g']" + "keys = [\"thrower_height\", \"throwing_speed\", \"g\"]" ] }, { @@ -135,10 +149,10 @@ "outputs": [], "source": [ "# Printing state before\n", - "print('Model configuration before')\n", + "print(\"Model configuration before\")\n", "for key in keys:\n", " print(\"-\", key, m[key])\n", - "print(' Error: ', m.calc_error(times, inputs, outputs, dt=0.1))" + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=0.1))" ] }, { @@ -157,7 +171,7 @@ "metadata": {}, "outputs": [], "source": [ - "m.estimate_params(times = times, inputs = inputs, outputs = outputs, keys = keys, dt=0.1)" + "m.estimate_params(times=times, inputs=inputs, outputs=outputs, keys=keys, dt=0.1)" ] }, { @@ -165,7 +179,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now, let's see what the new parameters are after estimation." + "Let's see what the new parameters are after estimation." ] }, { @@ -174,10 +188,10 @@ "metadata": {}, "outputs": [], "source": [ - "print('\\nOptimized configuration')\n", + "print(\"\\nOptimized configuration\")\n", "for key in keys:\n", " print(\"-\", key, m[key])\n", - "print(' Error: ', m.calc_error(times, inputs, outputs, dt=0.1))" + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=0.1))" ] }, { @@ -185,7 +199,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Sure enough- parameter estimation determined that the thrower's height wasn't 20m, instead was closer to 1.8m, a much more reasonable height!" + "Sure enough, parameter estimation determined that the thrower's height wasn't 20m. Instead, it was closer to 1.8m, a much more reasonable height. Parameter estimation also correctly estimated g as ~-9.81 $m/s^2$ and throwing speed at around 40 $m/s$, the values used to generate our example data." ] }, { @@ -193,7 +207,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### Example 2) Using Tol" + "## Using Tol" ] }, { @@ -220,18 +234,13 @@ "outputs": [], "source": [ "m = ThrownObject(thrower_height=20, throwing_speed=3.1, g=15)\n", - "m.estimate_params(times = times, inputs = inputs, outputs = outputs, keys = keys, dt=0.1, tol=1e-6)\n", - "print('\\nOptimized configuration')\n", + "m.estimate_params(\n", + " times=times, inputs=inputs, outputs=outputs, keys=keys, dt=0.1, tol=1e-6\n", + ")\n", + "print(\"\\nOptimized configuration\")\n", "for key in keys:\n", " print(\"-\", key, m[key])\n", - "print(' Error: ', m.calc_error(times, inputs, outputs, dt=0.1))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As expected, reducing the tolerance leads to a decrease in the overall error, resulting in more accurate parameters." + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=0.1))" ] }, { @@ -239,19 +248,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Note, if we were to set a high tolerance, such as 10, our error would consequently be very high!\n", + "As expected, reducing the tolerance leads to a decrease in the overall error, resulting in more accurate parameters.\n", "\n", - "Also note that the tol value is for scipy minimize. It is different but strongly correlated to the result of calc_error. 
For more information on how the `tol` feature works, please consider scipy's `minimize()` documentation located [here](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can also adjust the metric that is used to estimate parameters by setting the error_method to a different `calc_error()` method (see example below).\n", - "Default is Mean Squared Error (MSE).\n", - "See calc_error method for list of options." + "Note that if we were to set a high tolerance, such as 10, our error would consequently be very high! Also note that the tol value is for scipy minimize. It is different but strongly correlated to the result of calc_error. For more information on how the `tol` feature works, please refer to scipy's `minimize()` [documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html).\n", + "\n", + "You can also adjust the metric that is used to estimate parameters by setting the error_method to a different `calc_error()` method (see example below). Default is Mean Squared Error (`MSE`). See `calc_error()` method for list of options." ] }, { @@ -260,15 +261,23 @@ "metadata": {}, "outputs": [], "source": [ - "m['thrower_height'] = 3.1\n", - "m['throwing_speed'] = 29\n", + "m[\"thrower_height\"] = 3.1\n", + "m[\"throwing_speed\"] = 29\n", "\n", "# Using MAE, or Mean Absolute Error instead of the default Mean Squared Error.\n", - "m.estimate_params(times = times, inputs = inputs, outputs = outputs, keys = keys, dt=0.1, tol=1e-9, error_method='MAX_E')\n", - "print('\\nOptimized configuration')\n", + "m.estimate_params(\n", + " times=times,\n", + " inputs=inputs,\n", + " outputs=outputs,\n", + " keys=keys,\n", + " dt=0.1,\n", + " tol=1e-9,\n", + " error_method=\"MAX_E\",\n", + ")\n", + "print(\"\\nOptimized configuration\")\n", "for key in keys:\n", " print(\"-\", key, m[key])\n", - "print(' Error: ', m.calc_error(times, inputs, outputs, dt=0.1, method='MAX_E'))" + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=0.1, method=\"MAX_E\"))" ] }, { @@ -276,7 +285,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Note that MAX_E is frequently better at capturing tail behavior in many prognostic models." + "Note that `MAX_E` is frequently better at capturing tail behavior in many prognostic models." ] }, { @@ -284,7 +293,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### Example 3) Handling Noise with Multiple Runs" + "## Handling Noise with Multiple Runs" ] }, { @@ -292,11 +301,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In the previous two examples, we demonstrated how to use `estimate_params()` using a clearly defined ThrownObject model. However, unlike most models, we assumed that there would be no noise!\n", - "\n", - "In this example, we'll show how to use `estimate_params()` with noisy data.\n", + "In the previous two examples, we demonstrated how to use `estimate_params()` using a clearly defined ThrownObject model. However we assumed that there would be no noise in the data used to estimate parameters. This is almost never the case in real life.\n", "\n", - "First let's repeat the previous example, this time generating data from a noisy model." + "In this example, we'll show how to use `estimate_params()` with noisy data. First, let's repeat the previous example, this time generating data from a noisy model." 
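One hedged tip before generating the noisy data: seeding numpy's random number generator makes the noise draws repeatable from run to run (the noise.py example above closes with the same suggestion). A minimal sketch:

import numpy as np

np.random.seed(42)  # fix the seed so the process-noise draws in the next cells are repeatable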
] }, { @@ -305,14 +312,14 @@ "metadata": {}, "outputs": [], "source": [ - "m = ThrownObject(process_noise = 1)\n", - "results = m.simulate_to_threshold(save_freq=0.5, dt=('auto', 0.1))\n", + "m = ThrownObject(process_noise=1)\n", + "results = m.simulate_to_threshold(save_freq=0.5, dt=(\"auto\", 0.1))\n", "\n", "# Resetting parameters to their incorrectly set values.\n", - "m['thrower_height'] = 20\n", - "m['throwing_speed'] = 3.1\n", - "m['g'] = 15\n", - "keys = ['thrower_height', 'throwing_speed', 'g']" + "m[\"thrower_height\"] = 20\n", + "m[\"throwing_speed\"] = 3.1\n", + "m[\"g\"] = 15\n", + "keys = [\"thrower_height\", \"throwing_speed\", \"g\"]" ] }, { @@ -321,11 +328,13 @@ "metadata": {}, "outputs": [], "source": [ - "m.estimate_params(times = results.times, inputs = results.inputs, outputs = results.outputs, keys = keys)\n", - "print('\\nOptimized configuration')\n", + "m.estimate_params(\n", + " times=results.times, inputs=results.inputs, outputs=results.outputs, keys=keys\n", + ")\n", + "print(\"\\nOptimized configuration\")\n", "for key in keys:\n", " print(\"-\", key, m[key])\n", - "print(' Error: ', m.calc_error(results.times, results.inputs, results.outputs))" + "print(\" Error: \", m.calc_error(results.times, results.inputs, results.outputs))" ] }, { @@ -333,11 +342,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In this case, the error from calc_error is low, but to have an accurate estimation of the error, we should actually be manually measuring the Absolute Mean Error rather than using calc_error().\n", + "Here, the error from calc_error is low. To have an accurate estimation of the error, we should actually be manually measuring the Absolute Mean Error rather than using `calc_error()`.\n", "\n", - "The reason being is simple! calc_error() is calculating the error between the simulated and observed data. However, the observed and simulated data in this case are being generated from a model that has noise! In other words, we are comparing the difference of noise to noise, which can lead to inconsistent results!\n", + "The reason being is simple. `calc_error()` is calculating the error between the simulated and observed data. However, the observed and simulated data in this case are being generated from a model that has noise. In other words, we are comparing the difference of noise to noise, which can lead to inconsistent results.\n", "\n", - "Let's create a helper function to calculate the Absolute Mean Error between our original and estimated parameters!" + "Let's create a helper function to calculate the Absolute Mean Error between our original and estimated parameters." ] }, { @@ -349,6 +358,7 @@ "# Creating a new model with the original parameters to compare to the model with noise.\n", "true_Values = ThrownObject()\n", "\n", + "\n", "# Function to determine the Absolute Mean Error (AME) of the model parameters.\n", "def AME(m, keys):\n", " error = 0\n", @@ -362,7 +372,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now using our new AME function we see that the error isn't as great as we thought." + "Using our new AME function, we see that the error isn't as great as we thought." ] }, { @@ -379,7 +389,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Note that the error changes every time due to the randomness of noise:" + "Note that the error changes every time due to the randomness of noise." 
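Because each estimate is itself random, one reasonable (hedged) way to report estimation quality is the mean and spread of the AME over repeated trials; the loop below prints the per-run values such a summary would be built from. The numbers here are purely illustrative.

import numpy as np

ame_samples = [0.82, 1.37, 0.64, 1.10]  # hypothetical AME values from repeated estimate calls
print("mean AME:", np.mean(ame_samples), "+/- std:", np.std(ame_samples))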
] }, { @@ -389,17 +399,23 @@ "outputs": [], "source": [ "for count in range(10):\n", - " m = ThrownObject(process_noise = 1)\n", - " results = m.simulate_to_threshold(save_freq=0.5, dt=('auto', 0.1))\n", - " \n", + " m = ThrownObject(process_noise=1)\n", + " results = m.simulate_to_threshold(save_freq=0.5, dt=(\"auto\", 0.1))\n", + "\n", " # Resetting parameters to their originally incorrectly set values.\n", - " m['thrower_height'] = 20\n", - " m['throwing_speed'] = 3.1\n", - " m['g'] = 15\n", + " m[\"thrower_height\"] = 20\n", + " m[\"throwing_speed\"] = 3.1\n", + " m[\"g\"] = 15\n", "\n", - " m.estimate_params(times = results.times, inputs = results.inputs, outputs = results.outputs, keys = keys, dt=0.1)\n", - " error = AME(m, ['thrower_height', 'throwing_speed', 'g'])\n", - " print(f'Estimate Call Number {count} - AME Error {error}')" + " m.estimate_params(\n", + " times=results.times,\n", + " inputs=results.inputs,\n", + " outputs=results.outputs,\n", + " keys=keys,\n", + " dt=0.1,\n", + " )\n", + " error = AME(m, [\"thrower_height\", \"throwing_speed\", \"g\"])\n", + " print(f\"Estimate Call Number {count} - AME Error {error}\")" ] }, { @@ -407,7 +423,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This issue with noise can be overcome with more data. Let's repeat the example above, this time using data from multiple runs. First, let's generate the data:" + "This issue with noise can be overcome with more data. Let's repeat the example above, this time using data from multiple runs. First, let's generate the data." ] }, { @@ -419,7 +435,7 @@ "times, inputs, outputs = [], [], []\n", "m = ThrownObject(process_noise=1)\n", "for count in range(20):\n", - " results = m.simulate_to_threshold(save_freq=0.5, dt=('auto', 0.1))\n", + " results = m.simulate_to_threshold(save_freq=0.5, dt=(\"auto\", 0.1))\n", " times.append(results.times)\n", " inputs.append(results.inputs)\n", " outputs.append(results.outputs)" @@ -430,7 +446,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next let's reset the parameters to our incorrect values" + "Next, let's reset the parameters to our incorrect values." ] }, { @@ -439,9 +455,9 @@ "metadata": {}, "outputs": [], "source": [ - "m['thrower_height'] = 20\n", - "m['throwing_speed'] = 3.1\n", - "m['g'] = 15" + "m[\"thrower_height\"] = 20\n", + "m[\"throwing_speed\"] = 3.1\n", + "m[\"g\"] = 15" ] }, { @@ -449,7 +465,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Finally, let's call estimate_params with all the collected data" + "Finally, we will call `estimate_params()` with all the collected data." ] }, { @@ -459,11 +475,11 @@ "outputs": [], "source": [ "m.estimate_params(times=times, inputs=inputs, outputs=outputs, keys=keys, dt=0.1)\n", - "print('\\nOptimized configuration')\n", + "print(\"\\nOptimized configuration\")\n", "for key in keys:\n", " print(\"-\", key, m[key])\n", - "error = AME(m, ['thrower_height', 'throwing_speed', 'g'])\n", - "print('AME Error: ', error)" + "error = AME(m, [\"thrower_height\", \"throwing_speed\", \"g\"])\n", + "print(\"AME Error: \", error)" ] }, { @@ -471,13 +487,629 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Notice that by using data from multiple runs, we are able to produce a lower AME Error than before! This is because we are able to simulate the noise multiple times, which in turn, allows our `estimate_params()` to produce a more accurate result since it is given more data to work with!" 
+ "Notice that by using data from multiple runs, we are able to produce a lower AME Error than before. This is because we are able to simulate the noise multiple times, which in turn, allows our `estimate_params()` to produce a more accurate result since it is given more data to work with." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simplified Battery" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The previous examples all used a simple model, the ThrownObject. For large, complex models, it can be VERY difficult and computationally expensive.\n", + "\n", + "In this example, we will estimate the parameters for the simplified battery model. This model is more complex than the ThrownObject model but is still a relatively simple model. This example demonstrates some approaches useful for estimating parameters in complex models, like estimating parameter subsets on data selected to highlight specific features.\n", + "\n", + "Let's prepare some data for parameter estimation. We will be using the datasets subpackage in progpy for this." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Data Prep" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.datasets import nasa_battery\n", + "\n", + "(desc, data) = nasa_battery.load_data(1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The dataset includes 4 different kinds of runs: trickle, step, reference, random walk. We're going to split the dataset into one example for each of the different types for use later.\n", + "\n", + "Let's take a look at the trickle discharge run first." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "trickle_dataset = data[0]\n", + "print(trickle_dataset)\n", + "trickle_dataset.plot(\n", + " y=[\"current\", \"voltage\", \"temperature\"], subplots=True, xlabel=\"Time (sec)\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's do the same for a reference discharge run (5)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "reference_dataset = data[5]\n", + "reference_dataset.plot(\n", + " y=[\"current\", \"voltage\", \"temperature\"], subplots=True, xlabel=\"Time (sec)\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we will do it for the step runs. Note that this is actually multiple runs that we need to combine. `relativeTime` resets for each \"run\". So if we're going to use multiple runs together, we need to stitch these times together." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data[7][\"absoluteTime\"] = data[7][\"relativeTime\"]\n", + "for i in range(8, 32):\n", + " data[i][\"absoluteTime\"] = (\n", + " data[i][\"relativeTime\"] + data[i - 1][\"absoluteTime\"].iloc[-1]\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we should combine the data into a single dataset and investigate the results." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "step_dataset = pd.concat(data[7:32], ignore_index=True)\n", + "print(step_dataset)\n", + "step_dataset.plot(\n", + " y=[\"current\", \"voltage\", \"temperature\"], subplots=True, xlabel=\"Time (sec)\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, let's investigate the random walk discharge. Like the step discharge, we need to stitch together the times and concatenate the data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data[35][\"absoluteTime\"] = data[35][\"relativeTime\"]\n", + "for i in range(36, 50):\n", + " data[i][\"absoluteTime\"] = (\n", + " data[i][\"relativeTime\"] + data[i - 1][\"absoluteTime\"].iloc[-1]\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "random_walk_dataset = pd.concat(data[35:50], ignore_index=True)\n", + "print(random_walk_dataset)\n", + "random_walk_dataset.plot(\n", + " y=[\"current\", \"voltage\", \"temperature\"], subplots=True, xlabel=\"Time (sec)\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now the data is ready for this tutorial, let's dive into it." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Set Up Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import SimplifiedBattery\n", + "\n", + "m = SimplifiedBattery()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Parameter Estimation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's take a look at the parameter space." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m.parameters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now test how well it fits the random walk dataset. First, let's prepare the data and future load equation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "times_rw = random_walk_dataset[\"absoluteTime\"]\n", + "inputs_rw = [\n", + " elem[1][\"voltage\"] * elem[1][\"current\"] for elem in random_walk_dataset.iterrows()\n", + "]\n", + "outputs_rw = [{\"v\": elem[1][\"voltage\"]} for elem in random_walk_dataset.iterrows()]\n", + "\n", + "import numpy as np\n", + "\n", + "\n", + "def future_load_rw(t, x=None):\n", + " power = np.interp(t, times_rw, inputs_rw)\n", + " return {\"P\": power}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now evaluate how well the battery matches the data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=100\n", + ")\n", + "from matplotlib import pyplot as plt\n", + "\n", + "plt.figure()\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]])\n", + "plt.plot(result.times, [z[\"v\"] for z in result.outputs])\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")\n", + "fig = result.event_states.plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is a terrible fit. Clearly, the battery model isn't properly configured for this specific battery. Reading through the paper, we see that the default parameters are for a larger battery pouch present in a UAV, much larger than the 18650 battery that produced our dataset.\n", + "\n", + "To correct this, we need to estimate the model parameters.\n", + "\n", + "There are 7 parameters to set (assuming initial SOC is always 1). We can start with setting a few parameters we know. We know that $v_L$ is about 4.2 (from the battery specs). We also expect that the battery internal resistance is the same as that in the electrochemistry model (which also uses an 18650). Finally, we know that the capacity of this battery is significantly smaller than the default values for the larger pouch battery." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m[\"v_L\"] = 4.2 # We know this\n", + "from progpy.models import BatteryElectroChemEOD\n", + "\n", + "m[\"R_int\"] = BatteryElectroChemEOD.default_parameters[\"Ro\"]\n", + "m[\"E_crit\"] /= 4 # Battery capacity is much smaller" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's take a look at the model fit again and see where that got us." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result_guess = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=5\n", + ")\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]])\n", + "plt.plot(result_guess.times, [z[\"v\"] for z in result_guess.outputs])\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Much better, but not there yet. Next, we need to use the parameter estimation feature to estimate the parameters further. Let's prepare some data. We'll use the trickle, reference, and step datasets for this. 
These are close enough temporally that we can expect aging effects to be minimal.\n", + "\n", + "**NOTE: It is important to use a different dataset to estimate parameters as to test**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "times_trickle = trickle_dataset[\"relativeTime\"]\n", + "inputs_trickle = [\n", + " {\"P\": elem[1][\"voltage\"] * elem[1][\"current\"]}\n", + " for elem in trickle_dataset.iterrows()\n", + "]\n", + "outputs_trickle = [{\"v\": elem[1][\"voltage\"]} for elem in trickle_dataset.iterrows()]\n", + "\n", + "times_ref = reference_dataset[\"relativeTime\"]\n", + "inputs_ref = [\n", + " {\"P\": elem[1][\"voltage\"] * elem[1][\"current\"]}\n", + " for elem in reference_dataset.iterrows()\n", + "]\n", + "outputs_ref = [{\"v\": elem[1][\"voltage\"]} for elem in reference_dataset.iterrows()]\n", + "\n", + "times_step = step_dataset[\"relativeTime\"]\n", + "inputs_step = [\n", + " {\"P\": elem[1][\"voltage\"] * elem[1][\"current\"]} for elem in step_dataset.iterrows()\n", + "]\n", + "outputs_step = [{\"v\": elem[1][\"voltage\"]} for elem in step_dataset.iterrows()]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can print the keys and the error beforehand for reference. The error here is what is used to estimate parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "inputs_reformatted_rw = [\n", + " {\"P\": elem[1][\"voltage\"] * elem[1][\"current\"]}\n", + " for elem in random_walk_dataset.iterrows()\n", + "]\n", + "all_keys = [\"v_L\", \"R_int\", \"lambda\", \"gamma\", \"mu\", \"beta\", \"E_crit\"]\n", + "print(\"Model configuration\")\n", + "for key in all_keys:\n", + " print(\"-\", key, m[key])\n", + "error_guess = m.calc_error(\n", + " times=times_rw.to_list(), inputs=inputs_reformatted_rw, outputs=outputs_rw\n", + ")\n", + "print(\"Error: \", error_guess)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's set the bounds on each of the parameters.\n", + "\n", + "For $v_L$ and $R_{int}$, we're defining some small bounds because we have an idea of what they might be. For the others we are saying it's between 0.1 and 10x the default battery. We also are adding a constraint that E_crit must be smaller than the default, since we know it's a smaller battery." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "bounds = {\n", + " \"v_L\": (3.75, 4.5),\n", + " \"R_int\": (\n", + " BatteryElectroChemEOD.default_parameters[\"Ro\"] * 0.5,\n", + " BatteryElectroChemEOD.default_parameters[\"Ro\"] * 2.5,\n", + " ),\n", + " \"lambda\": (0.046 / 10, 0.046 * 10),\n", + " \"gamma\": (3.355 / 10, 3.355 * 10),\n", + " \"mu\": (2.759 / 10, 2.759 * 10),\n", + " \"beta\": (8.482 / 10, 8.482 * 10),\n", + " \"E_crit\": (202426.858 / 10, 202426.858), # (smaller than default)\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we'll estimate the parameters. See the [Parameter Estimation](https://nasa.github.io/progpy/prog_models_guide.html#parameter-estimation) section in the ProgPy documentation for more details.\n", + "\n", + "We can throw all of the data into estimate parameters, but that will take a long time to run and is prone to errors (e.g., getting stuck in local minima). 
For this example, we will split characterization into parts.\n", + "\n", + "First, we try to capture the base voltage ($v_L$). If we look at the equation above, $v_L$ is the only term that is not a function of either SOC or power. So, for this estimation we use the trickle dataset, where power draw is the lowest, and we only use the first section where SOC can be assumed to be about 1." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "keys = [\"v_L\"]\n", + "m.estimate_params(\n", + " times=trickle_dataset[\"relativeTime\"].iloc[:10].to_list(),\n", + " inputs=inputs_trickle[:10],\n", + " outputs=outputs_trickle[:10],\n", + " keys=keys,\n", + " dt=1,\n", + " bounds=bounds,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now run the simulation and plot the result." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Model configuration\")\n", + "for key in all_keys:\n", + " print(\"-\", key, m[key])\n", + "error_fit1 = m.calc_error(\n", + " times=times_rw.to_list(), inputs=inputs_reformatted_rw, outputs=outputs_rw\n", + ")\n", + "print(f\"Error: {error_guess}->{error_fit1} ({error_fit1 - error_guess})\")\n", + "\n", + "result_fit1 = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=5\n", + ")\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]], label=\"ground truth\")\n", + "plt.plot(result_guess.times, [z[\"v\"] for z in result_guess.outputs], label=\"guess\")\n", + "plt.plot(result_fit1.times, [z[\"v\"] for z in result_fit1.outputs], label=\"fit1\")\n", + "plt.legend()\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")\n", + "\n", + "plt.figure()\n", + "plt.plot([0, 1], [error_guess, error_fit1])\n", + "plt.xlabel(\"Parameter Estimation Run\")\n", + "plt.ylabel(\"Error\")\n", + "plt.ylim((0, 0.25))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A tiny bit closer, but not significant. Our initial guess (from the packaging) must have been pretty good.\n", + "\n", + "The next step is to estimate the effect of current on the battery. The Parameter $R_{int}$ (internal resistance) effects this. To estimate $R_{int}$ we will use 2 runs where power is not minimal (ref and step runs). Again, we will use only the first couple steps so EOL can be assumed to be 1." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "keys = [\"R_int\"]\n", + "m.estimate_params(\n", + " times=[times_ref.iloc[:5].to_list(), times_step.iloc[:5].to_list()],\n", + " inputs=[inputs_ref[:5], inputs_step[:5]],\n", + " outputs=[outputs_ref[:5], outputs_step[:5]],\n", + " keys=keys,\n", + " dt=1,\n", + " bounds=bounds,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's look at what that got us." 
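One hedged aside before looking at the result: the `R_int` fit above passed two runs at once. The layout it relies on is simply parallel lists, where index i of `times`, `inputs`, and `outputs` all describe run i, and each run is aligned element-by-element. The numbers below are made up purely to show the shape.

run_times = [[0, 1, 2], [0, 1, 2, 3]]
run_inputs = [[{"P": 1.0}] * 3, [{"P": 2.0}] * 4]
run_outputs = [
    [{"v": 4.10}, {"v": 4.05}, {"v": 4.00}],
    [{"v": 4.10}, {"v": 3.95}, {"v": 3.85}, {"v": 3.75}],
]

# Each run must be internally aligned element-by-element
for t_i, u_i, z_i in zip(run_times, run_inputs, run_outputs):
    assert len(t_i) == len(u_i) == len(z_i)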
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Model configuration\")\n", + "for key in all_keys:\n", + " print(\"-\", key, m[key])\n", + "error_fit2 = m.calc_error(\n", + " times=times_rw.to_list(), inputs=inputs_reformatted_rw, outputs=outputs_rw\n", + ")\n", + "print(f\"Error: {error_fit1}->{error_fit2} ({error_fit2 - error_fit1})\")\n", + "\n", + "result_fit2 = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=5\n", + ")\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]], label=\"ground truth\")\n", + "plt.plot(result_guess.times, [z[\"v\"] for z in result_guess.outputs], label=\"guess\")\n", + "plt.plot(result_fit1.times, [z[\"v\"] for z in result_fit1.outputs], label=\"fit1\")\n", + "plt.plot(result_fit2.times, [z[\"v\"] for z in result_fit2.outputs], label=\"fit2\")\n", + "plt.legend()\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")\n", + "\n", + "plt.figure()\n", + "plt.plot([0, 1, 2], [error_guess, error_fit1, error_fit2])\n", + "plt.xlabel(\"Parameter Estimation Run\")\n", + "plt.ylabel(\"Error\")\n", + "plt.ylim((0, 0.25))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Much better, but not there yet! Finally we need to estimate the effects of SOC on battery performance. This involves all of the rest of the parameters. For this we will use all the rest of the parameters. We will not be using the entire reference curve to capture a full discharge.\n", + "\n", + "Note that we're using the error_method `MAX_E`, instead of the default `MSE`. This results in parameters that better estimate the end of the discharge curve and is recommended when estimating parameters that are combined with the event state." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "keys = [\"lambda\", \"gamma\", \"mu\", \"beta\", \"E_crit\"]\n", + "m.estimate_params(\n", + " times=times_ref.to_list(),\n", + " inputs=inputs_ref,\n", + " outputs=outputs_ref,\n", + " keys=keys,\n", + " dt=1,\n", + " bounds=bounds,\n", + " error_method=\"MAX_E\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now run the simulation and plot the result." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Model configuration\")\n", + "for key in all_keys:\n", + " print(\"-\", key, m[key])\n", + "error_fit3 = m.calc_error(\n", + " times=times_rw.to_list(), inputs=inputs_reformatted_rw, outputs=outputs_rw\n", + ")\n", + "print(f\"Error: {error_fit2}->{error_fit3} ({error_fit3 - error_fit2})\")\n", + "\n", + "result_fit3 = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=5\n", + ")\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]], label=\"ground truth\")\n", + "plt.plot(result_guess.times, [z[\"v\"] for z in result_guess.outputs], label=\"guess\")\n", + "plt.plot(result_fit1.times, [z[\"v\"] for z in result_fit1.outputs], label=\"fit1\")\n", + "plt.plot(result_fit2.times, [z[\"v\"] for z in result_fit2.outputs], label=\"fit2\")\n", + "plt.plot(result_fit3.times, [z[\"v\"] for z in result_fit3.outputs], label=\"fit3\")\n", + "plt.legend()\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")\n", + "\n", + "plt.figure()\n", + "plt.plot([0, 1, 2, 3], [error_guess, error_fit1, error_fit2, error_fit3])\n", + "plt.xlabel(\"Parameter Estimation Run\")\n", + "plt.ylabel(\"Error\")\n", + "plt.ylim((0, 0.25))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is even better. Now we have an \"ok\" estimate, ~150 mV (for the sake of a demo). The estimate could be refined further by setting a lower tolerance (tol parameter), or repeating the 4 parameter estimation steps, as shown above.\n", + "\n", + "Parameter estimation is also limited by the model itself. This is a simplified battery model, meaning there were some simplifying assumptions made. It will likely not be able to capture the behavior of a model as well as a higher fidelity model (e.g., BatteryElectroChemEOD)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This chapter introduced the concept of parameter estimation, through which the parameters of a physics-based model are estimated. This is done using a mixture of data, knowledge (e.g., from system specs), and intuition. For large, complex models, it can be VERY difficult and computationally expensive. Fortunately, in this case we have a relatively simple model.\n", + "\n", + "In ProgPy a models `estimate_params` method is used to estimate the parameters. See [Parameter Estimation Docs](https://nasa.github.io/progpy/prog_models_guide.html#parameter-estimation) for more details.\n", + "\n", + "In the next notebook, we will be exploring (see __[03 Existing Models](03_Existing%20Models.ipynb)__)." 
] } ], "metadata": { "kernelspec": { - "display_name": "Python 3.11.0 64-bit", + "display_name": "Python 3.12.0 64-bit", "language": "python", "name": "python3" }, @@ -496,7 +1128,7 @@ "orig_nbformat": 4, "vscode": { "interpreter": { - "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49" + "hash": "f1062708a37074d70712b695aadee582e0b0b9f95f45576b5521424137d05fec" } } }, diff --git a/docs/_downloads/50b678fa73c3d10dd1d1ce03a5f7643f/sim_dcmotor_singlephase.py b/docs/_downloads/50b678fa73c3d10dd1d1ce03a5f7643f/sim_dcmotor_singlephase.py index 1ec964b5..2172c1a9 100644 --- a/docs/_downloads/50b678fa73c3d10dd1d1ce03a5f7643f/sim_dcmotor_singlephase.py +++ b/docs/_downloads/50b678fa73c3d10dd1d1ce03a5f7643f/sim_dcmotor_singlephase.py @@ -2,34 +2,44 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a DC motor being simulated for a set amount of time, using the single-phase dcmotor model. +Example of a DC motor being simulated for a set amount of time, using the single-phase dcmotor model. """ import math from progpy.models import dcmotor_singlephase + def run_example(): motor = dcmotor_singlephase.DCMotorSP() - + def future_loading(t, x=None): f = 0.5 - - # Simple load proportional to rotor speed. + + # Simple load proportional to rotor speed. # This is a typical, hyper-simplified model of a fixed-pitch propeller directly attached to the motor shaft such that the resistant torque # becomes: Cq * omega^2, where Cq is a (assumed to be) constant depending on the propeller profile and omega is the rotor speed. # Since there's no transmission, omega is exactly the speed of the motor shaft. if x is None: # First load (before state is initialized) t_l = 0.0 else: - t_l = 1e-5 * x['v_rot']**2.0 - return motor.InputContainer({ - 'v': 10.0 + 2.0 * math.sin(math.tau * f * t), # voltage input assumed sinusoidal just to show variations in the input. No physical meaning. - 't_l': t_l # assuming constant load (simple) - }) + t_l = 1e-5 * x["v_rot"] ** 2.0 + return motor.InputContainer( + { + "v": 10.0 + + 2.0 + * math.sin( + math.tau * f * t + ), # voltage input assumed sinusoidal just to show variations in the input. No physical meaning. + "t_l": t_l, # assuming constant load (simple) + } + ) - simulated_results = motor.simulate_to(2.0, future_loading, dt=1e-3, save_freq=0.1, print=True) + simulated_results = motor.simulate_to( + 2.0, future_loading, dt=1e-3, save_freq=0.1, print=True + ) simulated_results.states.plot(compact=False) -if __name__ == '__main__': + +if __name__ == "__main__": print("Simulation of DC single-phase motor") run_example() diff --git a/docs/_downloads/51fcca34e00ecb68f3c3fcbb6a7c6f66/param_est.py b/docs/_downloads/51fcca34e00ecb68f3c3fcbb6a7c6f66/param_est.py index de46c1ac..7edd829c 100644 --- a/docs/_downloads/51fcca34e00ecb68f3c3fcbb6a7c6f66/param_est.py +++ b/docs/_downloads/51fcca34e00ecb68f3c3fcbb6a7c6f66/param_est.py @@ -2,57 +2,59 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating the model parameter estimation feature. +Example demonstrating the model parameter estimation feature. """ from progpy.models.thrown_object import ThrownObject + def run_example(): # Step 1: Build the model with your best guess in parameters # Here we're guessing that the thrower is 20 meters tall. Obviously not true! # Let's see if parameter estimation can fix this m = ThrownObject(thrower_height=20) - # Step 2: Collect data from the use of the system. 
Let's pretend we threw the ball once, and collected position measurements + # Step 2: Collect data from the use of the system. Let's pretend we threw the ball once, and collected position measurements times = [0, 1, 2, 3, 4, 5, 6, 7, 8] - inputs = [{}]*9 + inputs = [{}] * 9 outputs = [ - {'x': 1.83}, - {'x': 36.95}, - {'x': 62.36}, - {'x': 77.81}, - {'x': 83.45}, - {'x': 79.28}, - {'x': 65.3}, - {'x': 41.51}, - {'x': 7.91}, + {"x": 1.83}, + {"x": 36.95}, + {"x": 62.36}, + {"x": 77.81}, + {"x": 83.45}, + {"x": 79.28}, + {"x": 65.3}, + {"x": 41.51}, + {"x": 7.91}, ] # Step 3: Identify the parameters to be estimated - keys = ['thrower_height', 'throwing_speed'] + keys = ["thrower_height", "throwing_speed"] # Printing state before - print('Model configuration before') + print("Model configuration before") for key in keys: print("-", key, m.parameters[key]) - print(' Error: ', m.calc_error(times, inputs, outputs, dt=1e-4)) + print(" Error: ", m.calc_error(times, inputs, outputs, dt=1e-4)) # Step 4: Run parameter estimation with data m.estimate_params([(times, inputs, outputs)], keys, dt=0.01) # Print result - print('\nOptimized configuration') + print("\nOptimized configuration") for key in keys: print("-", key, m.parameters[key]) - print(' Error: ', m.calc_error(times, inputs, outputs, dt=1e-4)) - + print(" Error: ", m.calc_error(times, inputs, outputs, dt=1e-4)) + # Sure enough- parameter estimation determined that the thrower's height wasn't 20 m, instead was closer to 1.9m, a much more reasonable height! # Note: You can also adjust the metric that is used to estimate parameters. - # This is done by setting the "error_method" argument. + # This is done by setting the "error_method" argument. # e.g., m.estimate_params([(times, inputs, outputs)], keys, dt=0.01, error_method='MAX_E') # Default is Mean Squared Error (MSE) # See calc_error method for list of options. -if __name__=='__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/5325e8e93d0aa3df393b9d77f56c9160/lstm_model.py b/docs/_downloads/5325e8e93d0aa3df393b9d77f56c9160/lstm_model.py index 717e3f65..983a4dd7 100644 --- a/docs/_downloads/5325e8e93d0aa3df393b9d77f56c9160/lstm_model.py +++ b/docs/_downloads/5325e8e93d0aa3df393b9d77f56c9160/lstm_model.py @@ -1,5 +1,5 @@ # Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. -# This ensures that the directory containing examples is in the python search directories +# This ensures that the directory containing examples is in the python search directories """ Example building a LSTMStateTransitionModel from data. This is a simple example of how to use the LSTMStateTransitionModel class. @@ -16,11 +16,12 @@ from prog_models.data_models import LSTMStateTransitionModel from prog_models.models import ThrownObject, BatteryElectroChemEOD + def run_example(): # ----------------------------------------------------- # Example 1- set timestep # Here we will create a model for a specific timestep. - # The model will only work with that timestep + # The model will only work with that timestep # This is useful if you know the timestep you would like to use # ----------------------------------------------------- TIMESTEP = 0.1 @@ -30,24 +31,27 @@ def run_example(): # For cases where you're generating a model from data # (e.g., collected from a testbed or a real-world environment), # you'll replace that generated data with your own. 
- print('Generating data') + print("Generating data") m = ThrownObject() def future_loading(t, x=None): - return m.InputContainer({}) # No input for thrown object + return m.InputContainer({}) # No input for thrown object - data = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP, dt=TIMESTEP) + data = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP, dt=TIMESTEP + ) # Step 2: Generate model # We'll use the LSTMStateTransitionModel class to generate a model # from the data. - print('Building model...') + print("Building model...") m2 = LSTMStateTransitionModel.from_data( inputs=[data.inputs], outputs=[data.outputs], window=4, epochs=30, # Maximum number of epochs, may stop earlier if early stopping enabled - output_keys=['x']) + output_keys=["x"], + ) # We can see the training history # Should show the model progressively getting better @@ -55,16 +59,16 @@ def future_loading(t, x=None): # If val_loss starts going up again, then we may be overtraining m2.plot_history() plt.show() - + # Step 3: Use model to simulate_to time of threshold - print('Simulating with generated model...') + print("Simulating with generated model...") t_counter = 0 x_counter = m.initialize() def future_loading2(t, x=None): - # Future Loading is a bit complicated here - # Loading for the resulting model includes the data inputs, + # Future Loading is a bit complicated here + # Loading for the resulting model includes the data inputs, # and the output from the last timestep nonlocal t_counter, x_counter z = m.output(x_counter) @@ -72,50 +76,67 @@ def future_loading2(t, x=None): x_counter = m.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z - - results2 = m2.simulate_to(data.times[-1], future_loading2, dt=TIMESTEP, save_freq=TIMESTEP) + + results2 = m2.simulate_to( + data.times[-1], future_loading2, dt=TIMESTEP, save_freq=TIMESTEP + ) # Step 4: Compare model to original model - print('Comparing results...') - data.outputs.plot(title='original model') - results2.outputs.plot(title='generated model') + print("Comparing results...") + data.outputs.plot(title="original model") + results2.outputs.plot(title="generated model") plt.show() # ----------------------------------------------------- - # Example 2- variable timestep + # Example 2- variable timestep # Here we will create a model to work with any timestep # We do this by adding timestep as a variable in the model # ----------------------------------------------------- # Step 1: Generate additional data - # We will use data generated above, but we also want data at additional timesteps - print('\n------------------------------------------\nExample 2...') - print('Generating additional data...') - data_half = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2) - data_quarter = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4) - data_twice = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2) - data_four = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*4, dt=TIMESTEP*4) + # We will use data generated above, but we also want data at additional timesteps + print("\n------------------------------------------\nExample 2...") + print("Generating additional data...") + data_half = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP / 2, dt=TIMESTEP / 2 + ) 
+ data_quarter = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP / 4, dt=TIMESTEP / 4 + ) + data_twice = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP * 2, dt=TIMESTEP * 2 + ) + data_four = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP * 4, dt=TIMESTEP * 4 + ) # Step 2: Data Prep # We need to add the timestep as a input u = np.array([[TIMESTEP] for _ in data.inputs]) - u_half = np.array([[TIMESTEP/2] for _ in data_half.inputs]) - u_quarter = np.array([[TIMESTEP/4] for _ in data_quarter.inputs]) - u_twice = np.array([[TIMESTEP*2] for _ in data_twice.inputs]) - u_four = np.array([[TIMESTEP*4] for _ in data_four.inputs]) + u_half = np.array([[TIMESTEP / 2] for _ in data_half.inputs]) + u_quarter = np.array([[TIMESTEP / 4] for _ in data_quarter.inputs]) + u_twice = np.array([[TIMESTEP * 2] for _ in data_twice.inputs]) + u_four = np.array([[TIMESTEP * 4] for _ in data_four.inputs]) input_data = [u, u_half, u_quarter, u_twice, u_four] - output_data = [data.outputs, data_half.outputs, data_quarter.outputs, data_twice.outputs, data_four.outputs] + output_data = [ + data.outputs, + data_half.outputs, + data_quarter.outputs, + data_twice.outputs, + data_four.outputs, + ] # Step 3: Generate Model - print('Building model...') + print("Building model...") m3 = LSTMStateTransitionModel.from_data( inputs=input_data, outputs=output_data, window=4, epochs=30, - input_keys=['dt'], - output_keys=['x']) + input_keys=["dt"], + output_keys=["x"], + ) # Note, since we're generating from a model, we could also have done this: # m3 = LSTMStateTransitionModel.from_model( # m, @@ -134,21 +155,25 @@ def future_loading2(t, x=None): def future_loading3(t, x=None): nonlocal t_counter, x_counter - z = m3.InputContainer({'x_t-1': x_counter['x'], 'dt': t - t_counter}) + z = m3.InputContainer({"x_t-1": x_counter["x"], "dt": t - t_counter}) x_counter = m.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z # Use new dt, not used in training - # Using a dt not used in training will demonstrate the model's + # Using a dt not used in training will demonstrate the model's # ability to handle different timesteps not part of training set - data = m.simulate_to(data.times[-1], future_loading, dt=TIMESTEP*3, save_freq=TIMESTEP*3) - results3 = m3.simulate_to(data.times[-1], future_loading3, dt=TIMESTEP*3, save_freq=TIMESTEP*3) + data = m.simulate_to( + data.times[-1], future_loading, dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) + results3 = m3.simulate_to( + data.times[-1], future_loading3, dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) # Step 5: Compare Results - print('Comparing results...') - data.outputs.plot(title='original model') - results3.outputs.plot(title='generated model') + print("Comparing results...") + data.outputs.plot(title="original model") + results3.outputs.plot(title="generated model") plt.show() # ----------------------------------------------------- @@ -157,10 +182,13 @@ def future_loading3(t, x=None): # For this example we will use the BatteryElectroChemEOD model # We also include the event state (SOC) # ----------------------------------------------------- - print('\n------------------------------------------\nExample 3...') - print('Generating data...') + print("\n------------------------------------------\nExample 3...") + print("Generating data...") batt = BatteryElectroChemEOD(process_noise=0, measurement_noise=0) - future_loading_eqns = [lambda t, x=None, 
load=load: batt.InputContainer({'i': 1+1.5*load}) for load in range(6)] + future_loading_eqns = [ + lambda t, x=None, load=load: batt.InputContainer({"i": 1 + 1.5 * load}) + for load in range(6) + ] # Generate data with different loading and step sizes # Adding the step size as an element of the output input_data = [] @@ -168,18 +196,23 @@ def future_loading3(t, x=None): es_data = [] t_met_data = [] for i in range(9): - dt = i/3+0.25 + dt = i / 3 + 0.25 for loading_eqn in future_loading_eqns: d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) - input_data.append(np.array([np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float)) + input_data.append( + np.array( + [np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], + dtype=float, + ) + ) output_data.append(d.outputs) es_data.append(d.event_states) - t_met = [[False]for _ in d.times] + t_met = [[False] for _ in d.times] t_met[-1][0] = True # Threshold has been met at the last timestep t_met_data.append(t_met) # Step 2: Generate Model - print('Building model...') + print("Building model...") m_batt = LSTMStateTransitionModel.from_data( inputs=input_data, outputs=output_data, @@ -188,9 +221,10 @@ def future_loading3(t, x=None): window=12, epochs=10, units=64, # Additional units given the increased complexity of the system - input_keys=['i', 'dt'], - output_keys=['t', 'v'], - event_keys=['EOD']) + input_keys=["i", "dt"], + output_keys=["t", "v"], + event_keys=["EOD"], + ) # Take a look at the training history. m_batt.plot_history() @@ -201,32 +235,35 @@ def future_loading3(t, x=None): x_counter = batt.initialize() def future_loading(t, x=None): - return batt.InputContainer({'i': 3}) + return batt.InputContainer({"i": 3}) - def future_loading2(t, x = None): + def future_loading2(t, x=None): nonlocal t_counter, x_counter z = batt.output(x_counter) - z = m_batt.InputContainer({'i': 3, 't_t-1': z['t'], 'v_t-1': z['v'], 'dt': t - t_counter}) + z = m_batt.InputContainer( + {"i": 3, "t_t-1": z["t"], "v_t-1": z["v"], "dt": t - t_counter} + ) x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z - # Use a new dt, not used in training. - # Using a dt not used in training will demonstrate the model's + # Use a new dt, not used in training. 
+ # Using a dt not used in training will demonstrate the model's # ability to handle different timesteps not part of training set data = batt.simulate_to_threshold(future_loading, dt=1, save_freq=1) results = m_batt.simulate_to_threshold(future_loading2, dt=1, save_freq=1) # Step 5: Compare Results - print('Comparing results...') - data.outputs.plot(title='original model', compact=False) - results.outputs.plot(title='generated model', compact=False) - data.event_states.plot(title='original model', compact=False) - results.event_states.plot(title='generated model', compact=False) + print("Comparing results...") + data.outputs.plot(title="original model", compact=False) + results.outputs.plot(title="generated model", compact=False) + data.event_states.plot(title="original model", compact=False) + results.event_states.plot(title="generated model", compact=False) plt.show() # This last example isn't a perfect fit, but it matches the behavior # well, especially the voltage curve -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/5417b738f8b76e269e7bf2b5de661e87/dataset.py b/docs/_downloads/5417b738f8b76e269e7bf2b5de661e87/dataset.py index cae5b154..950ee2ca 100644 --- a/docs/_downloads/5417b738f8b76e269e7bf2b5de661e87/dataset.py +++ b/docs/_downloads/5417b738f8b76e269e7bf2b5de661e87/dataset.py @@ -6,65 +6,72 @@ .. dropdown:: More details - In this example, a battery dataset is downloaded from the NASA PCoE data repository. This dataset is then accessed and plotted. + In this example, a battery dataset is downloaded from the NASA PCoE data repository. This dataset is then accessed and plotted. """ import matplotlib.pyplot as plt import pickle from progpy.datasets import nasa_battery + DATASET_ID = 1 + def run_example(): # Step 1: Download and import the dataset for a single battery # Note: This may take some time - print('Downloading... ', end='') + print("Downloading... 
", end="") (desc, data) = nasa_battery.load_data(DATASET_ID) - print('done') + print("done") # We recommend saving the dataset to disk for future use # This way you don't have to download it each time - pickle.dump((desc, data), open(f'dataset_{DATASET_ID}.pkl', 'wb')) + pickle.dump((desc, data), open(f"dataset_{DATASET_ID}.pkl", "wb")) # Step 2: Access the dataset description - print(f'\nDataset {DATASET_ID}') - print(desc['description']) - print(f'Procedure: {desc["procedure"]}') + print(f"\nDataset {DATASET_ID}") + print(desc["description"]) + print(f"Procedure: {desc['procedure']}") # Step 3: Access the dataset data # Data is in format [run_id][time][variable] - # For the battery the variables are + # For the battery the variables are # 0: relativeTime (since beginning of run) # 1: current (amps) # 2: voltage # 3: temperature (°C) # so that data[a][b, 3] is the temperature at time index b (relative to the start of the run) for run a - print(f'\nNumber of runs: {len(data)}') - print(f'\nAnalyzing run 4') - print(f'number of time indices: {len(data[4])}') + print(f"\nNumber of runs: {len(data)}") + print("\nAnalyzing run 4") + print(f"number of time indices: {len(data[4])}") print(f"Details of run 4: {desc['runs'][4]}") # Plot the run plt.figure() plt.subplot(2, 1, 1) - plt.plot(data[4]['relativeTime'], data[4]['current']) - plt.ylabel('Current (A)') + plt.plot(data[4]["relativeTime"], data[4]["current"]) + plt.ylabel("Current (A)") plt.subplot(2, 1, 2) - plt.plot(data[4]['relativeTime'], data[4]['voltage']) - plt.ylabel('Voltage (V)') - plt.xlabel('Time (s)') - plt.title('Run 4') + plt.plot(data[4]["relativeTime"], data[4]["voltage"]) + plt.ylabel("Voltage (V)") + plt.xlabel("Time (s)") + plt.title("Run 4") # Graph all reference discharge profiles - indices = [i for i, x in enumerate(desc['runs']) if 'reference discharge' in x['desc'] and 'rest' not in x['desc']] + indices = [ + i + for i, x in enumerate(desc["runs"]) + if "reference discharge" in x["desc"] and "rest" not in x["desc"] + ] plt.figure() for i in indices: - plt.plot(data[i]['relativeTime'], data[i]['voltage'], label=f"Run {i}") - plt.title('Reference discharge profiles') - plt.xlabel('Time (s)') - plt.ylabel('Voltage (V)') + plt.plot(data[i]["relativeTime"], data[i]["voltage"], label=f"Run {i}") + plt.title("Reference discharge profiles") + plt.xlabel("Time (s)") + plt.ylabel("Voltage (V)") plt.show() -# This allows the module to be executed directly -if __name__=='__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/58f5f0c9dc87a40dac5420b5ba373eaf/eol_event.py b/docs/_downloads/58f5f0c9dc87a40dac5420b5ba373eaf/eol_event.py index 81e20939..a8c29cf0 100644 --- a/docs/_downloads/58f5f0c9dc87a40dac5420b5ba373eaf/eol_event.py +++ b/docs/_downloads/58f5f0c9dc87a40dac5420b5ba373eaf/eol_event.py @@ -4,7 +4,7 @@ Method: An instance of ThrownObject is used for this example. In this case it is trivial because the event 'falling' will always occur before 'impact', but for some other models that might not be true. The ThrownObject class is subclassed to add a new event 'EOL' which occurs if any other event occurs. The new model is then instantiated and used for prognostics like in basic_example. Prediction specifically specifies EOL as the event to be predicted. 
-Results: +Results: i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction ii) Time the event 'EOL' is predicted to occur (with uncertainty) @@ -16,35 +16,40 @@ from progpy.predictors import MonteCarlo from progpy.uncertain_data import ScalarData + def run_example(): - # Step 1: Define subclass with EOL event + # Step 1: Define subclass with EOL event # Similar to the prog_models 'events' example, but with an EOL event class ThrownObjectWithEOL(ThrownObject): - events = ThrownObject.events + ['EOL'] + events = ThrownObject.events + ["EOL"] def event_state(self, x): es = super().event_state(x) # Add EOL Event (minimum event state) - es['EOL'] = min(list(es.values())) + es["EOL"] = min(list(es.values())) return es - + def threshold_met(self, x): t_met = super().threshold_met(x) # Add EOL Event (if any events have occured) - t_met['EOL'] = any(list(t_met.values())) + t_met["EOL"] = any(list(t_met.values())) return t_met - + # Step 2: Create instance of subclass m = ThrownObjectWithEOL(process_noise=1) # Step 3: Setup for prediction pred = MonteCarlo(m) + def future_loading(t=None, x=None): return {} # No future loading for ThrownObject + state = ScalarData(m.initialize()) # Step 4: Predict to EOL event - pred_results = pred.predict(state, future_loading, events=['EOL'], dt=0.01, n_samples=50) + pred_results = pred.predict( + state, future_loading, events=["EOL"], dt=0.01, n_samples=50 + ) # In this case EOL is when the object starts falling # But for some models where events aren't sequential, there might be a mixture of events in the EOL @@ -52,6 +57,7 @@ def future_loading(t=None, x=None): pred_results.time_of_event.plot_hist() plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/58fbf5972f15f107846515df4c444be9/lstm_model.py b/docs/_downloads/58fbf5972f15f107846515df4c444be9/lstm_model.py new file mode 100644 index 00000000..a34cf88e --- /dev/null +++ b/docs/_downloads/58fbf5972f15f107846515df4c444be9/lstm_model.py @@ -0,0 +1,269 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. +# This ensures that the directory containing examples is in the python search directories + +""" +Example building a LSTMStateTransitionModel from data. This is a simple example of how to use the LSTMStateTransitionModel class. + +.. dropdown:: More details + + In this example, we generate fake data using the ThrownObject model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. We then use the generated model and compare to the original model. + + Finally, we repeat the exercise with data from the more complex BatteryElectroChemEOD model. +""" + +import matplotlib.pyplot as plt +import numpy as np +from progpy.data_models import LSTMStateTransitionModel +from progpy.models import ThrownObject, BatteryElectroChemEOD + + +def run_example(): + # ----------------------------------------------------- + # Example 1- set timestep + # Here we will create a model for a specific timestep. 
+ # The model will only work with that timestep + # This is useful if you know the timestep you would like to use + # ----------------------------------------------------- + TIMESTEP = 0.1 + + # Step 1: Generate data + # We'll use the ThrownObject model to generate data. + # For cases where you're generating a model from data + # (e.g., collected from a testbed or a real-world environment), + # you'll replace that generated data with your own. + print("Generating data") + m = ThrownObject() + + def future_loading(t, x=None): + return m.InputContainer({}) # No input for thrown object + + data = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP, dt=TIMESTEP + ) + + # Step 2: Generate model + # We'll use the LSTMStateTransitionModel class to generate a model + # from the data. + print("Building model...") + m2 = LSTMStateTransitionModel.from_data( + inputs=[data.inputs], + outputs=[data.outputs], + window=4, + epochs=30, # Maximum number of epochs, may stop earlier if early stopping enabled + output_keys=["x"], + ) + + # We can see the training history + # Should show the model progressively getting better + # (i.e., the loss going down). + # If val_loss starts going up again, then we may be overtraining + m2.plot_history() + plt.show() + + # Step 3: Use model to simulate_to time of threshold + print("Simulating with generated model...") + + t_counter = 0 + x_counter = m.initialize() + + def future_loading2(t, x=None): + # Future Loading is a bit complicated here + # Loading for the resulting model includes the data inputs, + # and the output from the last timestep + nonlocal t_counter, x_counter + z = m.output(x_counter) + z = m2.InputContainer(z.matrix) + x_counter = m.next_state(x_counter, future_loading(t), t - t_counter) + t_counter = t + return z + + results2 = m2.simulate_to( + data.times[-1], future_loading2, dt=TIMESTEP, save_freq=TIMESTEP + ) + + # Step 4: Compare model to original model + print("Comparing results...") + data.outputs.plot(title="original model") + results2.outputs.plot(title="generated model") + plt.show() + + # ----------------------------------------------------- + # Example 2- variable timestep + # Here we will create a model to work with any timestep + # We do this by adding timestep as a variable in the model + # ----------------------------------------------------- + + # Step 1: Generate additional data + # We will use data generated above, but we also want data at additional timesteps + print("\n------------------------------------------\nExample 2...") + print("Generating additional data...") + data_half = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP / 2, dt=TIMESTEP / 2 + ) + data_quarter = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP / 4, dt=TIMESTEP / 4 + ) + data_twice = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP * 2, dt=TIMESTEP * 2 + ) + data_four = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP * 4, dt=TIMESTEP * 4 + ) + + # Step 2: Data Prep + # We need to add the timestep as a input + u = np.array([[TIMESTEP] for _ in data.inputs]) + u_half = np.array([[TIMESTEP / 2] for _ in data_half.inputs]) + u_quarter = np.array([[TIMESTEP / 4] for _ in data_quarter.inputs]) + u_twice = np.array([[TIMESTEP * 2] for _ in data_twice.inputs]) + u_four = np.array([[TIMESTEP * 4] for _ in data_four.inputs]) + + input_data = [u, u_half, u_quarter, u_twice, u_four] + output_data = [ + data.outputs, + 
data_half.outputs, + data_quarter.outputs, + data_twice.outputs, + data_four.outputs, + ] + + # Step 3: Generate Model + print("Building model...") + m3 = LSTMStateTransitionModel.from_data( + inputs=input_data, + outputs=output_data, + window=4, + epochs=30, + input_keys=["dt"], + output_keys=["x"], + ) + # Note, since we're generating from a model, we could also have done this: + # m3 = LSTMStateTransitionModel.from_model( + # m, + # [future_loading for _ in range(5)], + # dt = [TIMESTEP, TIMESTEP/2, TIMESTEP/4, TIMESTEP*2, TIMESTEP*4], + # window=4, + # epochs=30) + + # Take a look at the training history + m3.plot_history() + plt.show() + + # Step 4: Simulate with model + t_counter = 0 + x_counter = m.initialize() + + def future_loading3(t, x=None): + nonlocal t_counter, x_counter + z = m3.InputContainer({"x_t-1": x_counter["x"], "dt": t - t_counter}) + x_counter = m.next_state(x_counter, future_loading(t), t - t_counter) + t_counter = t + return z + + # Use new dt, not used in training + # Using a dt not used in training will demonstrate the model's + # ability to handle different timesteps not part of training set + data = m.simulate_to( + data.times[-1], future_loading, dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) + results3 = m3.simulate_to( + data.times[-1], future_loading3, dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) + + # Step 5: Compare Results + print("Comparing results...") + data.outputs.plot(title="original model") + results3.outputs.plot(title="generated model") + plt.show() + + # ----------------------------------------------------- + # Example 3- More complicated system + # Here we will create a model for a more complicated system + # For this example we will use the BatteryElectroChemEOD model + # We also include the event state (SOC) + # ----------------------------------------------------- + print("\n------------------------------------------\nExample 3...") + print("Generating data...") + batt = BatteryElectroChemEOD(process_noise=0, measurement_noise=0) + future_loading_eqns = [ + lambda t, x=None, load=load: batt.InputContainer({"i": 1 + 1.5 * load}) + for load in range(6) + ] + # Generate data with different loading and step sizes + # Adding the step size as an element of the output + input_data = [] + output_data = [] + es_data = [] + t_met_data = [] + for i in range(9): + dt = i / 3 + 0.25 + for loading_eqn in future_loading_eqns: + d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) + input_data.append( + np.array( + [np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], + dtype=float, + ) + ) + output_data.append(d.outputs) + es_data.append(d.event_states) + t_met = [[False] for _ in d.times] + t_met[-1][0] = True # Threshold has been met at the last timestep + t_met_data.append(t_met) + + # Step 2: Generate Model + print("Building model...") + m_batt = LSTMStateTransitionModel.from_data( + inputs=input_data, + outputs=output_data, + event_states=es_data, + t_met=t_met_data, + window=12, + epochs=10, + units=64, # Additional units given the increased complexity of the system + input_keys=["i", "dt"], + output_keys=["t", "v"], + event_keys=["EOD"], + ) + + # Take a look at the training history. 
+ m_batt.plot_history() + plt.show() + + # Step 3: Simulate with model + t_counter = 0 + x_counter = batt.initialize() + + def future_loading(t, x=None): + return batt.InputContainer({"i": 3}) + + def future_loading2(t, x=None): + nonlocal t_counter, x_counter + z = batt.output(x_counter) + z = m_batt.InputContainer( + {"i": 3, "t_t-1": z["t"], "v_t-1": z["v"], "dt": t - t_counter} + ) + x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter) + t_counter = t + return z + + # Use a new dt, not used in training. + # Using a dt not used in training will demonstrate the model's + # ability to handle different timesteps not part of training set + data = batt.simulate_to_threshold(future_loading, dt=1, save_freq=1) + results = m_batt.simulate_to_threshold(future_loading2, dt=1, save_freq=1) + + # Step 5: Compare Results + print("Comparing results...") + data.outputs.plot(title="original model", compact=False) + results.outputs.plot(title="generated model", compact=False) + data.event_states.plot(title="original model", compact=False) + results.event_states.plot(title="generated model", compact=False) + plt.show() + + # This last example isn't a perfect fit, but it matches the behavior + # well, especially the voltage curve + + +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/59378936e8f733d1fe06c87afeeab729/events.ipynb b/docs/_downloads/59378936e8f733d1fe06c87afeeab729/events.ipynb index 921f291c..4dd70b1e 100644 --- a/docs/_downloads/59378936e8f733d1fe06c87afeeab729/events.ipynb +++ b/docs/_downloads/59378936e8f733d1fe06c87afeeab729/events.ipynb @@ -1,54 +1,146 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample further illustrating the concept of 'events' which generalizes EOL. \n\n'Events' is the term used to describe something to be predicted. \nGenerally in the PHM community these are referred to as End of Life (EOL). \nHowever, they can be much more.\n\nIn the prog_models package, events can be anything that needs to be predicted. \nEvents can represent End of Life (EOL), End of Mission (EOM), warning thresholds, or any Event of Interest (EOI). \n\nThis example demonstrates how events can be used in your applications. \n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from progpy.models import BatteryElectroChemEOD\n\ndef run_example():\n # Example: Warning thresholds\n # In this example we will use the battery model\n # We of course are interested in end of discharge, but for this example we\n # have a requirement that says the battery must not fall below 5% State of Charge (SOC)\n # Note: SOC is the event state for the End of Discharge (EOD) event\n # Event states, like SOC go between 0 and 1, where 1 is healthy and at 0 the event has occured. 
\n # So, 5% SOC corresponds to an 'EOD' event state of 0.05\n # Additionally, we have two warning thresholds (yellow and red)\n\n YELLOW_THRESH = 0.15\n RED_THRESH = 0.1\n THRESHOLD = 0.05\n\n # Step 1: Extend the battery model to define the additional events\n class MyBatt(BatteryElectroChemEOD):\n events = BatteryElectroChemEOD.events + ['EOD_warn_yellow', 'EOD_warn_red', 'EOD_requirement_threshold']\n\n def event_state(self, state):\n # Get event state from parent\n event_state = super().event_state(state)\n\n # Add yellow, red, and failure states by scaling EOD state\n # Here we scale so the threshold SOC is 0 by their associated events, while SOC of 1 is still 1\n # For example, for yellow we want EOD_warn_yellow to be 1 when SOC is 1, and 0 when SOC is YELLOW_THRESH or lower\n event_state['EOD_warn_yellow'] = (event_state['EOD']-YELLOW_THRESH)/(1-YELLOW_THRESH) \n event_state['EOD_warn_red'] = (event_state['EOD']-RED_THRESH)/(1-RED_THRESH)\n event_state['EOD_requirement_threshold'] = (event_state['EOD']-THRESHOLD)/(1-THRESHOLD)\n\n # Return\n return event_state\n\n def threshold_met(self, x):\n # Get threshold met from parent\n t_met = super().threshold_met(x)\n\n # Add yell and red states from event_state\n event_state = self.event_state(x)\n t_met['EOD_warn_yellow'] = event_state['EOD_warn_yellow'] <= 0\n t_met['EOD_warn_red'] = event_state['EOD_warn_red'] <= 0\n t_met['EOD_requirement_threshold'] = event_state['EOD_requirement_threshold'] <= 0\n\n return t_met\n\n # Step 2: Use it\n m = MyBatt()\n\n # 2a: Setup model\n def future_loading(t, x=None):\n # Variable (piece-wise) future loading scheme \n # For a battery, future loading is in term of current 'i' in amps. \n if (t < 600):\n i = 2\n elif (t < 900):\n i = 1\n elif (t < 1800):\n i = 4\n elif (t < 3000):\n i = 2 \n else:\n i = 3\n return m.InputContainer({'i': i})\n \n # 2b: Simulate to threshold\n simulated_results = m.simulate_to_threshold(future_loading, threshold_keys=['EOD'], print = True)\n\n # 2c: Plot results\n simulated_results.event_states.plot()\n import matplotlib.pyplot as plt\n plt.show()\n\n# This allows the module to be executed directly \nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample further illustrating the concept of 'events' which generalizes EOL. \n\n'Events' is the term used to describe something to be predicted. \nGenerally in the PHM community these are referred to as End of Life (EOL). \nHowever, they can be much more.\n\nIn the prog_models package, events can be anything that needs to be predicted. \nEvents can represent End of Life (EOL), End of Mission (EOM), warning thresholds, or any Event of Interest (EOI). \n\nThis example demonstrates how events can be used in your applications. 
\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from progpy.models import BatteryElectroChemEOD\n", + "\n", + "\n", + "def run_example():\n", + " # Example: Warning thresholds\n", + " # In this example we will use the battery model\n", + " # We of course are interested in end of discharge, but for this example we\n", + " # have a requirement that says the battery must not fall below 5% State of Charge (SOC)\n", + " # Note: SOC is the event state for the End of Discharge (EOD) event\n", + " # Event states, like SOC go between 0 and 1, where 1 is healthy and at 0 the event has occured.\n", + " # So, 5% SOC corresponds to an 'EOD' event state of 0.05\n", + " # Additionally, we have two warning thresholds (yellow and red)\n", + "\n", + " YELLOW_THRESH = 0.15\n", + " RED_THRESH = 0.1\n", + " THRESHOLD = 0.05\n", + "\n", + " # Step 1: Extend the battery model to define the additional events\n", + " class MyBatt(BatteryElectroChemEOD):\n", + " events = BatteryElectroChemEOD.events + [\n", + " \"EOD_warn_yellow\",\n", + " \"EOD_warn_red\",\n", + " \"EOD_requirement_threshold\",\n", + " ]\n", + "\n", + " def event_state(self, state):\n", + " # Get event state from parent\n", + " event_state = super().event_state(state)\n", + "\n", + " # Add yellow, red, and failure states by scaling EOD state\n", + " # Here we scale so the threshold SOC is 0 by their associated events, while SOC of 1 is still 1\n", + " # For example, for yellow we want EOD_warn_yellow to be 1 when SOC is 1, and 0 when SOC is YELLOW_THRESH or lower\n", + " event_state[\"EOD_warn_yellow\"] = (event_state[\"EOD\"] - YELLOW_THRESH) / (\n", + " 1 - YELLOW_THRESH\n", + " )\n", + " event_state[\"EOD_warn_red\"] = (event_state[\"EOD\"] - RED_THRESH) / (\n", + " 1 - RED_THRESH\n", + " )\n", + " event_state[\"EOD_requirement_threshold\"] = (\n", + " event_state[\"EOD\"] - THRESHOLD\n", + " ) / (1 - THRESHOLD)\n", + "\n", + " # Return\n", + " return event_state\n", + "\n", + " def threshold_met(self, x):\n", + " # Get threshold met from parent\n", + " t_met = super().threshold_met(x)\n", + "\n", + " # Add yell and red states from event_state\n", + " event_state = self.event_state(x)\n", + " t_met[\"EOD_warn_yellow\"] = event_state[\"EOD_warn_yellow\"] <= 0\n", + " t_met[\"EOD_warn_red\"] = event_state[\"EOD_warn_red\"] <= 0\n", + " t_met[\"EOD_requirement_threshold\"] = (\n", + " event_state[\"EOD_requirement_threshold\"] <= 0\n", + " )\n", + "\n", + " return t_met\n", + "\n", + " # Step 2: Use it\n", + " m = MyBatt()\n", + "\n", + " # 2a: Setup model\n", + " def future_loading(t, x=None):\n", + " # Variable (piece-wise) future loading scheme\n", + " # For a battery, future loading is in term of current 'i' in amps.\n", + " if t < 600:\n", + " i = 2\n", + " elif t < 900:\n", + " i = 1\n", + " elif t < 1800:\n", + " i = 4\n", + " elif t < 3000:\n", + " i = 2\n", + " else:\n", + " i = 3\n", + " return m.InputContainer({\"i\": i})\n", + "\n", + " # 2b: Simulate to threshold\n", + " simulated_results = m.simulate_to_threshold(\n", + " future_loading, threshold_keys=[\"EOD\"], print=True\n", + " )\n", + "\n", + " # 2c: Plot results\n", + " simulated_results.event_states.plot()\n", + " import matplotlib.pyplot as plt\n", + "\n", + " plt.show()\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + 
"language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/5ab98a2d1d233367d7c2fa23299200da/vectorized.ipynb b/docs/_downloads/5ab98a2d1d233367d7c2fa23299200da/vectorized.ipynb index a5491df1..76768a72 100644 --- a/docs/_downloads/5ab98a2d1d233367d7c2fa23299200da/vectorized.ipynb +++ b/docs/_downloads/5ab98a2d1d233367d7c2fa23299200da/vectorized.ipynb @@ -1,54 +1,96 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample using simulate_to_threshold with vectorized states. In this example we are using the thrown_object model to simulate multiple thrown objects\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from progpy.models.thrown_object import ThrownObject\nfrom numpy import array, all\n\ndef run_example():\n # Step 1: Setup object\n m = ThrownObject()\n def future_load(t, x=None):\n return {} # No load for thrown objects\n\n # Step 2: Setup vectorized initial state\n # For this example we are saying there are 4 throwers of various strengths and heights\n first_state = {\n 'x': array([1.75, 1.8, 1.85, 1.9]),\n 'v': array([35, 39, 22, 47])\n }\n\n # Step 3: Simulate to threshold\n # Here we are simulating till impact using the first state defined above\n (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, x = first_state, threshold_keys=['impact'], print = True, dt=0.1, save_freq=2)\n\n # Now lets do the same thing but only stop when all hit the ground\n def thresholds_met_eqn(thresholds_met):\n return all(thresholds_met['impact']) # Stop when all impact ground\n\n simulated_results = m.simulate_to_threshold(future_load, x = first_state, thresholds_met_eqn=thresholds_met_eqn, print = True, dt=0.1, save_freq=2)\n\n# This allows the module to be executed directly \nif __name__=='__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample using simulate_to_threshold with vectorized states. 
In this example we are using the thrown_object model to simulate multiple thrown objects\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from progpy.models.thrown_object import ThrownObject\n", + "from numpy import array, all\n", + "\n", + "\n", + "def run_example():\n", + " # Step 1: Setup object\n", + " m = ThrownObject()\n", + "\n", + " def future_load(t, x=None):\n", + " return {} # No load for thrown objects\n", + "\n", + " # Step 2: Setup vectorized initial state\n", + " # For this example we are saying there are 4 throwers of various strengths and heights\n", + " first_state = {\"x\": array([1.75, 1.8, 1.85, 1.9]), \"v\": array([35, 39, 22, 47])}\n", + "\n", + " # Step 3: Simulate to threshold\n", + " # Here we are simulating till impact using the first state defined above\n", + " (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(\n", + " future_load,\n", + " x=first_state,\n", + " threshold_keys=[\"impact\"],\n", + " print=True,\n", + " dt=0.1,\n", + " save_freq=2,\n", + " )\n", + "\n", + " # Now lets do the same thing but only stop when all hit the ground\n", + " def thresholds_met_eqn(thresholds_met):\n", + " return all(thresholds_met[\"impact\"]) # Stop when all impact ground\n", + "\n", + " simulated_results = m.simulate_to_threshold(\n", + " future_load,\n", + " x=first_state,\n", + " thresholds_met_eqn=thresholds_met_eqn,\n", + " print=True,\n", + " dt=0.1,\n", + " save_freq=2,\n", + " )\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/5b3ab2dda487fa3d828b6cadf3955046/dynamic_step_size.py b/docs/_downloads/5b3ab2dda487fa3d828b6cadf3955046/dynamic_step_size.py index 74f80696..5a8baf1f 100644 --- a/docs/_downloads/5b3ab2dda487fa3d828b6cadf3955046/dynamic_step_size.py +++ b/docs/_downloads/5b3ab2dda487fa3d828b6cadf3955046/dynamic_step_size.py @@ -2,54 +2,70 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating ways to use the dynamic step size feature. This feature allows users to define a time-step that changes with time or state. +Example demonstrating ways to use the dynamic step size feature. This feature allows users to define a time-step that changes with time or state. """ from progpy.models.thrown_object import ThrownObject + def run_example(): print("EXAMPLE 1: dt of 1 until 8 sec, then 0.5\n\nSetting up...\n") # Step 1: Create instance of model m = ThrownObject() - # Step 2: Setup for simulation + # Step 2: Setup for simulation def future_load(t, x=None): return {} # Step 3: Define dynamic step size function - # This `next_time` function will specify what the next step of the simulation should be at any state and time. + # This `next_time` function will specify what the next step of the simulation should be at any state and time. # f(x, t) -> (t, dt) def next_time(t, x): - # In this example dt is a function of time. 
We will use a dt of 1 for the first 8 seconds, then 0.5 + # In this example dt is a function of time. We will use a dt of 1 for the first 8 seconds, then 0.5 if t < 8: return 1 return 0.5 # Step 4: Simulate to impact # Here we're printing every time step so we can see the step size change - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') - (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, save_freq=1e-99, print=True, dt=next_time, threshold_keys=['impact']) + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") + (times, inputs, states, outputs, event_states) = m.simulate_to_threshold( + future_load, + save_freq=1e-99, + print=True, + dt=next_time, + threshold_keys=["impact"], + ) # Example 2 - print("EXAMPLE 2: dt of 1 until impact event state 0.5, then 0.25 \n\nSetting up...\n") + print( + "EXAMPLE 2: dt of 1 until impact event state 0.5, then 0.25 \n\nSetting up...\n" + ) # Step 3: Define dynamic step size function - # This `next_time` function will specify what the next step of the simulation should be at any state and time. + # This `next_time` function will specify what the next step of the simulation should be at any state and time. # f(x, t) -> (t, dt) def next_time(t, x): # In this example dt is a function of state. Uses a dt of 1 until impact event state 0.5, then 0.25 event_state = m.event_state(x) - if event_state['impact'] < 0.5: + if event_state["impact"] < 0.5: return 0.25 return 1 # Step 4: Simulate to impact # Here we're printing every time step so we can see the step size change - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') - (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, save_freq=1e-99, print=True, dt=next_time, threshold_keys=['impact']) + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") + (times, inputs, states, outputs, event_states) = m.simulate_to_threshold( + future_load, + save_freq=1e-99, + print=True, + dt=next_time, + threshold_keys=["impact"], + ) + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/5dbeef5ab444564601ee480dce541e45/sim_valve.py b/docs/_downloads/5dbeef5ab444564601ee480dce541e45/sim_valve.py new file mode 100644 index 00000000..d869d507 --- /dev/null +++ b/docs/_downloads/5dbeef5ab444564601ee480dce541e45/sim_valve.py @@ -0,0 +1,86 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the +# National Aeronautics and Space Administration. All Rights Reserved. + +""" +Example of a pneumatic valve being simulated until threshold is met. 
+""" + +from progpy.models.pneumatic_valve import PneumaticValve + + +def run_example(): + # Create a model object + valv = PneumaticValve(process_noise=0) + + # Define future loading function + cycle_time = 20 + + def future_loading(t, x=None): + t = t % cycle_time + if t < cycle_time / 2: + return valv.InputContainer( + { + "pL": 3.5e5, + "pR": 2.0e5, + # Open Valve + "uTop": False, + "uBot": True, + } + ) + return valv.InputContainer( + { + "pL": 3.5e5, + "pR": 2.0e5, + # Close Valve + "uTop": True, + "uBot": False, + } + ) + + # Simulate to threshold + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") + # Configure options + config = { + "dt": 0.01, + "horizon": 800, + "save_freq": 60, + "print": True, + "progress": True, + } + # Set wear parameter for spring to 1 + valv.parameters["x0"]["wk"] = 1 + + # Define first measured output. This is needed by the simulate_to_threshold method to initialize state + first_output = valv.output(valv.initialize(future_loading(0))) + # Simulate + simulated_results = valv.simulate_to_threshold( + future_loading, first_output, **config + ) + + # Simulate to threshold again but with a different wear mode + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") + # Configure options + config = { + "dt": 0.01, + "horizon": 800, + "save_freq": 60, + "print": True, + "progress": True, + } + # Reset wear parameter for spring to 0, set wear parameter for friction to 1 + valv.parameters["x0"]["wk"] = 0 + valv.parameters["x0"]["wr"] = 1 + + # Define first measured output. This is needed by the simulate_to_threshold method to initialize state + first_output = valv.output(valv.initialize(future_loading(0))) + # Simulate + simulated_results = valv.simulate_to_threshold( + future_loading, first_output, **config + ) + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/5fadba24f260d034499545c9e4baa297/predict_specific_event.py b/docs/_downloads/5fadba24f260d034499545c9e4baa297/predict_specific_event.py index 2093e971..72c01a91 100644 --- a/docs/_downloads/5fadba24f260d034499545c9e4baa297/predict_specific_event.py +++ b/docs/_downloads/5fadba24f260d034499545c9e4baa297/predict_specific_event.py @@ -7,11 +7,12 @@ from progpy import state_estimators, predictors from progpy.models.thrown_object import ThrownObject + def run_example(): ## Setup m = ThrownObject() initial_state = m.initialize() - load = m.InputContainer({}) # Optimization - create once + load = m.InputContainer({}) # Optimization - create once ## State Estimation - perform a single ukf state estimate step filt = state_estimators.UnscentedKalmanFilter(m, initial_state) @@ -22,19 +23,20 @@ def run_example(): pred = predictors.UnscentedTransformPredictor(m) # Predict with a step size of 0.1 - mc_results = pred.predict(filt.x, dt=0.1, save_freq= 1, events=['impact']) + mc_results = pred.predict(filt.x, dt=0.1, save_freq=1, events=["impact"]) # Print Results for i, time in enumerate(mc_results.times): - print('\nt = {}'.format(time)) - print('\tu = {}'.format(mc_results.inputs.snapshot(i).mean)) - print('\tx = {}'.format(mc_results.states.snapshot(i).mean)) - print('\tz = {}'.format(mc_results.outputs.snapshot(i).mean)) - print('\tevent state = {}'.format(mc_results.states.snapshot(i).mean)) + print("\nt = {}".format(time)) + print("\tu = {}".format(mc_results.inputs.snapshot(i).mean)) + print("\tx = 
{}".format(mc_results.states.snapshot(i).mean)) + print("\tz = {}".format(mc_results.outputs.snapshot(i).mean)) + print("\tevent state = {}".format(mc_results.states.snapshot(i).mean)) # Note only impact event is shown here - print('\nToE:', mc_results.time_of_event.mean) + print("\nToE:", mc_results.time_of_event.mean) + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/638751a90485d06ba802c6a90154e62b/basic_example.py b/docs/_downloads/638751a90485d06ba802c6a90154e62b/basic_example.py index edd28473..3cbfae17 100644 --- a/docs/_downloads/638751a90485d06ba802c6a90154e62b/basic_example.py +++ b/docs/_downloads/638751a90485d06ba802c6a90154e62b/basic_example.py @@ -2,12 +2,12 @@ """ This example performs a state estimation and prediction with uncertainty given a Prognostics Model. - + Method: An instance of the ThrownObject model in progpy is created, and the prediction process is achieved in three steps: 1) State estimation of the current state is performed using a chosen state_estimator, and samples are drawn from this estimate 2) Prediction of future states (with uncertainty) and the times at which the event threshold will be reached 3) Metrics tools are used to further investigate the results of prediction -Results: +Results: i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction ii) Time event is predicted to occur (with uncertainty) iii) Various prediction metrics @@ -17,13 +17,14 @@ from progpy.models import ThrownObject from progpy import * + def run_example(): # Step 1: Setup model & future loading - m = ThrownObject(process_noise = 1) + m = ThrownObject(process_noise=1) initial_state = m.initialize() # Step 2: Demonstrating state estimator - # The state estimator is used to estimate the system state given sensor data. + # The state estimator is used to estimate the system state given sensor data. 
print("\nPerforming State Estimation Step") # Step 2a: Setup @@ -33,22 +34,26 @@ def run_example(): # Step 2b: Print & Plot Prior State print("Prior State:", filt.x.mean) - print('\nevent state: ', m.event_state(filt.x.mean)) - fig = filt.x.plot_scatter(label='prior') + print("\nevent state: ", m.event_state(filt.x.mean)) + fig = filt.x.plot_scatter(label="prior") # Step 2c: Perform state estimation step, given some measurement, above what's expected - example_measurements = m.OutputContainer({'x': 7.5}) + example_measurements = m.OutputContainer({"x": 7.5}) t = 0.1 u = m.InputContainer({}) - filt.estimate(t, u, example_measurements) # Update state, given (example) sensor data + filt.estimate( + t, u, example_measurements + ) # Update state, given (example) sensor data # Step 2d: Print & Plot Resulting Posterior State # Note the posterior state is greater than the predicted state of 5.95 - # This is because of the high measurement + # This is because of the high measurement print("\nPosterior State:", filt.x.mean) # Event state for 'falling' is less, because velocity has decreased - print('\nEvent State: ', m.event_state(filt.x.mean)) - filt.x.plot_scatter(fig=fig, label='posterior') # Add posterior state to figure from prior state + print("\nEvent State: ", m.event_state(filt.x.mean)) + filt.x.plot_scatter( + fig=fig, label="posterior" + ) # Add posterior state to figure from prior state # Note: in a prognostic application the above state estimation step would be repeated each time # there is new data. Here we're doing one step to demonstrate how the state estimator is used @@ -62,8 +67,10 @@ def run_example(): # Step 3b: Perform a prediction NUM_SAMPLES = 50 STEP_SIZE = 0.01 - mc_results = mc.predict(filt.x, n_samples = NUM_SAMPLES, dt=STEP_SIZE, save_freq=STEP_SIZE) - print('Predicted time of event (ToE): ', mc_results.time_of_event.mean) + mc_results = mc.predict( + filt.x, n_samples=NUM_SAMPLES, dt=STEP_SIZE, save_freq=STEP_SIZE + ) + print("Predicted time of event (ToE): ", mc_results.time_of_event.mean) # Here there are 2 events predicted, when the object starts falling, and when it impacts the ground. # Step 3c: Analyze the results @@ -79,36 +86,58 @@ def run_example(): # You can also access the final state (of type UncertainData), like so: # Note: to get a more accurate final state, you can decrease the step size. 
final_state = mc_results.time_of_event.final_state - print('State when object starts falling: ', final_state['falling'].mean) - + print("State when object starts falling: ", final_state["falling"].mean) + # You can also use the metrics package to generate some useful metrics on the result of a prediction print("\nEOD Prediction Metrics") from progpy.metrics import prob_success - print('\tPortion between 3.65 and 3.8: ', mc_results.time_of_event.percentage_in_bounds([3.65, 3.8], keys='falling')) - print('\tAssuming ground truth 3.7: ', mc_results.time_of_event.metrics(ground_truth=3.7, keys='falling')) - print('\tP(Success) if mission ends at 7.6: ', prob_success(mc_results.time_of_event, 7.6, keys='impact')) - # Plot state transition + print( + "\tPortion between 3.65 and 3.8: ", + mc_results.time_of_event.percentage_in_bounds([3.65, 3.8], keys="falling"), + ) + print( + "\tAssuming ground truth 3.7: ", + mc_results.time_of_event.metrics(ground_truth=3.7, keys="falling"), + ) + print( + "\tP(Success) if mission ends at 7.6: ", + prob_success(mc_results.time_of_event, 7.6, keys="impact"), + ) + + # Plot state transition # Here we will plot the states at t0, 25% to ToE, 50% to ToE, 75% to ToE, and ToE # You should see the states move together (i.e., velocity is lowest and highest when closest to the ground (before impact, and at beginning, respectively)) - fig = mc_results.states.snapshot(0).plot_scatter(label = "t={} s".format(int(mc_results.times[0]))) # 0 - quarter_index = int(len(mc_results.times)/4) - mc_results.states.snapshot(quarter_index).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[quarter_index]))) # 25% - mc_results.states.snapshot(quarter_index*2).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[quarter_index*2]))) # 50% - mc_results.states.snapshot(quarter_index*3).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[quarter_index*3]))) # 75% - mc_results.states.snapshot(-1).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[-1]))) # 100% + fig = mc_results.states.snapshot(0).plot_scatter( + label="t={} s".format(int(mc_results.times[0])) + ) # 0 + quarter_index = int(len(mc_results.times) / 4) + mc_results.states.snapshot(quarter_index).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index])) + ) # 25% + mc_results.states.snapshot(quarter_index * 2).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index * 2])) + ) # 50% + mc_results.states.snapshot(quarter_index * 3).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index * 3])) + ) # 75% + mc_results.states.snapshot(-1).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[-1])) + ) # 100% # Plot time of event for each event - # If you dont see many bins here, this is because there is not much variety in the estimate. + # If you dont see many bins here, this is because there is not much variety in the estimate. 
# You can increase the number of bins, decrease step size, or increase the number of samples to see more of a distribution - mc_results.time_of_event.plot_hist(keys='impact') - mc_results.time_of_event.plot_hist(keys='falling') - + mc_results.time_of_event.plot_hist(keys="impact") + mc_results.time_of_event.plot_hist(keys="falling") + # Step 4: Show all plots import matplotlib.pyplot as plt # For plotting + plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/678b1b25a6ac553aeff5011765ed5167/generate_surrogate.py b/docs/_downloads/678b1b25a6ac553aeff5011765ed5167/generate_surrogate.py index 34968706..e707712b 100644 --- a/docs/_downloads/678b1b25a6ac553aeff5011765ed5167/generate_surrogate.py +++ b/docs/_downloads/678b1b25a6ac553aeff5011765ed5167/generate_surrogate.py @@ -2,166 +2,199 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of generating a Dynamic Mode Decomposition surrogate model using the battery model +Example of generating a Dynamic Mode Decomposition surrogate model using the battery model """ from progpy.models import BatteryElectroChemEOD as Battery import matplotlib.pyplot as plt -def run_example(): - ### Example 1: Standard DMD Application + +def run_example(): + ### Example 1: Standard DMD Application ## Step 1: Create a model object batt = Battery() - ## Step 2: Define future loading functions for training data - # Here, we define two specific loading profiles. These could also be generated programmatically, for as many loading profiles as desired + ## Step 2: Define future loading functions for training data + # Here, we define two specific loading profiles. These could also be generated programmatically, for as many loading profiles as desired def future_loading_1(t, x=None): - # Variable (piece-wise) future loading scheme - if (t < 500): + # Variable (piece-wise) future loading scheme + if t < 500: i = 3 - elif (t < 1000): + elif t < 1000: i = 2 - elif (t < 1500): + elif t < 1500: i = 0.5 else: i = 4.5 - return batt.InputContainer({'i': i}) - + return batt.InputContainer({"i": i}) + def future_loading_2(t, x=None): - # Variable (piece-wise) future loading scheme - if (t < 300): + # Variable (piece-wise) future loading scheme + if t < 300: i = 2 - elif (t < 800): + elif t < 800: i = 3.5 - elif (t < 1300): + elif t < 1300: i = 4 - elif (t < 1600): + elif t < 1600: i = 1.5 else: i = 5 - return batt.InputContainer({'i': i}) - + return batt.InputContainer({"i": i}) + load_functions = [future_loading_1, future_loading_2] - ## Step 3: generate surrogate model + ## Step 3: generate surrogate model # Simulation options for training data and surrogate model generation # Note: here dt is less than save_freq. This means the model will iterate forward multiple steps per saved point. - # This is commonly done to ensure accuracy. + # This is commonly done to ensure accuracy. 
options_surrogate = { - 'save_freq': 1, # For DMD, this value is the time step for which the surrogate model is generated - 'dt': 0.1, # For DMD, this value is the time step of the training data - 'trim_data_to': 0.7 # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model + "save_freq": 1, # For DMD, this value is the time step for which the surrogate model is generated + "dt": 0.1, # For DMD, this value is the time step of the training data + "trim_data_to": 0.7, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model } # Set noise in Prognostics Model, default for surrogate model is also this value - batt.parameters['process_noise'] = 0 + batt.parameters["process_noise"] = 0 - # Generate surrogate model - surrogate = batt.generate_surrogate(load_functions,**options_surrogate) + # Generate surrogate model + surrogate = batt.generate_surrogate(load_functions, **options_surrogate) - ## Step 4: Use surrogate model + ## Step 4: Use surrogate model # Simulation options for implementation of surrogate model options_sim = { - 'save_freq': 1 # Frequency at which results are saved, or equivalently time step in results + "save_freq": 1 # Frequency at which results are saved, or equivalently time step in results } - # Define loading profile + # Define loading profile def future_loading(t, x=None): - if (t < 600): + if t < 600: i = 3 - elif (t < 1000): + elif t < 1000: i = 2 - elif (t < 1500): + elif t < 1500: i = 1.5 else: i = 4 - return batt.InputContainer({'i': i}) + return batt.InputContainer({"i": i}) # Simulate to threshold using DMD approximation - simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim) + simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim) # Calculate Error - MSE = batt.calc_error(simulated_results.times, simulated_results.inputs, simulated_results.outputs) - print('Example 1 MSE:',MSE) + MSE = batt.calc_error( + simulated_results.times, simulated_results.inputs, simulated_results.outputs + ) + print("Example 1 MSE:", MSE) # Not a very good approximation # Plot results - simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 1 Input') - simulated_results.outputs.plot(ylabel = 'Predicted Outputs (temperature and voltage)',title='Example 1 Predicted Outputs') - simulated_results.event_states.plot(ylabel = 'Predicted State of Charge', title='Example 1 Predicted SOC') + simulated_results.inputs.plot(ylabel="Current (amps)", title="Example 1 Input") + simulated_results.outputs.plot( + ylabel="Predicted Outputs (temperature and voltage)", + title="Example 1 Predicted Outputs", + ) + simulated_results.event_states.plot( + ylabel="Predicted State of Charge", title="Example 1 Predicted SOC" + ) # To visualize the accuracy of the approximation, run the high-fidelity model options_hf = { - 'dt': 0.1, - 'save_freq': 1, + "dt": 0.1, + "save_freq": 1, } - high_fidelity_results = batt.simulate_to_threshold(future_loading,**options_hf) + high_fidelity_results = batt.simulate_to_threshold(future_loading, **options_hf) # Save voltage results to compare - voltage_dmd = [simulated_results.outputs[iter1]['v'] for iter1 in range(len(simulated_results.times))] - voltage_hf = [high_fidelity_results.outputs[iter2]['v'] for iter2 in range(len(high_fidelity_results.times))] + voltage_dmd = [ + simulated_results.outputs[iter1]["v"] + for iter1 in 
range(len(simulated_results.times)) + ] + voltage_hf = [ + high_fidelity_results.outputs[iter2]["v"] + for iter2 in range(len(high_fidelity_results.times)) + ] plt.subplots() - plt.plot(simulated_results.times,voltage_dmd,'-b',label='DMD approximation') - plt.plot(high_fidelity_results.times, voltage_hf,'--r',label='High fidelity result') + plt.plot(simulated_results.times, voltage_dmd, "-b", label="DMD approximation") + plt.plot( + high_fidelity_results.times, voltage_hf, "--r", label="High fidelity result" + ) plt.legend() - plt.title('Comparing DMD approximation to high-fidelity model results') + plt.title("Comparing DMD approximation to high-fidelity model results") - ### Example 2: Add process_noise to the surrogate model - # Without re-generating the surrogate model, we can re-define the process_noise to be higher than the high-fidelity model (since the surrogate model is less accurate) - surrogate.parameters['process_noise'] = 1e-04 - surrogate.parameters['process_noise_dist'] = 'normal' + ### Example 2: Add process_noise to the surrogate model + # Without re-generating the surrogate model, we can re-define the process_noise to be higher than the high-fidelity model (since the surrogate model is less accurate) + surrogate.parameters["process_noise"] = 1e-04 + surrogate.parameters["process_noise_dist"] = "normal" - # Simulate to threshold using DMD approximation - simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim) + # Simulate to threshold using DMD approximation + simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim) # Plot results - simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 2 Input') - simulated_results.outputs.plot(keys=['v'],ylabel = 'Predicted Voltage (volts)', title='Example 2 Predicted Outputs') - simulated_results.event_states.plot(ylabel = 'Predicted State of Charge', title='Example 2 Predicted SOC') + simulated_results.inputs.plot(ylabel="Current (amps)", title="Example 2 Input") + simulated_results.outputs.plot( + keys=["v"], + ylabel="Predicted Voltage (volts)", + title="Example 2 Predicted Outputs", + ) + simulated_results.event_states.plot( + ylabel="Predicted State of Charge", title="Example 2 Predicted SOC" + ) ### Example 3: Generate surrogate model with a subset of internal states, inputs, and/or outputs - # Note: we use the same loading profiles as defined in Ex. 1 + # Note: we use the same loading profiles as defined in Ex. 
1 - ## Generate surrogate model + ## Generate surrogate model # Simulation options for training data and surrogate model generation options_surrogate = { - 'save_freq': 1, # For DMD, this value is the time step for which the surrogate model is generated - 'dt': 0.1, # For DMD, this value is the time step of the training data - 'trim_data': 1, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model - 'state_keys': ['Vsn','Vsp','tb'], # Define internal states to be included in surrogate model - 'output_keys': ['v'] # Define outputs to be included in surrogate model + "save_freq": 1, # For DMD, this value is the time step for which the surrogate model is generated + "dt": 0.1, # For DMD, this value is the time step of the training data + "trim_data": 1, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model + "state_keys": [ + "Vsn", + "Vsp", + "tb", + ], # Define internal states to be included in surrogate model + "output_keys": ["v"], # Define outputs to be included in surrogate model } # Set noise in Prognostics Model, default for surrogate model is also this value - batt.parameters['process_noise'] = 0 + batt.parameters["process_noise"] = 0 - # Generate surrogate model - surrogate = batt.generate_surrogate(load_functions,**options_surrogate) + # Generate surrogate model + surrogate = batt.generate_surrogate(load_functions, **options_surrogate) - ## Use surrogate model - # The surrogate model can now be used anywhere the original model is used. It is interchangeable with the original model. - # The surrogate model results will be faster but less accurate than the original model. + ## Use surrogate model + # The surrogate model can now be used anywhere the original model is used. It is interchangeable with the original model. + # The surrogate model results will be faster but less accurate than the original model. 
# Simulation options for implementation of surrogate model options_sim = { - 'save_freq': 1 # Frequency at which results are saved, or equivalently time step in results + "save_freq": 1 # Frequency at which results are saved, or equivalently time step in results } # Simulate to threshold using DMD approximation - simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim) + simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim) # Calculate Error - MSE = batt.calc_error(simulated_results.times, simulated_results.inputs, simulated_results.outputs) - print('Example 3 MSE:',MSE) + MSE = batt.calc_error( + simulated_results.times, simulated_results.inputs, simulated_results.outputs + ) + print("Example 3 MSE:", MSE) # Plot results - simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 3 Input') - simulated_results.outputs.plot(ylabel = 'Outputs (voltage)',title='Example 3 Predicted Output') - simulated_results.event_states.plot(ylabel = 'State of Charge',title='Example 3 Predicted SOC') + simulated_results.inputs.plot(ylabel="Current (amps)", title="Example 3 Input") + simulated_results.outputs.plot( + ylabel="Outputs (voltage)", title="Example 3 Predicted Output" + ) + simulated_results.event_states.plot( + ylabel="State of Charge", title="Example 3 Predicted SOC" + ) plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/6a1ef040b1b1816faac11de7b6788010/visualize.py b/docs/_downloads/6a1ef040b1b1816faac11de7b6788010/visualize.py index cba4dcca..6af2d7d3 100644 --- a/docs/_downloads/6a1ef040b1b1816faac11de7b6788010/visualize.py +++ b/docs/_downloads/6a1ef040b1b1816faac11de7b6788010/visualize.py @@ -2,36 +2,50 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating the Visualization Module. +Example demonstrating the Visualization Module. 
""" - import matplotlib.pyplot as plt from progpy.models.thrown_object import ThrownObject + def run_example(): - print('Visualize Module Example') + print("Visualize Module Example") m = ThrownObject() - # Step 2: Setup for simulation + # Step 2: Setup for simulation def future_load(t, x=None): return {} # Step 3: Simulate to impact - event = 'impact' - options={'dt':0.005, 'save_freq':1} - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], **options) - + event = "impact" + options = {"dt": 0.005, "save_freq": 1} + simulated_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], **options + ) # Display states # ============== - simulated_results.states.plot(compact = False, suptitle = 'state evolution', title = True, - xlabel = 'time', ylabel = {'x': 'position', 'v': 'velocity'}, display_labels = 'minimal', - legend = {'display': True , 'display_at_subplot': 'all'}) - simulated_results.states.plot(compact = True, suptitle = 'state evolution', title = 'example title', xlabel = 'time', ylabel = 'position') + simulated_results.states.plot( + compact=False, + suptitle="state evolution", + title=True, + xlabel="time", + ylabel={"x": "position", "v": "velocity"}, + display_labels="minimal", + legend={"display": True, "display_at_subplot": "all"}, + ) + simulated_results.states.plot( + compact=True, + suptitle="state evolution", + title="example title", + xlabel="time", + ylabel="position", + ) plt.show() -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/6a63ddc864911190f486ef9f5e7ad4af/param_est.py b/docs/_downloads/6a63ddc864911190f486ef9f5e7ad4af/param_est.py index 781249ce..a25ff66b 100644 --- a/docs/_downloads/6a63ddc864911190f486ef9f5e7ad4af/param_est.py +++ b/docs/_downloads/6a63ddc864911190f486ef9f5e7ad4af/param_est.py @@ -2,51 +2,53 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating the model parameter estimation feature. +Example demonstrating the model parameter estimation feature. """ from progpy.models.thrown_object import ThrownObject + def run_example(): # Step 1: Build the model with your best guess in parameters # Here we're guessing that the thrower is 20 meters tall. Obviously not true! # Let's see if parameter estimation can fix this m = ThrownObject(thrower_height=20) - # Step 2: Collect data from the use of the system. Let's pretend we threw the ball once, and collected position measurements + # Step 2: Collect data from the use of the system. 
Let's pretend we threw the ball once, and collected position measurements times = [0, 1, 2, 3, 4, 5, 6, 7, 8] - inputs = [{}]*9 + inputs = [{}] * 9 outputs = [ - {'x': 1.83}, - {'x': 36.95}, - {'x': 62.36}, - {'x': 77.81}, - {'x': 83.45}, - {'x': 79.28}, - {'x': 65.3}, - {'x': 41.51}, - {'x': 7.91}, + {"x": 1.83}, + {"x": 36.95}, + {"x": 62.36}, + {"x": 77.81}, + {"x": 83.45}, + {"x": 79.28}, + {"x": 65.3}, + {"x": 41.51}, + {"x": 7.91}, ] # Step 3: Identify the parameters to be estimated - keys = ['thrower_height', 'throwing_speed'] + keys = ["thrower_height", "throwing_speed"] # Printing state before - print('Model configuration before') + print("Model configuration before") for key in keys: print("-", key, m.parameters[key]) - print(' Error: ', m.calc_error(times, inputs, outputs, dt=1e-4)) + print(" Error: ", m.calc_error(times, inputs, outputs, dt=1e-4)) # Step 4: Run parameter estimation with data m.estimate_params([(times, inputs, outputs)], keys, dt=0.01) # Print result - print('\nOptimized configuration') + print("\nOptimized configuration") for key in keys: print("-", key, m.parameters[key]) - print(' Error: ', m.calc_error(times, inputs, outputs, dt=1e-4)) - + print(" Error: ", m.calc_error(times, inputs, outputs, dt=1e-4)) + # Sure enough- parameter estimation determined that the thrower's height wasn't 20 m, instead was closer to 1.9m, a much more reasonable height! -if __name__=='__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/6b940c6b760d99b131616029abefd14c/05_Data Driven.ipynb b/docs/_downloads/6b940c6b760d99b131616029abefd14c/05_Data Driven.ipynb index 0cbd9d00..0554cfd6 100644 --- a/docs/_downloads/6b940c6b760d99b131616029abefd14c/05_Data Driven.ipynb +++ b/docs/_downloads/6b940c6b760d99b131616029abefd14c/05_Data Driven.ipynb @@ -4,21 +4,77 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Using Data-Driven Models\n", - "**A version of this notebook will be added in release v1.8, including:**" + "# 5. Using Data-Driven Models" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## General Use\n", + "In addition to the physics-based modeling functionalities described so far, ProgPy also includes a framework for implementing data-driven models. \n", "\n", - "### Building a new model from data\n", + "A data-driven model is a model where the behavior is learned from data. In ProgPy, data-driven models derive from the parent class `progpy.data_models.DataModel`. A common example of a data-driven model is one that uses neural networks (e.g., `progpy.data_models.LSTMStateTransitionModel`). \n", "\n", + "Some data-driven methodologies we will be exploring in this section include [Long Short-Term Memory (LSTM)](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html?highlight=surrogate#lstmstatetransitionmodel), [Dynamic Mode Decomposition (DMD)](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html?highlight=surrogate#dmdmodel), and [Polynomial Chaos Expansion (PCE)](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html?highlight=surrogate#polynomialchaosexpansion). 
The data-driven architecture also includes [surrogate models](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html?highlight=surrogate#from-another-prognosticsmodel-i-e-surrogate), which can be used to create models that approximate the original/higher-fidelity models, generally resulting in a less accurate model that is more computationally efficient.\n", + "\n", + "For more information, refer to the [`DataModel` documentation](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html).\n", + "\n", + "Before we get started, make sure to install the data-driven dependencies using the following command:\n", + "\n", + "`pip install 'progpy[datadriven]'`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "\n", + "* [General Use](#General-Use)\n", + " * [Building a New Model from Data](#Building-a-New-Model-from-Data)\n", + " * [Surrogate Models](#Surrogate-Models)\n", + "* [Long Short-Term Memory (LSTM)](#Long-Short-Term-Memory-(LSTM))\n", + "* [Dynamic Mode Decomposition (DMD)](#Dynamic-Mode-Decomposition-(DMD))\n", + "* [Polynomial Chaos Expansion (PCE)](#Polynomial-Chaos-Expansion-(PCE))\n", + "* [Extending](#Extending)\n", + "* [Conclusion](#Conclusion)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## General Use" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Building a New Model from Data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this notebook will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ "### Surrogate Models" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this notebook will be added in release v1.9**" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -26,6 +82,13 @@ "## Long Short-Term Memory (LSTM)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this notebook will be added in release v1.9**" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -33,6 +96,13 @@ "## Dynamic Mode Decomposition (DMD)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this notebook will be added in release v1.9**" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -40,12 +110,40 @@ "## Polynomial Chaos Expansion (PCE)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this notebook will be added in release v1.9**" + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Extending" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this notebook will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this section, we looked at various ways to use data-driven models. The next section __[06 Combining Models](06_Combining%20Models.ipynb)__ examines how prognostics models can be combined." 
+ ] } ], "metadata": { diff --git a/docs/_downloads/6c00d1b4a87a04ca552d41096eeda216/serialization.py b/docs/_downloads/6c00d1b4a87a04ca552d41096eeda216/serialization.py index 7e8d4c80..130833f1 100644 --- a/docs/_downloads/6c00d1b4a87a04ca552d41096eeda216/serialization.py +++ b/docs/_downloads/6c00d1b4a87a04ca552d41096eeda216/serialization.py @@ -8,17 +8,18 @@ import pickle from progpy.models import BatteryElectroChemEOD as Battery -def run_example(): + +def run_example(): ## Step 1: Create a model object batt = Battery() # Set process nosie to 0 to illustrate match between original and serialized versions - batt.parameters['process_noise'] = 0 + batt.parameters["process_noise"] = 0 ### Step 2: serialize model for future use # Note: Model serialization has a lot of purposes, like saving a specific model to a file to be loaded later or sending a model to another machine over a network connection. - - # METHOD 1: Serialize with JSON + + # METHOD 1: Serialize with JSON save_json = batt.to_json() # Model can be called directly with serialized result @@ -27,55 +28,82 @@ def run_example(): # Serialized result can also be saved to a text file and uploaded later using the following code: txtFile = open("model_save_json.txt", "w") txtFile.write(save_json) - txtFile.close() + txtFile.close() - with open('model_save_json.txt') as infile: + with open("model_save_json.txt") as infile: load_json = infile.read() serial_2 = Battery.from_json(load_json) # METHOD 2: Serialize by pickling - pickle.dump(batt, open('model_save_pkl.pkl','wb')) - load_pkl = pickle.load(open('model_save_pkl.pkl','rb')) + pickle.dump(batt, open("model_save_pkl.pkl", "wb")) + load_pkl = pickle.load(open("model_save_pkl.pkl", "rb")) ## Step 3: Simulate to threshold and compare results options_sim = { - 'save_freq': 1 # Frequency at which results are saved, or equivalently time step in results + "save_freq": 1 # Frequency at which results are saved, or equivalently time step in results } - # Define loading profile + # Define loading profile def future_loading(t, x=None): - if (t < 600): + if t < 600: i = 3 - elif (t < 1000): + elif t < 1000: i = 2 - elif (t < 1500): + elif t < 1500: i = 1.5 else: i = 4 - return batt.InputContainer({'i': i}) + return batt.InputContainer({"i": i}) - # Simulate to threshold - results_orig = batt.simulate_to_threshold(future_loading,**options_sim) + # Simulate to threshold + results_orig = batt.simulate_to_threshold(future_loading, **options_sim) results_serial_1 = serial_1.simulate_to_threshold(future_loading, **options_sim) results_serial_2 = serial_2.simulate_to_threshold(future_loading, **options_sim) results_serial_3 = load_pkl.simulate_to_threshold(future_loading, **options_sim) # Plot results for comparison - voltage_orig = [results_orig.outputs[iter]['v'] for iter in range(len(results_orig.times))] - voltage_serial_1 = [results_serial_1.outputs[iter]['v'] for iter in range(len(results_serial_1.times))] - voltage_serial_2 = [results_serial_2.outputs[iter]['v'] for iter in range(len(results_serial_2.times))] - voltage_serial_3 = [results_serial_3.outputs[iter]['v'] for iter in range(len(results_serial_3.times))] - - plt.plot(results_orig.times,voltage_orig,'-b',label='Original surrogate') - plt.plot(results_serial_1.times,voltage_serial_1,'--r',label='First JSON serialized surrogate') - plt.plot(results_serial_2.times,voltage_serial_2,'-.g',label='Second JSON serialized surrogate') - plt.plot(results_serial_3.times, voltage_serial_3, '--y', label='Pickled serialized surrogate') + voltage_orig = [ 
+ results_orig.outputs[iter]["v"] for iter in range(len(results_orig.times)) + ] + voltage_serial_1 = [ + results_serial_1.outputs[iter]["v"] + for iter in range(len(results_serial_1.times)) + ] + voltage_serial_2 = [ + results_serial_2.outputs[iter]["v"] + for iter in range(len(results_serial_2.times)) + ] + voltage_serial_3 = [ + results_serial_3.outputs[iter]["v"] + for iter in range(len(results_serial_3.times)) + ] + + plt.plot(results_orig.times, voltage_orig, "-b", label="Original surrogate") + plt.plot( + results_serial_1.times, + voltage_serial_1, + "--r", + label="First JSON serialized surrogate", + ) + plt.plot( + results_serial_2.times, + voltage_serial_2, + "-.g", + label="Second JSON serialized surrogate", + ) + plt.plot( + results_serial_3.times, + voltage_serial_3, + "--y", + label="Pickled serialized surrogate", + ) plt.legend() - plt.xlabel('Time (sec)') - plt.ylabel('Voltage (volts)') + plt.xlabel("Time (sec)") + plt.ylabel("Voltage (volts)") plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/6d45e93e9ca9007ed5e116c58b2ade78/generate_surrogate.ipynb b/docs/_downloads/6d45e93e9ca9007ed5e116c58b2ade78/generate_surrogate.ipynb index 6ca8d984..8d693a25 100644 --- a/docs/_downloads/6d45e93e9ca9007ed5e116c58b2ade78/generate_surrogate.ipynb +++ b/docs/_downloads/6d45e93e9ca9007ed5e116c58b2ade78/generate_surrogate.ipynb @@ -1,54 +1,246 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample of generating a Dynamic Mode Decomposition surrogate model using the battery model \n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from progpy.models import BatteryElectroChemEOD as Battery\n\nimport matplotlib.pyplot as plt\n\ndef run_example(): \n ### Example 1: Standard DMD Application \n ## Step 1: Create a model object\n batt = Battery()\n\n ## Step 2: Define future loading functions for training data \n # Here, we define two specific loading profiles. These could also be generated programmatically, for as many loading profiles as desired \n def future_loading_1(t, x=None):\n # Variable (piece-wise) future loading scheme \n if (t < 500):\n i = 3\n elif (t < 1000):\n i = 2\n elif (t < 1500):\n i = 0.5\n else:\n i = 4.5\n return batt.InputContainer({'i': i})\n \n def future_loading_2(t, x=None):\n # Variable (piece-wise) future loading scheme \n if (t < 300):\n i = 2\n elif (t < 800):\n i = 3.5\n elif (t < 1300):\n i = 4\n elif (t < 1600):\n i = 1.5\n else:\n i = 5\n return batt.InputContainer({'i': i})\n \n load_functions = [future_loading_1, future_loading_2]\n\n ## Step 3: generate surrogate model \n # Simulation options for training data and surrogate model generation\n # Note: here dt is less than save_freq. This means the model will iterate forward multiple steps per saved point.\n # This is commonly done to ensure accuracy. 
\n options_surrogate = {\n 'save_freq': 1, # For DMD, this value is the time step for which the surrogate model is generated\n 'dt': 0.1, # For DMD, this value is the time step of the training data\n 'trim_data_to': 0.7 # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model\n }\n\n # Set noise in Prognostics Model, default for surrogate model is also this value\n batt.parameters['process_noise'] = 0\n\n # Generate surrogate model \n surrogate = batt.generate_surrogate(load_functions,**options_surrogate)\n\n ## Step 4: Use surrogate model \n # Simulation options for implementation of surrogate model\n options_sim = {\n 'save_freq': 1 # Frequency at which results are saved, or equivalently time step in results\n }\n\n # Define loading profile \n def future_loading(t, x=None):\n if (t < 600):\n i = 3\n elif (t < 1000):\n i = 2\n elif (t < 1500):\n i = 1.5\n else:\n i = 4\n return batt.InputContainer({'i': i})\n\n # Simulate to threshold using DMD approximation\n simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim)\n\n # Calculate Error\n MSE = batt.calc_error(simulated_results.times, simulated_results.inputs, simulated_results.outputs)\n print('Example 1 MSE:',MSE)\n # Not a very good approximation\n\n # Plot results\n simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 1 Input')\n simulated_results.outputs.plot(ylabel = 'Predicted Outputs (temperature and voltage)',title='Example 1 Predicted Outputs')\n simulated_results.event_states.plot(ylabel = 'Predicted State of Charge', title='Example 1 Predicted SOC')\n\n # To visualize the accuracy of the approximation, run the high-fidelity model\n options_hf = {\n 'dt': 0.1,\n 'save_freq': 1,\n }\n high_fidelity_results = batt.simulate_to_threshold(future_loading,**options_hf)\n\n # Save voltage results to compare\n voltage_dmd = [simulated_results.outputs[iter1]['v'] for iter1 in range(len(simulated_results.times))]\n voltage_hf = [high_fidelity_results.outputs[iter2]['v'] for iter2 in range(len(high_fidelity_results.times))]\n\n plt.subplots()\n plt.plot(simulated_results.times,voltage_dmd,'-b',label='DMD approximation')\n plt.plot(high_fidelity_results.times, voltage_hf,'--r',label='High fidelity result')\n plt.legend()\n plt.title('Comparing DMD approximation to high-fidelity model results')\n\n ### Example 2: Add process_noise to the surrogate model \n # Without re-generating the surrogate model, we can re-define the process_noise to be higher than the high-fidelity model (since the surrogate model is less accurate)\n surrogate.parameters['process_noise'] = 1e-04\n surrogate.parameters['process_noise_dist'] = 'normal'\n\n # Simulate to threshold using DMD approximation \n simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim)\n\n # Plot results\n simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 2 Input')\n simulated_results.outputs.plot(keys=['v'],ylabel = 'Predicted Voltage (volts)', title='Example 2 Predicted Outputs')\n simulated_results.event_states.plot(ylabel = 'Predicted State of Charge', title='Example 2 Predicted SOC')\n\n ### Example 3: Generate surrogate model with a subset of internal states, inputs, and/or outputs\n # Note: we use the same loading profiles as defined in Ex. 
1\n\n ## Generate surrogate model \n # Simulation options for training data and surrogate model generation\n options_surrogate = {\n 'save_freq': 1, # For DMD, this value is the time step for which the surrogate model is generated\n 'dt': 0.1, # For DMD, this value is the time step of the training data\n 'trim_data': 1, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model\n 'state_keys': ['Vsn','Vsp','tb'], # Define internal states to be included in surrogate model\n 'output_keys': ['v'] # Define outputs to be included in surrogate model \n }\n\n # Set noise in Prognostics Model, default for surrogate model is also this value\n batt.parameters['process_noise'] = 0\n\n # Generate surrogate model \n surrogate = batt.generate_surrogate(load_functions,**options_surrogate)\n\n ## Use surrogate model \n # The surrogate model can now be used anywhere the original model is used. It is interchangeable with the original model. \n # The surrogate model results will be faster but less accurate than the original model. \n\n # Simulation options for implementation of surrogate model\n options_sim = {\n 'save_freq': 1 # Frequency at which results are saved, or equivalently time step in results\n }\n\n # Simulate to threshold using DMD approximation\n simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim)\n\n # Calculate Error\n MSE = batt.calc_error(simulated_results.times, simulated_results.inputs, simulated_results.outputs)\n print('Example 3 MSE:',MSE)\n\n # Plot results\n simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 3 Input')\n simulated_results.outputs.plot(ylabel = 'Outputs (voltage)',title='Example 3 Predicted Output')\n simulated_results.event_states.plot(ylabel = 'State of Charge',title='Example 3 Predicted SOC')\n plt.show()\n\n# This allows the module to be executed directly \nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample of generating a Dynamic Mode Decomposition surrogate model using the battery model \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from progpy.models import BatteryElectroChemEOD as Battery\n", + "\n", + "import matplotlib.pyplot as plt\n", + "\n", + "\n", + "def run_example():\n", + " ### Example 1: Standard DMD Application\n", + " ## Step 1: Create a model object\n", + " batt = Battery()\n", + "\n", + " ## Step 2: Define future loading functions for training data\n", + " # Here, we define two specific loading profiles. 
These could also be generated programmatically, for as many loading profiles as desired\n", + " def future_loading_1(t, x=None):\n", + " # Variable (piece-wise) future loading scheme\n", + " if t < 500:\n", + " i = 3\n", + " elif t < 1000:\n", + " i = 2\n", + " elif t < 1500:\n", + " i = 0.5\n", + " else:\n", + " i = 4.5\n", + " return batt.InputContainer({\"i\": i})\n", + "\n", + " def future_loading_2(t, x=None):\n", + " # Variable (piece-wise) future loading scheme\n", + " if t < 300:\n", + " i = 2\n", + " elif t < 800:\n", + " i = 3.5\n", + " elif t < 1300:\n", + " i = 4\n", + " elif t < 1600:\n", + " i = 1.5\n", + " else:\n", + " i = 5\n", + " return batt.InputContainer({\"i\": i})\n", + "\n", + " load_functions = [future_loading_1, future_loading_2]\n", + "\n", + " ## Step 3: generate surrogate model\n", + " # Simulation options for training data and surrogate model generation\n", + " # Note: here dt is less than save_freq. This means the model will iterate forward multiple steps per saved point.\n", + " # This is commonly done to ensure accuracy.\n", + " options_surrogate = {\n", + " \"save_freq\": 1, # For DMD, this value is the time step for which the surrogate model is generated\n", + " \"dt\": 0.1, # For DMD, this value is the time step of the training data\n", + " \"trim_data_to\": 0.7, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model\n", + " }\n", + "\n", + " # Set noise in Prognostics Model, default for surrogate model is also this value\n", + " batt.parameters[\"process_noise\"] = 0\n", + "\n", + " # Generate surrogate model\n", + " surrogate = batt.generate_surrogate(load_functions, **options_surrogate)\n", + "\n", + " ## Step 4: Use surrogate model\n", + " # Simulation options for implementation of surrogate model\n", + " options_sim = {\n", + " \"save_freq\": 1 # Frequency at which results are saved, or equivalently time step in results\n", + " }\n", + "\n", + " # Define loading profile\n", + " def future_loading(t, x=None):\n", + " if t < 600:\n", + " i = 3\n", + " elif t < 1000:\n", + " i = 2\n", + " elif t < 1500:\n", + " i = 1.5\n", + " else:\n", + " i = 4\n", + " return batt.InputContainer({\"i\": i})\n", + "\n", + " # Simulate to threshold using DMD approximation\n", + " simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim)\n", + "\n", + " # Calculate Error\n", + " MSE = batt.calc_error(\n", + " simulated_results.times, simulated_results.inputs, simulated_results.outputs\n", + " )\n", + " print(\"Example 1 MSE:\", MSE)\n", + " # Not a very good approximation\n", + "\n", + " # Plot results\n", + " simulated_results.inputs.plot(ylabel=\"Current (amps)\", title=\"Example 1 Input\")\n", + " simulated_results.outputs.plot(\n", + " ylabel=\"Predicted Outputs (temperature and voltage)\",\n", + " title=\"Example 1 Predicted Outputs\",\n", + " )\n", + " simulated_results.event_states.plot(\n", + " ylabel=\"Predicted State of Charge\", title=\"Example 1 Predicted SOC\"\n", + " )\n", + "\n", + " # To visualize the accuracy of the approximation, run the high-fidelity model\n", + " options_hf = {\n", + " \"dt\": 0.1,\n", + " \"save_freq\": 1,\n", + " }\n", + " high_fidelity_results = batt.simulate_to_threshold(future_loading, **options_hf)\n", + "\n", + " # Save voltage results to compare\n", + " voltage_dmd = [\n", + " simulated_results.outputs[iter1][\"v\"]\n", + " for iter1 in range(len(simulated_results.times))\n", + " ]\n", + " voltage_hf = [\n", + " 
high_fidelity_results.outputs[iter2][\"v\"]\n", + " for iter2 in range(len(high_fidelity_results.times))\n", + " ]\n", + "\n", + " plt.subplots()\n", + " plt.plot(simulated_results.times, voltage_dmd, \"-b\", label=\"DMD approximation\")\n", + " plt.plot(\n", + " high_fidelity_results.times, voltage_hf, \"--r\", label=\"High fidelity result\"\n", + " )\n", + " plt.legend()\n", + " plt.title(\"Comparing DMD approximation to high-fidelity model results\")\n", + "\n", + " ### Example 2: Add process_noise to the surrogate model\n", + " # Without re-generating the surrogate model, we can re-define the process_noise to be higher than the high-fidelity model (since the surrogate model is less accurate)\n", + " surrogate.parameters[\"process_noise\"] = 1e-04\n", + " surrogate.parameters[\"process_noise_dist\"] = \"normal\"\n", + "\n", + " # Simulate to threshold using DMD approximation\n", + " simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim)\n", + "\n", + " # Plot results\n", + " simulated_results.inputs.plot(ylabel=\"Current (amps)\", title=\"Example 2 Input\")\n", + " simulated_results.outputs.plot(\n", + " keys=[\"v\"],\n", + " ylabel=\"Predicted Voltage (volts)\",\n", + " title=\"Example 2 Predicted Outputs\",\n", + " )\n", + " simulated_results.event_states.plot(\n", + " ylabel=\"Predicted State of Charge\", title=\"Example 2 Predicted SOC\"\n", + " )\n", + "\n", + " ### Example 3: Generate surrogate model with a subset of internal states, inputs, and/or outputs\n", + " # Note: we use the same loading profiles as defined in Ex. 1\n", + "\n", + " ## Generate surrogate model\n", + " # Simulation options for training data and surrogate model generation\n", + " options_surrogate = {\n", + " \"save_freq\": 1, # For DMD, this value is the time step for which the surrogate model is generated\n", + " \"dt\": 0.1, # For DMD, this value is the time step of the training data\n", + " \"trim_data\": 1, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model\n", + " \"state_keys\": [\n", + " \"Vsn\",\n", + " \"Vsp\",\n", + " \"tb\",\n", + " ], # Define internal states to be included in surrogate model\n", + " \"output_keys\": [\"v\"], # Define outputs to be included in surrogate model\n", + " }\n", + "\n", + " # Set noise in Prognostics Model, default for surrogate model is also this value\n", + " batt.parameters[\"process_noise\"] = 0\n", + "\n", + " # Generate surrogate model\n", + " surrogate = batt.generate_surrogate(load_functions, **options_surrogate)\n", + "\n", + " ## Use surrogate model\n", + " # The surrogate model can now be used anywhere the original model is used. 
It is interchangeable with the original model.\n", + " # The surrogate model results will be faster but less accurate than the original model.\n", + "\n", + " # Simulation options for implementation of surrogate model\n", + " options_sim = {\n", + " \"save_freq\": 1 # Frequency at which results are saved, or equivalently time step in results\n", + " }\n", + "\n", + " # Simulate to threshold using DMD approximation\n", + " simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim)\n", + "\n", + " # Calculate Error\n", + " MSE = batt.calc_error(\n", + " simulated_results.times, simulated_results.inputs, simulated_results.outputs\n", + " )\n", + " print(\"Example 3 MSE:\", MSE)\n", + "\n", + " # Plot results\n", + " simulated_results.inputs.plot(ylabel=\"Current (amps)\", title=\"Example 3 Input\")\n", + " simulated_results.outputs.plot(\n", + " ylabel=\"Outputs (voltage)\", title=\"Example 3 Predicted Output\"\n", + " )\n", + " simulated_results.event_states.plot(\n", + " ylabel=\"State of Charge\", title=\"Example 3 Predicted SOC\"\n", + " )\n", + " plt.show()\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/6f42ac9351eb525e4c9f7984283126d1/sensitivity.py b/docs/_downloads/6f42ac9351eb525e4c9f7984283126d1/sensitivity.py index 9882ca99..5e3e5253 100644 --- a/docs/_downloads/6f42ac9351eb525e4c9f7984283126d1/sensitivity.py +++ b/docs/_downloads/6f42ac9351eb525e4c9f7984283126d1/sensitivity.py @@ -2,52 +2,78 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example performing a sensitivity analysis on a new model. +Example performing a sensitivity analysis on a new model. 
""" # Deriv prog model was selected because the model can be described as x' = x + dx*dt from progpy.models.thrown_object import ThrownObject import numpy as np + def run_example(): # Demo model # Step 1: Create instance of model m = ThrownObject() - # Step 2: Setup for simulation + # Step 2: Setup for simulation def future_load(t, x=None): return m.InputContainer({}) # Step 3: Setup range on parameters considered thrower_height_range = np.arange(1.2, 2.1, 0.1) - # Step 4: Sim for each - event = 'impact' + # Step 4: Sim for each + event = "impact" eods = np.empty(len(thrower_height_range)) - for (i, thrower_height) in zip(range(len(thrower_height_range)), thrower_height_range): - m.parameters['thrower_height'] = thrower_height - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt =1e-3, save_freq =10) + for i, thrower_height in zip( + range(len(thrower_height_range)), thrower_height_range + ): + m.parameters["thrower_height"] = thrower_height + simulated_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], dt=1e-3, save_freq=10 + ) eods[i] = simulated_results.times[-1] # Step 5: Analysis - print('For a reasonable range of heights, impact time is between {} and {}'.format(round(eods[0],3), round(eods[-1],3))) - sensitivity = (eods[-1]-eods[0])/(thrower_height_range[-1] - thrower_height_range[0]) - print(' - Average sensitivity: {} s per cm height'.format(round(sensitivity/100, 6))) + print( + "For a reasonable range of heights, impact time is between {} and {}".format( + round(eods[0], 3), round(eods[-1], 3) + ) + ) + sensitivity = (eods[-1] - eods[0]) / ( + thrower_height_range[-1] - thrower_height_range[0] + ) + print( + " - Average sensitivity: {} s per cm height".format( + round(sensitivity / 100, 6) + ) + ) print(" - It seems impact time is not very sensitive to thrower's height") # Now lets repeat for throw speed throw_speed_range = np.arange(20, 40, 1) eods = np.empty(len(throw_speed_range)) - for (i, throw_speed) in zip(range(len(throw_speed_range)), throw_speed_range): - m.parameters['throwing_speed'] = throw_speed - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], options={'dt':1e-3, 'save_freq':10}) + for i, throw_speed in zip(range(len(throw_speed_range)), throw_speed_range): + m.parameters["throwing_speed"] = throw_speed + simulated_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], options={"dt": 1e-3, "save_freq": 10} + ) eods[i] = simulated_results.times[-1] - print('\nFor a reasonable range of throwing speeds, impact time is between {} and {}'.format(round(eods[0],3), round(eods[-1],3))) - sensitivity = (eods[-1]-eods[0])/(throw_speed_range[-1] - throw_speed_range[0]) - print(' - Average sensitivity: {} s per m/s speed'.format(round(sensitivity/100, 6))) + print( + "\nFor a reasonable range of throwing speeds, impact time is between {} and {}".format( + round(eods[0], 3), round(eods[-1], 3) + ) + ) + sensitivity = (eods[-1] - eods[0]) / (throw_speed_range[-1] - throw_speed_range[0]) + print( + " - Average sensitivity: {} s per m/s speed".format( + round(sensitivity / 100, 6) + ) + ) print(" - It seems impact time is much more dependent on throwing speed") -# This allows the module to be executed directly -if __name__=='__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/70520a32b5407f76734c16a1ffe189b9/custom_model.py b/docs/_downloads/70520a32b5407f76734c16a1ffe189b9/custom_model.py 
index 1dc74dad..f0b09c66 100644 --- a/docs/_downloads/70520a32b5407f76734c16a1ffe189b9/custom_model.py +++ b/docs/_downloads/70520a32b5407f76734c16a1ffe189b9/custom_model.py @@ -8,7 +8,7 @@ For most cases, you will be able to use the standard LSTMStateTransitionModel.from_data class with configuration (see the LSTMStateTransitionModel class for more details). However, sometimes you might want to add custom layers, or other complex components. In that case, you will build a custom model and pass it into LSTMStateTransitionModel. - In this example, we generate fake data using the BatteryElectroChemEOD model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. + In this example, we generate fake data using the BatteryElectroChemEOD model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. We build and fit a custom model using keras.layers. Finally, we compare performance to the standard format and the original model. """ @@ -20,21 +20,27 @@ from tensorflow import keras from tensorflow.keras import layers + def run_example(): WINDOW = 12 - print('Generating data...') + print("Generating data...") batt = BatteryElectroChemEOD() - future_loading_eqns = [lambda t, x=None: batt.InputContainer({'i': 1+1.4*load}) for load in range(6)] + future_loading_eqns = [ + lambda t, x=None: batt.InputContainer({"i": 1 + 1.4 * load}) + for load in range(6) + ] # Generate data with different loading and step sizes # Adding the step size as an element of the output input_data = [] output_data = [] for i in range(9): - dt = i/3+0.25 + dt = i / 3 + 0.25 for loading_eqn in future_loading_eqns: - d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) - u = np.array([np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float) + d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) + u = np.array( + [np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float + ) z = d.outputs if len(u) > WINDOW: @@ -44,36 +50,39 @@ def run_example(): # Step 2: Build standard model print("Building standard model...") m_batt = LSTMStateTransitionModel.from_data( - inputs = input_data, - outputs = output_data, - window=WINDOW, - epochs=30, + inputs=input_data, + outputs=output_data, + window=WINDOW, + epochs=30, units=64, # Additional units given the increased complexity of the system - input_keys = ['i', 'dt'], - output_keys = ['t', 'v']) - m_batt.plot_history() + input_keys=["i", "dt"], + output_keys=["t", "v"], + ) + m_batt.plot_history() # Step 3: Build custom model - print('Building custom model...') - (u_all, z_all, _, _) = LSTMStateTransitionModel.pre_process_data(input_data, output_data, window=12) - + print("Building custom model...") + (u_all, z_all, _, _) = LSTMStateTransitionModel.pre_process_data( + input_data, output_data, window=12 + ) + # Normalize n_inputs = len(input_data[0][0]) - u_mean = np.mean(u_all[:,0,:n_inputs], axis=0) - u_std = np.std(u_all[:,0,:n_inputs], axis=0) - # If there's no variation- don't normalize + u_mean = np.mean(u_all[:, 0, :n_inputs], axis=0) + u_std = np.std(u_all[:, 0, :n_inputs], axis=0) + # If there's no variation- don't normalize u_std[u_std == 0] = 1 
z_mean = np.mean(z_all, axis=0) z_std = np.std(z_all, axis=0) - # If there's no variation- don't normalize + # If there's no variation- don't normalize z_std[z_std == 0] = 1 # Add output (since z_t-1 is last input) u_mean = np.hstack((u_mean, z_mean)) u_std = np.hstack((u_std, z_std)) - u_all = (u_all - u_mean)/u_std - z_all = (z_all - z_mean)/z_std + u_all = (u_all - u_mean) / u_std + z_all = (z_all - z_mean) / z_std # u_mean and u_std act on the column vector form (from inputcontainer) # so we need to transpose them to a column vector @@ -88,40 +97,51 @@ def run_example(): x = layers.Dense(z_all.shape[1] if z_all.ndim == 2 else 1)(x) model = keras.Model(inputs, x) model.compile(optimizer="rmsprop", loss="mse", metrics=["mae"]) - history = model.fit(u_all, z_all, epochs=30, callbacks = callbacks, validation_split = 0.1) + history = model.fit( + u_all, z_all, epochs=30, callbacks=callbacks, validation_split=0.1 + ) # Step 4: Build LSTMStateTransitionModel - m_custom = LSTMStateTransitionModel(model, - normalization=normalization, - input_keys = ['i', 'dt'], - output_keys = ['t', 'v'], history=history # Provide history so plot_history will work + m_custom = LSTMStateTransitionModel( + model, + normalization=normalization, + input_keys=["i", "dt"], + output_keys=["t", "v"], + history=history, # Provide history so plot_history will work ) m_custom.plot_history() # Step 5: Simulate - print('Simulating...') + print("Simulating...") t_counter = 0 x_counter = batt.initialize() + def future_loading(t, x=None): - return batt.InputContainer({'i': 3}) + return batt.InputContainer({"i": 3}) - def future_loading2(t, x = None): + def future_loading2(t, x=None): nonlocal t_counter, x_counter z = batt.output(x_counter) - z = m_batt.InputContainer({'i': 3, 't_t-1': z['t'], 'v_t-1': z['v'], 'dt': t - t_counter}) + z = m_batt.InputContainer( + {"i": 3, "t_t-1": z["t"], "v_t-1": z["v"], "dt": t - t_counter} + ) x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z + data = batt.simulate_to_threshold(future_loading, dt=1, save_freq=1) results = m_batt.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1) - results_custom = m_custom.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1) + results_custom = m_custom.simulate_to( + data.times[-1], future_loading2, dt=1, save_freq=1 + ) # Step 6: Compare performance - print('Comparing performance...') - data.outputs.plot(title='original model', compact=False) - results.outputs.plot(title='generated model', compact=False) - results_custom.outputs.plot(title='custom model', compact=False) + print("Comparing performance...") + data.outputs.plot(title="original model", compact=False) + results.outputs.plot(title="generated model", compact=False) + results_custom.outputs.plot(title="custom model", compact=False) plt.show() -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/70522e06688c5fddf4cc38239ec439e7/pce.py b/docs/_downloads/70522e06688c5fddf4cc38239ec439e7/pce.py index cc106b97..6281a284 100644 --- a/docs/_downloads/70522e06688c5fddf4cc38239ec439e7/pce.py +++ b/docs/_downloads/70522e06688c5fddf4cc38239ec439e7/pce.py @@ -1,5 +1,5 @@ # Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. 
-# This ensures that the directory containing examples is in the python search directories +# This ensures that the directory containing examples is in the python search directories """ This example demonstrates the Polynomial Chaos Expansion (PCE) Surrogate Direct Model functionality. PCE is a method by which the behavior of a model can be approximated by a polynomial. In this case the relationship between future loading and time of event. The result is a direct surrogate model that can be used to estimate time of event given a loading profile, without requiring the original model to be simulated. The resulting estimation is MUCH faster than simulating the model. @@ -16,15 +16,16 @@ from progpy.data_models import PCE import scipy as sp + def run_example(): # First lets define some constants # Time step used in simulation - DT = 0.5 + DT = 0.5 # The number of samples to used in the PCE # Larger gives a better approximation, but takes longer to generate - N_SAMPLES = 100 + N_SAMPLES = 100 # The distribution of the input current # This defines the expected values for the input @@ -32,27 +33,29 @@ def run_example(): # With a uniform distribution (i.e., no value in that range is more likely than any other) INPUT_CURRENT_DIST = cp.Uniform(3, 8) # Note: These discharge rates are VERY high. This is only for demonstration purposes. - # The high discharge rate will accelerate the degradation of the battery, + # The high discharge rate will accelerate the degradation of the battery, # which will cause the example to run faster # Step 1: Define base model # First let's define the base model that we're creating a surrogate for. - m = BatteryElectroChemEOD(process_noise = 0) + m = BatteryElectroChemEOD(process_noise=0) x0 = m.initialize() # Initial State - + # Step 2: Build surrogate # Next we build the surrogate model from the base model # To build the model we pass in the distributions of possible values for each input. # We also provide the max_time. This is the maximum time that the surrogate will be used for. # We dont expect any battery to last more than 4000 seconds given the high discharge curves we're passing in. - m_surrogate = PCE.from_model(m, - x0, # Model State - {'i': INPUT_CURRENT_DIST}, # Distribution of inputs - dt=DT, - times = [i*1000 for i in range(5)], - N = N_SAMPLES) + m_surrogate = PCE.from_model( + m, + x0, # Model State + {"i": INPUT_CURRENT_DIST}, # Distribution of inputs + dt=DT, + times=[i * 1000 for i in range(5)], + N=N_SAMPLES, + ) # The result (m_surrogate) is a model that can be used to VERY quickly estimate time_of_event for a new loading profile. - + # Note: this is only valid for the initial state (x0) of the battery. # To train for another state pass in the parameter x (type StateContainer). # e.g. m_surrogate = PCE.from_model(m, SOME_OTHER_STATE, ...) 
@@ -71,25 +74,30 @@ def run_example(): def future_loading(t, x=None): return m.InputContainer(interpolator(t)[np.newaxis].T) - TEST_SAMPLES = m_surrogate.parameters['J'].sample(size=N_TEST_CASES, rule='latin_hypercube') + TEST_SAMPLES = m_surrogate.parameters["J"].sample( + size=N_TEST_CASES, rule="latin_hypercube" + ) for i in range(N_TEST_CASES): # Generate a new loading profile - interpolator = sp.interpolate.interp1d(m_surrogate.parameters['times'], TEST_SAMPLES[:, i]) - + interpolator = sp.interpolate.interp1d( + m_surrogate.parameters["times"], TEST_SAMPLES[:, i] + ) + # Estimate time of event from ground truth (original model) and surrogate - gt_results[i] = m.time_of_event(x0, future_loading, dt = DT)['EOD'] - surrogate_results[i] = m_surrogate.time_of_event(x0, future_loading)['EOD'] + gt_results[i] = m.time_of_event(x0, future_loading, dt=DT)["EOD"] + surrogate_results[i] = m_surrogate.time_of_event(x0, future_loading)["EOD"] # Plot results # Note here that the approximation is very good, but not perfect # Approximation would be even better with more samples plt.scatter(gt_results, surrogate_results) max_val = max(max(gt_results), max(surrogate_results)) - plt.plot([0, max_val], [0, max_val], 'k--') + plt.plot([0, max_val], [0, max_val], "k--") plt.xlabel("Ground Truth (s)") plt.ylabel("PCE (s)") plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/7053e047a245b5db57b0d7dd870c51ce/option_scoring.py b/docs/_downloads/7053e047a245b5db57b0d7dd870c51ce/option_scoring.py index 3ee62143..760dc0f0 100644 --- a/docs/_downloads/7053e047a245b5db57b0d7dd870c51ce/option_scoring.py +++ b/docs/_downloads/7053e047a245b5db57b0d7dd870c51ce/option_scoring.py @@ -11,6 +11,7 @@ import prog_client import time + def run_example(): # Step 1: Prepare load profiles to compare # Create a load profile for each option @@ -20,81 +21,90 @@ def run_example(): # LOAD is a dict with keys corresponding to model.inputs # Note: Dict must be in order of increasing time LOAD_PROFILES = [ - { # Plan 0 - 0: {'i': 2}, - 600: {'i': 1}, - 900: {'i': 4}, - 1800: {'i': 2}, - 3000: {'i': 3} + { # Plan 0 + 0: {"i": 2}, + 600: {"i": 1}, + 900: {"i": 4}, + 1800: {"i": 2}, + 3000: {"i": 3}, + }, + { # Plan 1 + 0: {"i": 3}, + 900: {"i": 2}, + 1000: {"i": 3.5}, + 2000: {"i": 2.5}, + 2300: {"i": 3}, }, - { # Plan 1 - 0: {'i': 3}, - 900: {'i': 2}, - 1000: {'i': 3.5}, - 2000: {'i': 2.5}, - 2300: {'i': 3} + { # Plan 2 + 0: {"i": 1.25}, + 800: {"i": 2}, + 1100: {"i": 2.5}, + 2200: {"i": 6}, }, - { # Plan 2 - 0: {'i': 1.25}, - 800: {'i': 2}, - 1100: {'i': 2.5}, - 2200: {'i': 6}, - } ] - - # Step 2: Open a session with the server for a thrown object. + + # Step 2: Open a session with the server for a battery circuit. # We are specifying a time of interest of 2000 seconds. - # This could be the end of a mission/session, or some inspection time. - print('\nStarting Sessions') - sessions = [prog_client.Session('BatteryCircuit', pred_cfg = {'save_pts': [2000], 'save_freq': 1e99, 'n_samples':15}, load_est = 'Variable', load_est_cfg = LOAD_PROFILES[i]) for i in range(len(LOAD_PROFILES))] + # This could be the end of a mission/session, or some inspection time. 
+ print("\nStarting Sessions") + sessions = [ + prog_client.Session( + "BatteryCircuit", + pred_cfg={"save_pts": [2000], "save_freq": 1e99, "n_samples": 15}, + load_est="Variable", + load_est_cfg=LOAD_PROFILES[i], + ) + for i in range(len(LOAD_PROFILES)) + ] # Step 3: Wait for prognostics to complete - print('\nWaiting for sessions to complete (this may take a bit)') + print("\nWaiting for sessions to complete (this may take a bit)") STEP = 15 # Time to wait between pinging server (s) - + for session in sessions: sessions_in_progress = True while sessions_in_progress: sessions_in_progress = False status = session.get_prediction_status() - if status['in progress'] != 0: - print(f'\tSession {session.session_id} is still in progress') + if status["in progress"] != 0: + print(f"\tSession {session.session_id} is still in progress") sessions_in_progress = True time.sleep(STEP) - print(f'\tSession {session.session_id} complete') - print('All sessions complete') - + print(f"\tSession {session.session_id} complete") + print("All sessions complete") + # Step 4: Get the results - print('Getting results') + print("Getting results") results = [session.get_predicted_toe()[1] for session in sessions] # Step 5: Compare results - print('\nComparing results') - print('Mean ToE:') + print("\nComparing results") + print("Mean ToE:") best_toe = 0 best_plan = None for i in range(len(results)): - mean_toe = results[i].mean['EOD'] - print(f'\tOption {i}: {mean_toe:0.2f}s') + mean_toe = results[i].mean["EOD"] + print(f"\tOption {i}: {mean_toe:0.2f}s") if mean_toe > best_toe: best_toe = mean_toe best_plan = i - print(f'Best option using method 1: Option {best_plan}') + print(f"Best option using method 1: Option {best_plan}") - print('\nSOC at point of interest (2000 sec):') + print("\nSOC at point of interest (2000 sec):") best_soc = 0 best_plan = None soc = [session.get_predicted_event_state()[1] for session in sessions] for i in range(len(soc)): - mean_soc = soc[i].snapshot(-1).mean['EOD'] - print(f'\tOption {i}: {mean_soc:0.3f} SOC') + mean_soc = soc[i].snapshot(-1).mean["EOD"] + print(f"\tOption {i}: {mean_soc:0.3f} SOC") if mean_soc > best_soc: best_soc = mean_soc best_plan = i - print(f'Best option using method 2: Option {best_plan}') + print(f"Best option using method 2: Option {best_plan}") + + # Other metrics can be used as well, like probability of mission success given a certain mission time, uncertainty in ToE estimate, final state at end of mission, etc. - # Other metrics can be used as well, like probability of mission success given a certain mission time, uncertainty in ToE estimate, final state at end of mission, etc. 
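The closing comment mentions probability of mission success as another possible scoring metric. A rough sketch of that idea, reusing the results list built above together with prob_success from progpy.metrics (the same helper used in basic_example.py elsewhere in these docs); MISSION_END_TIME is a made-up value for illustration only.

from progpy.metrics import prob_success

MISSION_END_TIME = 2500  # hypothetical end-of-mission time (s)
p_success = [prob_success(toe, MISSION_END_TIME, keys="EOD") for toe in results]
best_plan = max(range(len(p_success)), key=lambda i: p_success[i])
print(f"Best option using method 3 (P(success)): Option {best_plan}")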
# This allows the module to be executed directly -if __name__ == '__main__': +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/737a7adede8d8031e0dac89b9d76aa3c/noise.ipynb b/docs/_downloads/737a7adede8d8031e0dac89b9d76aa3c/noise.ipynb index 9d7b1091..aaeee53d 100644 --- a/docs/_downloads/737a7adede8d8031e0dac89b9d76aa3c/noise.ipynb +++ b/docs/_downloads/737a7adede8d8031e0dac89b9d76aa3c/noise.ipynb @@ -1,54 +1,178 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample demonstrating approaches for adding and handling model noise\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n\nfrom progpy.models.thrown_object import ThrownObject\n\ndef run_example():\n # Define future loading\n def future_load(t=None, x=None): \n # The thrown object model has no inputs- you cannot load the system (i.e., affect it once it's in the air)\n # So we return an empty input container\n return m.InputContainer({})\n\n # Define configuration for simulation\n config = {\n 'threshold_keys': 'impact', # Simulate until the thrown object has impacted the ground\n 'dt': 0.005, # Time step (s)\n 'save_freq': 0.5, # Frequency at which results are saved (s)\n }\n\n # Define a function to print the results - will be used later\n def print_results(simulated_results):\n # Print results\n print('states:')\n for (t,x) in zip(simulated_results.times, simulated_results.states):\n print('\\t{:.2f}s: {}'.format(t, x))\n\n print('outputs:')\n for (t,x) in zip(simulated_results.times, simulated_results.outputs):\n print('\\t{:.2f}s: {}'.format(t, x))\n\n print('\\nimpact time: {:.2f}s'.format(simulated_results.times[-1]))\n # The simulation stopped at impact, so the last element of times is the impact time\n\n # Plot results\n simulated_results.states.plot()\n\n # Ex1: No noise\n m = ThrownObject(process_noise = False)\n simulated_results = m.simulate_to_threshold(future_load, **config)\n print_results(simulated_results)\n plt.title('Ex1: No noise')\n\n # Ex2: with noise - same noise applied to every state\n process_noise = 15\n m = ThrownObject(process_noise = process_noise) # Noise with a std of 0.5 to every state\n print('\\nExample without same noise for every state')\n simulated_results = m.simulate_to_threshold(future_load, **config)\n print_results(simulated_results)\n plt.title('Ex2: Basic Noise')\n\n # Ex3: noise- more noise on position than velocity\n process_noise = {'x': 30, 'v': 1}\n m = ThrownObject(process_noise = process_noise) \n print('\\nExample with more noise on position than velocity')\n simulated_results = m.simulate_to_threshold(future_load, **config)\n print_results(simulated_results)\n plt.title('Ex3: More noise on position')\n\n # Ex4: noise- Ex3 but uniform\n process_noise_dist = 'uniform'\n model_config = {'process_noise_dist': process_noise_dist, 'process_noise': process_noise}\n m = ThrownObject(**model_config) \n print('\\nExample with more uniform noise')\n simulated_results = m.simulate_to_threshold(future_load, **config)\n print_results(simulated_results)\n plt.title('Ex4: Ex3 with uniform dist')\n\n # Ex5: noise- Ex3 but triangle\n process_noise_dist = 'triangular'\n model_config = {'process_noise_dist': process_noise_dist, 'process_noise': process_noise}\n m = 
ThrownObject(**model_config) \n print('\\nExample with triangular process noise')\n simulated_results = m.simulate_to_threshold(future_load, **config)\n print_results(simulated_results)\n plt.title('Ex5: Ex3 with triangular dist')\n\n # Ex6: Measurement noise\n # Everything we've done with process noise, we can also do with measurement noise.\n # Just use 'measurement_noise' and 'measurement_noise_dist' \n measurement_noise = {'x': 20} # For each output\n measurement_noise_dist = 'uniform'\n model_config = {'measurement_noise_dist': measurement_noise_dist, 'measurement_noise': measurement_noise}\n m = ThrownObject(**model_config) \n print('\\nExample with measurement noise')\n print('- Note: outputs are different than state- this is the application of measurement noise')\n simulated_results = m.simulate_to_threshold(future_load, **config)\n print_results(simulated_results)\n plt.title('Ex6: Measurement noise')\n\n # Ex7: OK, now for something a little more complicated. Let's try proportional noise on v only (more variation when it's going faster)\n # This can be used to do custom or more complex noise distributions\n def apply_proportional_process_noise(self, x, dt = 1):\n x['v'] -= dt*0.5*x['v']\n return x\n model_config = {'process_noise': apply_proportional_process_noise}\n m = ThrownObject(**model_config)\n print('\\nExample with proportional noise on velocity')\n simulated_results = m.simulate_to_threshold(future_load, **config)\n print_results(simulated_results)\n plt.title('Ex7: Proportional noise on velocity')\n\n print('\\nNote: If you would like noise to be applied in a repeatable manner, set the numpy random seed to a fixed value')\n print('e.g., numpy.random.seed(42)')\n plt.show()\n\n# This allows the module to be executed directly \nif __name__=='__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample demonstrating approaches for adding and handling model noise\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "\n", + "from progpy.models.thrown_object import ThrownObject\n", + "\n", + "\n", + "def run_example():\n", + " # Define future loading\n", + " def future_load(t=None, x=None):\n", + " # The thrown object model has no inputs- you cannot load the system (i.e., affect it once it's in the air)\n", + " # So we return an empty input container\n", + " return m.InputContainer({})\n", + "\n", + " # Define configuration for simulation\n", + " config = {\n", + " \"threshold_keys\": \"impact\", # Simulate until the thrown object has impacted the ground\n", + " \"dt\": 0.005, # Time step (s)\n", + " \"save_freq\": 0.5, # Frequency at which results are saved (s)\n", + " }\n", + "\n", + " # Define a function to print the results - will be used later\n", + " def print_results(simulated_results):\n", + " # Print results\n", + " 
print(\"states:\")\n", + " for t, x in zip(simulated_results.times, simulated_results.states):\n", + " print(\"\\t{:.2f}s: {}\".format(t, x))\n", + "\n", + " print(\"outputs:\")\n", + " for t, x in zip(simulated_results.times, simulated_results.outputs):\n", + " print(\"\\t{:.2f}s: {}\".format(t, x))\n", + "\n", + " print(\"\\nimpact time: {:.2f}s\".format(simulated_results.times[-1]))\n", + " # The simulation stopped at impact, so the last element of times is the impact time\n", + "\n", + " # Plot results\n", + " simulated_results.states.plot()\n", + "\n", + " # Ex1: No noise\n", + " m = ThrownObject(process_noise=False)\n", + " simulated_results = m.simulate_to_threshold(future_load, **config)\n", + " print_results(simulated_results)\n", + " plt.title(\"Ex1: No noise\")\n", + "\n", + " # Ex2: with noise - same noise applied to every state\n", + " process_noise = 15\n", + " m = ThrownObject(\n", + " process_noise=process_noise\n", + " ) # Noise with a std of 0.5 to every state\n", + " print(\"\\nExample without same noise for every state\")\n", + " simulated_results = m.simulate_to_threshold(future_load, **config)\n", + " print_results(simulated_results)\n", + " plt.title(\"Ex2: Basic Noise\")\n", + "\n", + " # Ex3: noise- more noise on position than velocity\n", + " process_noise = {\"x\": 30, \"v\": 1}\n", + " m = ThrownObject(process_noise=process_noise)\n", + " print(\"\\nExample with more noise on position than velocity\")\n", + " simulated_results = m.simulate_to_threshold(future_load, **config)\n", + " print_results(simulated_results)\n", + " plt.title(\"Ex3: More noise on position\")\n", + "\n", + " # Ex4: noise- Ex3 but uniform\n", + " process_noise_dist = \"uniform\"\n", + " model_config = {\n", + " \"process_noise_dist\": process_noise_dist,\n", + " \"process_noise\": process_noise,\n", + " }\n", + " m = ThrownObject(**model_config)\n", + " print(\"\\nExample with more uniform noise\")\n", + " simulated_results = m.simulate_to_threshold(future_load, **config)\n", + " print_results(simulated_results)\n", + " plt.title(\"Ex4: Ex3 with uniform dist\")\n", + "\n", + " # Ex5: noise- Ex3 but triangle\n", + " process_noise_dist = \"triangular\"\n", + " model_config = {\n", + " \"process_noise_dist\": process_noise_dist,\n", + " \"process_noise\": process_noise,\n", + " }\n", + " m = ThrownObject(**model_config)\n", + " print(\"\\nExample with triangular process noise\")\n", + " simulated_results = m.simulate_to_threshold(future_load, **config)\n", + " print_results(simulated_results)\n", + " plt.title(\"Ex5: Ex3 with triangular dist\")\n", + "\n", + " # Ex6: Measurement noise\n", + " # Everything we've done with process noise, we can also do with measurement noise.\n", + " # Just use 'measurement_noise' and 'measurement_noise_dist'\n", + " measurement_noise = {\"x\": 20} # For each output\n", + " measurement_noise_dist = \"uniform\"\n", + " model_config = {\n", + " \"measurement_noise_dist\": measurement_noise_dist,\n", + " \"measurement_noise\": measurement_noise,\n", + " }\n", + " m = ThrownObject(**model_config)\n", + " print(\"\\nExample with measurement noise\")\n", + " print(\n", + " \"- Note: outputs are different than state- this is the application of measurement noise\"\n", + " )\n", + " simulated_results = m.simulate_to_threshold(future_load, **config)\n", + " print_results(simulated_results)\n", + " plt.title(\"Ex6: Measurement noise\")\n", + "\n", + " # Ex7: OK, now for something a little more complicated. 
Let's try proportional noise on v only (more variation when it's going faster)\n", + " # This can be used to do custom or more complex noise distributions\n", + " def apply_proportional_process_noise(self, x, dt=1):\n", + " x[\"v\"] -= dt * 0.5 * x[\"v\"]\n", + " return x\n", + "\n", + " model_config = {\"process_noise\": apply_proportional_process_noise}\n", + " m = ThrownObject(**model_config)\n", + " print(\"\\nExample with proportional noise on velocity\")\n", + " simulated_results = m.simulate_to_threshold(future_load, **config)\n", + " print_results(simulated_results)\n", + " plt.title(\"Ex7: Proportional noise on velocity\")\n", + "\n", + " print(\n", + " \"\\nNote: If you would like noise to be applied in a repeatable manner, set the numpy random seed to a fixed value\"\n", + " )\n", + " print(\"e.g., numpy.random.seed(42)\")\n", + " plt.show()\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/744aa18ff0c29f655326d5633e2946ab/particle_filter_battery_example.py b/docs/_downloads/744aa18ff0c29f655326d5633e2946ab/particle_filter_battery_example.py index 96593d34..3029211f 100644 --- a/docs/_downloads/744aa18ff0c29f655326d5633e2946ab/particle_filter_battery_example.py +++ b/docs/_downloads/744aa18ff0c29f655326d5633e2946ab/particle_filter_battery_example.py @@ -9,6 +9,7 @@ from progpy import * from progpy.models import BatteryElectroChemEOD + def run_example(): ## Setup # Save battery model @@ -16,24 +17,22 @@ def run_example(): dt = 1 # Process noise Q_vars = { - 'tb': 1, - 'Vo': 0.01, - 'Vsn': 0.01, - 'Vsp': 0.01, - 'qnB': 1, - 'qnS': 1, - 'qpB': 1, - 'qpS': 1 + "tb": 1, + "Vo": 0.01, + "Vsn": 0.01, + "Vsp": 0.01, + "qnB": 1, + "qnS": 1, + "qpB": 1, + "qpS": 1, } # Measurement noise - R_vars = { - 't': 2, - 'v': 0.02 - } - battery = BatteryElectroChemEOD(process_noise= Q_vars, - measurement_noise = R_vars, - dt = dt) + R_vars = {"t": 2, "v": 0.02} + battery = BatteryElectroChemEOD( + process_noise=Q_vars, measurement_noise=R_vars, dt=dt + ) load = battery.InputContainer({"i": 1}) # Optimization + def future_loading(t, x=None): return load @@ -41,33 +40,70 @@ def future_loading(t, x=None): start_u = future_loading(0) start_x = battery.initialize(start_u) start_y = battery.output(start_x) - sim_results = battery.simulate_to_threshold(future_loading, start_y, save_freq = 1) + sim_results = battery.simulate_to_threshold(future_loading, start_y, save_freq=1) # Run particle filter all_particles = [] - n_times = int(np.round(np.random.uniform(len(sim_results.times)*.25,len(sim_results.times)*.45,1)))# Random current time + n_times = int( + np.round( + np.random.uniform( + len(sim_results.times) * 0.25, len(sim_results.times) * 0.45, 1 + ) + ) + ) # Random current time for i in range(n_times): if i == 0: - batt_pf = state_estimators.ParticleFilter(model = battery, x0 = sim_results.states[i], num_particles = 250) + batt_pf = state_estimators.ParticleFilter( + model=battery, x0=sim_results.states[i], num_particles=250 + ) else: - 
batt_pf.estimate(t = sim_results.times[i], u = sim_results.inputs[i], z = sim_results.outputs[i]) + batt_pf.estimate( + t=sim_results.times[i], + u=sim_results.inputs[i], + z=sim_results.outputs[i], + ) all_particles.append(batt_pf.particles) # Mean of the particles alpha = 0.05 - states_vsn = [s['tb'] for s in sim_results.states] - pf_mean = [{key: np.mean(ps[key]) for key in battery.states} for ps in all_particles] - pf_low = [{key: np.quantile(ps[key], alpha / 2.0) for key in battery.states} for ps in all_particles] - pf_upp = [{key: np.quantile(ps[key], 1.0 - alpha / 2.0) for key in battery.states} for ps in all_particles] + states_vsn = [s["tb"] for s in sim_results.states] + pf_mean = [ + {key: np.mean(ps[key]) for key in battery.states} for ps in all_particles + ] + pf_low = [ + {key: np.quantile(ps[key], alpha / 2.0) for key in battery.states} + for ps in all_particles + ] + pf_upp = [ + {key: np.quantile(ps[key], 1.0 - alpha / 2.0) for key in battery.states} + for ps in all_particles + ] print("First State:", pf_mean[0]) print("Current State:", pf_mean[-1]) - plt.plot(sim_results.times[:n_times],[p['tb'] for p in pf_mean],linewidth=0.7,color="blue") - plt.plot(sim_results.times[:n_times], states_vsn[:n_times],"--",linewidth=0.7,color="red") - plt.fill_between(sim_results.times[:n_times],[p['tb'] for p in pf_low],[p['tb'] for p in pf_upp],alpha=0.5,color="blue") + plt.plot( + sim_results.times[:n_times], + [p["tb"] for p in pf_mean], + linewidth=0.7, + color="blue", + ) + plt.plot( + sim_results.times[:n_times], + states_vsn[:n_times], + "--", + linewidth=0.7, + color="red", + ) + plt.fill_between( + sim_results.times[:n_times], + [p["tb"] for p in pf_low], + [p["tb"] for p in pf_upp], + alpha=0.5, + color="blue", + ) plt.show() - -# This allows the module to be executed directly -if __name__ == '__main__': - run_example() \ No newline at end of file + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/78f1d719759ecec596bbb1d3cd04db84/sim.py b/docs/_downloads/78f1d719759ecec596bbb1d3cd04db84/sim.py index 1bc215d1..d433126f 100644 --- a/docs/_downloads/78f1d719759ecec596bbb1d3cd04db84/sim.py +++ b/docs/_downloads/78f1d719759ecec596bbb1d3cd04db84/sim.py @@ -13,6 +13,7 @@ # VVV Uncomment this to use Electro Chemistry Model VVV # Battery = BatteryElectroChem + def run_example(): # Step 1: Create a model object batt = Battery() @@ -20,13 +21,14 @@ def run_example(): # Step 2: Define future loading function - here we're using a piecewise scheme future_loading = Piecewise( batt.InputContainer, - [600, 900, 1800, 3600, float('inf')], - {'i': [2, 1, 4, 2, 3]}) + [600, 900, 1800, 3600, float("inf")], + {"i": [2, 1, 4, 2, 3]}, + ) # simulate for 200 seconds - print('\n\n------------------------------------------------') - print('Simulating for 200 seconds\n\n') - simulated_results = batt.simulate_to(200, future_loading, print = True, progress = True) + print("\n\n------------------------------------------------") + print("Simulating for 200 seconds\n\n") + simulated_results = batt.simulate_to(200, future_loading, print=True, progress=True) # The result of the simulation is now stored in simulated_results. 
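As a small illustration of the access pattern described in the surrounding comments (a sketch only; it assumes at least one point was saved during the simulation):

print("Last saved time:", simulated_results.times[-1])
print("Last saved voltage:", simulated_results.outputs[-1]["v"])
print("Last saved SOC:", simulated_results.event_states[-1]["EOD"])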
# You can access the results by accessing the individual variables: # times, inputs, states, outputs, event_states @@ -36,25 +38,35 @@ def run_example(): simulated_results.outputs.plot() # or, with configuration - simulated_results.outputs.plot(compact = False, suptitle = 'Outputs', title = 'example title', xlabel = 'time', ylabel = 'output') + simulated_results.outputs.plot( + compact=False, + suptitle="Outputs", + title="example title", + xlabel="time", + ylabel="output", + ) # Simulate to threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") options = { - 'save_freq': 100, # Frequency at which results are saved - 'dt': 2, # Timestep - 'print': True, - 'progress': True + "save_freq": 100, # Frequency at which results are saved + "dt": 2, # Timestep + "print": True, + "progress": True, } simulated_results = batt.simulate_to_threshold(future_loading, **options) # Alternately, you can set a max step size and allow step size to be adjusted automatically - options['dt'] = ('auto', 2) - # set step size automatically, with a max of 2 seconds. Setting max step size automatically will allow the + options["dt"] = ("auto", 2) + # set step size automatically, with a max of 2 seconds. Setting max step size automatically will allow the # save points, stop points, and future loading change points to be met exactly - options['save_freq'] = 201 # Save every 201 seconds - options['save_pts'] = [250, 772, 1023] # Special points we should like to see reported + options["save_freq"] = 201 # Save every 201 seconds + options["save_pts"] = [ + 250, + 772, + 1023, + ] # Special points we should like to see reported simulated_results = batt.simulate_to_threshold(future_loading, **options) # Note that even though the step size is 2, the odd points in the save frequency are met perfectly, dt is adjusted automatically to capture the save points @@ -65,20 +77,21 @@ def run_example(): # This is the maximum sustainable current that can be drawn # from the battery at steady-state. It decreases with discharge # This information can be used to inform planning - pm = [batt.performance_metrics(x)['max_i'][0] for x in simulated_results.states] + pm = [batt.performance_metrics(x)["max_i"][0] for x in simulated_results.states] plt.figure() plt.plot(simulated_results.times, pm) - plt.xlabel('Time (s)') - plt.ylabel('Maximum Sustainable Current Draw (amps)') + plt.xlabel("Time (s)") + plt.ylabel("Maximum Sustainable Current Draw (amps)") # You can also change the integration method. For example: - options['integration_method'] = 'rk4' # Using Runge-Kutta 4th order + options["integration_method"] = "rk4" # Using Runge-Kutta 4th order simulated_results_rk4 = batt.simulate_to_threshold(future_loading, **options) simulated_results_rk4.outputs.plot(compact=False) plt.show() + # This allows the module to be executed directly -if __name__ == '__main__': +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/793440e96b47eee071c6169a58aacb54/basic_example.py b/docs/_downloads/793440e96b47eee071c6169a58aacb54/basic_example.py index 0492b315..b9188510 100644 --- a/docs/_downloads/793440e96b47eee071c6169a58aacb54/basic_example.py +++ b/docs/_downloads/793440e96b47eee071c6169a58aacb54/basic_example.py @@ -2,12 +2,12 @@ """ This example performs a state estimation and prediction with uncertainty given a Prognostics Model. 
- + Method: An instance of the ThrownObject model in prog_models is created, and the prediction process is achieved in three steps: 1) State estimation of the current state is performed using a chosen state_estimator, and samples are drawn from this estimate 2) Prediction of future states (with uncertainty) and the times at which the event threshold will be reached 3) Metrics tools are used to further investigate the results of prediction -Results: +Results: i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction ii) Time event is predicted to occur (with uncertainty) iii) Various prediction metrics @@ -17,16 +17,19 @@ from progpy.models import ThrownObject from prog_algs import * + def run_example(): # Step 1: Setup model & future loading - m = ThrownObject(process_noise = 1) - def future_loading(t, x = None): + m = ThrownObject(process_noise=1) + + def future_loading(t, x=None): # No load for a thrown object return m.InputContainer({}) + initial_state = m.initialize() # Step 2: Demonstrating state estimator - # The state estimator is used to estimate the system state given sensor data. + # The state estimator is used to estimate the system state given sensor data. print("\nPerforming State Estimation Step") # Step 2a: Setup @@ -36,22 +39,26 @@ def future_loading(t, x = None): # Step 2b: Print & Plot Prior State print("Prior State:", filt.x.mean) - print('\nevent state: ', m.event_state(filt.x.mean)) - fig = filt.x.plot_scatter(label='prior') + print("\nevent state: ", m.event_state(filt.x.mean)) + fig = filt.x.plot_scatter(label="prior") # Step 2c: Perform state estimation step, given some measurement, above what's expected - example_measurements = m.OutputContainer({'x': 7.5}) + example_measurements = m.OutputContainer({"x": 7.5}) t = 0.1 u = future_loading(t) - filt.estimate(t, u, example_measurements) # Update state, given (example) sensor data + filt.estimate( + t, u, example_measurements + ) # Update state, given (example) sensor data # Step 2d: Print & Plot Resulting Posterior State # Note the posterior state is greater than the predicted state of 5.95 - # This is because of the high measurement + # This is because of the high measurement print("\nPosterior State:", filt.x.mean) # Event state for 'falling' is less, because velocity has decreased - print('\nEvent State: ', m.event_state(filt.x.mean)) - filt.x.plot_scatter(fig=fig, label='posterior') # Add posterior state to figure from prior state + print("\nEvent State: ", m.event_state(filt.x.mean)) + filt.x.plot_scatter( + fig=fig, label="posterior" + ) # Add posterior state to figure from prior state # Note: in a prognostic application the above state estimation step would be repeated each time # there is new data. Here we're doing one step to demonstrate how the state estimator is used @@ -65,8 +72,10 @@ def future_loading(t, x = None): # Step 3b: Perform a prediction NUM_SAMPLES = 50 STEP_SIZE = 0.01 - mc_results = mc.predict(filt.x, future_loading, n_samples = NUM_SAMPLES, dt=STEP_SIZE, save_freq=STEP_SIZE) - print('Predicted time of event (ToE): ', mc_results.time_of_event.mean) + mc_results = mc.predict( + filt.x, future_loading, n_samples=NUM_SAMPLES, dt=STEP_SIZE, save_freq=STEP_SIZE + ) + print("Predicted time of event (ToE): ", mc_results.time_of_event.mean) # Here there are 2 events predicted, when the object starts falling, and when it impacts the ground. 
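As a quick illustration of the two predicted events mentioned in the comment above (a sketch using only the mc_results object already defined in this example):

toe = mc_results.time_of_event  # UncertainData covering both events
print("Mean predicted time the object starts falling:", toe.mean["falling"])
print("Mean predicted time of impact:", toe.mean["impact"])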
# Step 3c: Analyze the results @@ -82,36 +91,58 @@ def future_loading(t, x = None): # You can also access the final state (of type UncertainData), like so: # Note: to get a more accurate final state, you can decrease the step size. final_state = mc_results.time_of_event.final_state - print('State when object starts falling: ', final_state['falling'].mean) - + print("State when object starts falling: ", final_state["falling"].mean) + # You can also use the metrics package to generate some useful metrics on the result of a prediction print("\nEOD Prediction Metrics") from progpy.metrics import prob_success - print('\tPortion between 3.65 and 3.8: ', mc_results.time_of_event.percentage_in_bounds([3.65, 3.8], keys='falling')) - print('\tAssuming ground truth 3.7: ', mc_results.time_of_event.metrics(ground_truth=3.7, keys='falling')) - print('\tP(Success) if mission ends at 7.6: ', prob_success(mc_results.time_of_event, 7.6, keys='impact')) - # Plot state transition + print( + "\tPortion between 3.65 and 3.8: ", + mc_results.time_of_event.percentage_in_bounds([3.65, 3.8], keys="falling"), + ) + print( + "\tAssuming ground truth 3.7: ", + mc_results.time_of_event.metrics(ground_truth=3.7, keys="falling"), + ) + print( + "\tP(Success) if mission ends at 7.6: ", + prob_success(mc_results.time_of_event, 7.6, keys="impact"), + ) + + # Plot state transition # Here we will plot the states at t0, 25% to ToE, 50% to ToE, 75% to ToE, and ToE # You should see the states move together (i.e., velocity is lowest and highest when closest to the ground (before impact, and at beginning, respectively)) - fig = mc_results.states.snapshot(0).plot_scatter(label = "t={} s".format(int(mc_results.times[0]))) # 0 - quarter_index = int(len(mc_results.times)/4) - mc_results.states.snapshot(quarter_index).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[quarter_index]))) # 25% - mc_results.states.snapshot(quarter_index*2).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[quarter_index*2]))) # 50% - mc_results.states.snapshot(quarter_index*3).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[quarter_index*3]))) # 75% - mc_results.states.snapshot(-1).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[-1]))) # 100% + fig = mc_results.states.snapshot(0).plot_scatter( + label="t={} s".format(int(mc_results.times[0])) + ) # 0 + quarter_index = int(len(mc_results.times) / 4) + mc_results.states.snapshot(quarter_index).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index])) + ) # 25% + mc_results.states.snapshot(quarter_index * 2).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index * 2])) + ) # 50% + mc_results.states.snapshot(quarter_index * 3).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index * 3])) + ) # 75% + mc_results.states.snapshot(-1).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[-1])) + ) # 100% # Plot time of event for each event - # If you dont see many bins here, this is because there is not much variety in the estimate. + # If you dont see many bins here, this is because there is not much variety in the estimate. 
# You can increase the number of bins, decrease step size, or increase the number of samples to see more of a distribution - mc_results.time_of_event.plot_hist(keys='impact') - mc_results.time_of_event.plot_hist(keys='falling') - + mc_results.time_of_event.plot_hist(keys="impact") + mc_results.time_of_event.plot_hist(keys="falling") + # Step 4: Show all plots import matplotlib.pyplot as plt # For plotting + plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/7c5976947f77971a5ed9939937b15116/measurement_eqn_example.py b/docs/_downloads/7c5976947f77971a5ed9939937b15116/measurement_eqn_example.py index 4faa95b9..7c261ba2 100644 --- a/docs/_downloads/7c5976947f77971a5ed9939937b15116/measurement_eqn_example.py +++ b/docs/_downloads/7c5976947f77971a5ed9939937b15116/measurement_eqn_example.py @@ -1,12 +1,12 @@ # Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. """ -This example performs a state estimation with uncertainty given a Prognostics Model for a system in which not all output values are measured. - -Method: An instance of the BatteryCircuit model in progpy is created. We assume that we are only measuring one of the output values, and we define a subclass to remove the other output value. +This example performs a state estimation with uncertainty given a Prognostics Model for a system in which not all output values are measured. + +Method: An instance of the BatteryCircuit model in progpy is created. We assume that we are only measuring one of the output values, and we define a subclass to remove the other output value. Estimation of the current state is performed at various time steps, using the defined state_estimator. -Results: +Results: i) Estimate of the current state given various times ii) Display of results, such as prior and posterior state estimate values and SOC """ @@ -17,65 +17,67 @@ from progpy import * + def run_example(): # Step 1: Subclass model with measurement equation # In this case we're only measuring 'v' (i.e., removing temperature) # To do this we're creating a new class that's subclassed from the complete model. 
# To change the outputs we just have to override outputs (the list of keys) class MyBattery(Battery): - outputs = ['v'] + outputs = ["v"] # Step 2: Setup model & future loading batt = MyBattery() loads = [ # Define loads here to accelerate prediction - batt.InputContainer({'i': 2}), - batt.InputContainer({'i': 1}), - batt.InputContainer({'i': 4}), - batt.InputContainer({'i': 2}), - batt.InputContainer({'i': 3}) + batt.InputContainer({"i": 2}), + batt.InputContainer({"i": 1}), + batt.InputContainer({"i": 4}), + batt.InputContainer({"i": 2}), + batt.InputContainer({"i": 3}), ] - def future_loading(t, x = None): - # Variable (piece-wise) future loading scheme - if (t < 600): + + def future_loading(t, x=None): + # Variable (piece-wise) future loading scheme + if t < 600: return loads[0] - elif (t < 900): + elif t < 900: return loads[1] - elif (t < 1800): + elif t < 1800: return loads[2] - elif (t < 3000): + elif t < 3000: return loads[3] return loads[-1] - x0 = batt.parameters['x0'] + x0 = batt.parameters["x0"] # Step 3: Use the updated model filt = state_estimators.ParticleFilter(batt, x0) # Step 4: Run step and print results - print('Running state estimation step with only one of 2 outputs measured') + print("Running state estimation step with only one of 2 outputs measured") # Print Prior print("\nPrior State:", filt.x.mean) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) # Estimate Step # Note, only voltage was needed in the measurement step, since that is the only output we're measuring t = 0.1 load = future_loading(t) - filt.estimate(t, load, {'v': 3.915}) + filt.estimate(t, load, {"v": 3.915}) # Print Posterior print("\nPosterior State:", filt.x.mean) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) # Another Estimate Step t = 0.2 load = future_loading(t) - filt.estimate(t, load, {'v': 3.91}) + filt.estimate(t, load, {"v": 3.91}) # Print Posterior Again print("\nPosterior State (t={}):".format(t), filt.x.mean) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) # Note that the particle filter was still able to perform state estimation. 
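A rough sketch of how the same estimation loop could continue over a stream of timestamped voltage measurements; the (t, v) pairs below are invented purely for illustration.

for t, v in [(0.3, 3.905), (0.4, 3.900)]:
    filt.estimate(t, future_loading(t), {"v": v})
print("\nPosterior State (t={}):".format(t), filt.x.mean)
print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"])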
# The updated outputs can be used for any case where the measurement doesn't match the model outputs @@ -84,29 +86,31 @@ def future_loading(t, x = None): parent = Battery() - class MyBattery(Battery): - outputs = ['tv'] # output is temperature * voltage (for some reason) + outputs = ["tv"] # output is temperature * voltage (for some reason) def output(self, x): - parent.parameters = self.parameters # only needed if you expect to change parameters + parent.parameters = ( + self.parameters + ) # only needed if you expect to change parameters z = parent.output(x) - return self.OutputContainer({'tv': z['v'] * z['t']}) + return self.OutputContainer({"tv": z["v"] * z["t"]}) batt = MyBattery() filt = state_estimators.ParticleFilter(batt, x0) - print('-----------------\n\nExample 2') + print("-----------------\n\nExample 2") print("\nPrior State:", filt.x.mean) print("\toutput: ", batt.output(filt.x.mean)) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) t = 0.1 load = future_loading(t) - filt.estimate(t, load, {'tv': 80}) + filt.estimate(t, load, {"tv": 80}) print("\nPosterior State:", filt.x.mean) print("\toutput: ", batt.output(filt.x.mean)) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/7d2fab8d979444cb008019b2a50fcc14/matrix_model.ipynb b/docs/_downloads/7d2fab8d979444cb008019b2a50fcc14/matrix_model.ipynb index 53e2f667..30d5cfcf 100644 --- a/docs/_downloads/7d2fab8d979444cb008019b2a50fcc14/matrix_model.ipynb +++ b/docs/_downloads/7d2fab8d979444cb008019b2a50fcc14/matrix_model.ipynb @@ -1,54 +1,151 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nThis example shows the use of the advanced feature - matrix models. Matrix models represent the state of the system using matricies instead of dictionaries. The provided model.StateContainer, InputContainer, and OutputContainer can be treated as dictionaries but use an underly matrix. This is important for some applications like surrogate and machine-learned models where the state is represented by a tensor, and operations by matrix operations. Simulation functions propogate the state using the matrix form, preventing the inefficiency of having to convert to and from dictionaries.\n\nIn this example, a model is designed to simulate a thrown object using matrix notation (instead of dictionary notation as in the standard model). 
The implementation of the model is comparable to a standard model, except that it uses the x.matrix, u.matrix, and z.matirx to compute matrix operations within each function.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def run_example():\n from prog_models import PrognosticsModel\n import numpy as np\n\n # Define the model\n class ThrownObject(PrognosticsModel):\n # Define the model properties, this is exactly the same as for a regular model.\n\n inputs = [] # no inputs, no way to control\n states = [\n 'x', # Position (m) \n 'v' # Velocity (m/s)\n ]\n outputs = [\n 'x' # Position (m)\n ]\n events = [\n 'falling', # Event- object is falling\n 'impact' # Event- object has impacted ground\n ]\n\n is_vectorized = True\n\n # The Default parameters. Overwritten by passing parameters dictionary into constructor\n default_parameters = {\n 'thrower_height': 1.83, # m\n 'throwing_speed': 40, # m/s\n 'g': -9.81, # Acceleration due to gravity in m/s^2\n 'process_noise': 0.0 # amount of noise in each step\n }\n\n # Define the model equations\n def initialize(self, u = None, z = None):\n # Note: states are returned using StateContainer\n return self.StateContainer({\n 'x': self.parameters['thrower_height'], \n 'v': self.parameters['throwing_speed']})\n\n def next_state(self, x, u, dt):\n # Here we will use the matrix version for each variable\n # Note: x.matrix is a column vector\n # Note: u.matrix is a column vector\n # and u.matrix is in the order of model.inputs, above\n\n A = np.array([[0, 1], [0, 0]]) # State transition matrix\n B = np.array([[0], [self.parameters['g']]]) # Acceleration due to gravity\n x.matrix += (np.matmul(A, x.matrix) + B) * dt\n\n return x\n \n def output(self, x):\n # Note- states can still be accessed a dictionary\n return self.OutputContainer({'x': x['x']})\n\n # This is actually optional. Leaving thresholds_met empty will use the event state to define thresholds.\n # Threshold = Event State == 0. 
However, this implementation is more efficient, so we included it\n def threshold_met(self, x):\n return {\n 'falling': x['v'] < 0,\n 'impact': x['x'] <= 0\n }\n\n def event_state(self, x): \n x_max = x['x'] + np.square(x['v'])/(-self.parameters['g']*2) # Use speed and position to estimate maximum height\n x_max = np.where(x['v'] > 0, x['x'], x_max) # 1 until falling begins\n return {\n 'falling': np.maximum(x['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed\n 'impact': np.maximum(x['x']/x_max,0) # then it's fraction of height\n }\n\n # Now we can use the model\n # Create the model\n thrown_object = ThrownObject()\n\n # Use the model\n x = thrown_object.initialize()\n print('State at 0.1 seconds: ', thrown_object.next_state(x, {}, 0.1))\n\n # But you can also initialize state directly, like so:\n x = thrown_object.StateContainer({'x': 1.93, 'v': 40})\n print('State at 0.1 seconds: ', thrown_object.next_state(x, None, 0.1))\n\n # Now lets use it for simulation.\n def future_loading(t, x=None):\n return thrown_object.InputContainer({})\n\n thrown_object.simulate_to_threshold(\n future_loading, \n print = True, \n threshold_keys = 'impact', \n dt = 0.1, \n save_freq = 1)\n\n# This allows the module to be executed directly \nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nThis example shows the use of the advanced feature - matrix models. Matrix models represent the state of the system using matricies instead of dictionaries. The provided model.StateContainer, InputContainer, and OutputContainer can be treated as dictionaries but use an underly matrix. This is important for some applications like surrogate and machine-learned models where the state is represented by a tensor, and operations by matrix operations. Simulation functions propogate the state using the matrix form, preventing the inefficiency of having to convert to and from dictionaries.\n\nIn this example, a model is designed to simulate a thrown object using matrix notation (instead of dictionary notation as in the standard model). 
The implementation of the model is comparable to a standard model, except that it uses the x.matrix, u.matrix, and z.matirx to compute matrix operations within each function.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def run_example():\n", + " from prog_models import PrognosticsModel\n", + " import numpy as np\n", + "\n", + " # Define the model\n", + " class ThrownObject(PrognosticsModel):\n", + " # Define the model properties, this is exactly the same as for a regular model.\n", + "\n", + " inputs = [] # no inputs, no way to control\n", + " states = [\n", + " \"x\", # Position (m)\n", + " \"v\", # Velocity (m/s)\n", + " ]\n", + " outputs = [\n", + " \"x\" # Position (m)\n", + " ]\n", + " events = [\n", + " \"falling\", # Event- object is falling\n", + " \"impact\", # Event- object has impacted ground\n", + " ]\n", + "\n", + " is_vectorized = True\n", + "\n", + " # The Default parameters. Overwritten by passing parameters dictionary into constructor\n", + " default_parameters = {\n", + " \"thrower_height\": 1.83, # m\n", + " \"throwing_speed\": 40, # m/s\n", + " \"g\": -9.81, # Acceleration due to gravity in m/s^2\n", + " \"process_noise\": 0.0, # amount of noise in each step\n", + " }\n", + "\n", + " # Define the model equations\n", + " def initialize(self, u=None, z=None):\n", + " # Note: states are returned using StateContainer\n", + " return self.StateContainer(\n", + " {\n", + " \"x\": self.parameters[\"thrower_height\"],\n", + " \"v\": self.parameters[\"throwing_speed\"],\n", + " }\n", + " )\n", + "\n", + " def next_state(self, x, u, dt):\n", + " # Here we will use the matrix version for each variable\n", + " # Note: x.matrix is a column vector\n", + " # Note: u.matrix is a column vector\n", + " # and u.matrix is in the order of model.inputs, above\n", + "\n", + " A = np.array([[0, 1], [0, 0]]) # State transition matrix\n", + " B = np.array([[0], [self.parameters[\"g\"]]]) # Acceleration due to gravity\n", + " x.matrix += (np.matmul(A, x.matrix) + B) * dt\n", + "\n", + " return x\n", + "\n", + " def output(self, x):\n", + " # Note- states can still be accessed a dictionary\n", + " return self.OutputContainer({\"x\": x[\"x\"]})\n", + "\n", + " # This is actually optional. Leaving thresholds_met empty will use the event state to define thresholds.\n", + " # Threshold = Event State == 0. 
However, this implementation is more efficient, so we included it\n", + " def threshold_met(self, x):\n", + " return {\"falling\": x[\"v\"] < 0, \"impact\": x[\"x\"] <= 0}\n", + "\n", + " def event_state(self, x):\n", + " x_max = x[\"x\"] + np.square(x[\"v\"]) / (\n", + " -self.parameters[\"g\"] * 2\n", + " ) # Use speed and position to estimate maximum height\n", + " x_max = np.where(x[\"v\"] > 0, x[\"x\"], x_max) # 1 until falling begins\n", + " return {\n", + " \"falling\": np.maximum(\n", + " x[\"v\"] / self.parameters[\"throwing_speed\"], 0\n", + " ), # Throwing speed is max speed\n", + " \"impact\": np.maximum(x[\"x\"] / x_max, 0), # then it's fraction of height\n", + " }\n", + "\n", + " # Now we can use the model\n", + " # Create the model\n", + " thrown_object = ThrownObject()\n", + "\n", + " # Use the model\n", + " x = thrown_object.initialize()\n", + " print(\"State at 0.1 seconds: \", thrown_object.next_state(x, {}, 0.1))\n", + "\n", + " # But you can also initialize state directly, like so:\n", + " x = thrown_object.StateContainer({\"x\": 1.93, \"v\": 40})\n", + " print(\"State at 0.1 seconds: \", thrown_object.next_state(x, None, 0.1))\n", + "\n", + " # Now lets use it for simulation.\n", + " def future_loading(t, x=None):\n", + " return thrown_object.InputContainer({})\n", + "\n", + " thrown_object.simulate_to_threshold(\n", + " future_loading, print=True, threshold_keys=\"impact\", dt=0.1, save_freq=1\n", + " )\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/7e3375e02d53d89020ab722466e41449/benchmarking.py b/docs/_downloads/7e3375e02d53d89020ab722466e41449/benchmarking.py index 8756c210..a57ac19c 100644 --- a/docs/_downloads/7e3375e02d53d89020ab722466e41449/benchmarking.py +++ b/docs/_downloads/7e3375e02d53d89020ab722466e41449/benchmarking.py @@ -8,25 +8,30 @@ from progpy.models import BatteryCircuit from timeit import timeit + def run_example(): # Step 1: Create a model object batt = BatteryCircuit() - - # Step 2: Define future loading function - loading = batt.InputContainer({'i': 2}) # Constant loading + + # Step 2: Define future loading function + loading = batt.InputContainer({"i": 2}) # Constant loading + def future_loading(t, x=None): # Constant Loading return loading # Step 3: Benchmark simulation of 600 seconds - print('Benchmarking...') - def sim(): + print("Benchmarking...") + + def sim(): batt.simulate_to(600, future_loading) + time = timeit(sim, number=500) # Print results - print('Simulation Time: {} ms/sim'.format(time)) + print("Simulation Time: {} ms/sim".format(time)) + -# This allows the module to be executed directly -if __name__=='__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/7f2f8df9098381336a6c1a5199ddd597/sim_valve.py b/docs/_downloads/7f2f8df9098381336a6c1a5199ddd597/sim_valve.py index 320967d4..d869d507 100644 --- a/docs/_downloads/7f2f8df9098381336a6c1a5199ddd597/sim_valve.py +++ 
b/docs/_downloads/7f2f8df9098381336a6c1a5199ddd597/sim_valve.py @@ -2,74 +2,85 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a pneumatic valve being simulated until threshold is met. +Example of a pneumatic valve being simulated until threshold is met. """ from progpy.models.pneumatic_valve import PneumaticValve -def run_example(): + +def run_example(): # Create a model object - valv = PneumaticValve(process_noise= 0) + valv = PneumaticValve(process_noise=0) # Define future loading function cycle_time = 20 + def future_loading(t, x=None): - t = t % cycle_time - if t < cycle_time/2: - return valv.InputContainer({ - 'pL': 3.5e5, - 'pR': 2.0e5, + t = t % cycle_time + if t < cycle_time / 2: + return valv.InputContainer( + { + "pL": 3.5e5, + "pR": 2.0e5, # Open Valve - 'uTop': False, - 'uBot': True - }) - return valv.InputContainer({ - 'pL': 3.5e5, - 'pR': 2.0e5, + "uTop": False, + "uBot": True, + } + ) + return valv.InputContainer( + { + "pL": 3.5e5, + "pR": 2.0e5, # Close Valve - 'uTop': True, - 'uBot': False - }) + "uTop": True, + "uBot": False, + } + ) # Simulate to threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") # Configure options config = { - 'dt': 0.01, - 'horizon': 800, - 'save_freq': 60, - 'print': True, - 'progress': True, + "dt": 0.01, + "horizon": 800, + "save_freq": 60, + "print": True, + "progress": True, } # Set wear parameter for spring to 1 - valv.parameters['x0']['wk'] = 1 + valv.parameters["x0"]["wk"] = 1 # Define first measured output. This is needed by the simulate_to_threshold method to initialize state first_output = valv.output(valv.initialize(future_loading(0))) # Simulate - simulated_results = valv.simulate_to_threshold(future_loading, first_output, **config) + simulated_results = valv.simulate_to_threshold( + future_loading, first_output, **config + ) # Simulate to threshold again but with a different wear mode - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") # Configure options config = { - 'dt': 0.01, - 'horizon': 800, - 'save_freq': 60, - 'print': True, - 'progress': True + "dt": 0.01, + "horizon": 800, + "save_freq": 60, + "print": True, + "progress": True, } # Reset wear parameter for spring to 0, set wear parameter for friction to 1 - valv.parameters['x0']['wk'] = 0 - valv.parameters['x0']['wr'] = 1 + valv.parameters["x0"]["wk"] = 0 + valv.parameters["x0"]["wr"] = 1 # Define first measured output. 
This is needed by the simulate_to_threshold method to initialize state first_output = valv.output(valv.initialize(future_loading(0))) # Simulate - simulated_results = valv.simulate_to_threshold(future_loading, first_output, **config) + simulated_results = valv.simulate_to_threshold( + future_loading, first_output, **config + ) + # This allows the module to be executed directly -if __name__ == '__main__': +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/80b28bfd8fd43a87986023694c882032/04_New Models.ipynb b/docs/_downloads/80b28bfd8fd43a87986023694c882032/04_New Models.ipynb new file mode 100644 index 00000000..328e7eda --- /dev/null +++ b/docs/_downloads/80b28bfd8fd43a87986023694c882032/04_New Models.ipynb @@ -0,0 +1,1943 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 4. Defining New Physics-Based Prognostic Models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "All of the previous sections describe how to use an existing model. In this section, we will explore how to create a new physics-based model. \n", + "\n", + "A physics-based model is a model where behavior is described by the physics of the system. Physics-based models are typically parameterized, so that exact behavior of the system can be configured or learned (through parameter estimation). \n", + "\n", + "For training and creating data-driven models, see __[05 Data Driven](05_Data%20Driven.ipynb)__." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "* [Linear Models](#Linear-Models)\n", + "* [State Transition Models](#State-Transition-Models)\n", + "* [Direct Models](#Direct-Models)\n", + "* [Advanced Features](#Advanced-Features)\n", + " * [Derived Parameters](#Derived-Parameters)\n", + " * [Matrix Data Access](#Matrix-Data-Access)\n", + " * [State Limits](#State-Limits)\n", + " * [Custom Events](#Custom-Events)\n", + " * [Serialization](#Serialization)\n", + "* [Simplified Battery Model Example](#Simplified-Battery-Model-Example)\n", + " * [State Transition](#State-Transition)\n", + " * [Outputs](#Outputs)\n", + " * [Events](#Events)\n", + "* [Conclusion](#Conclusion)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Linear Models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The easiest model to build is a linear model. Linear models are defined as a linear time series, which can be defined by the following equations:\n", + "\n", + "\n", + "\n", + "**The State Equation**:\n", + "$$\n", + "\\frac{dx}{dt} = Ax + Bu + E\n", + "$$\n", + "\n", + "**The Output Equation**:\n", + "$$\n", + "z = Cx + D\n", + "$$\n", + "\n", + "**The Event State Equation**:\n", + "$$\n", + "es = Fx + G\n", + "$$\n", + "\n", + "$x$ is `state`, $u$ is `input`, $z$ is `output`, and $es$ is `event state`\n", + "\n", + "Linear models are defined by creating a new model class that inherits from progpy's `LinearModel` class and defines the following properties:\n", + "* $A$: 2-D np.array[float], dimensions: n_states x n_states. The state transition matrix. It dictates how the current state affects the change in state dx/dt.\n", + "* $B$: 2-D np.array[float], optional (zeros by default), dimensions: n_states x n_inputs. The input matrix. It dictates how the input affects the change in state dx/dt.\n", + "* $C$: 2-D np.array[float], dimensions: n_outputs x n_states. The output matrix. 
It determines how the state variables contribute to the output.\n", + "* $D$: 1-D np.array[float], optional (zeros by default), dimensions: n_outputs x 1. A constant term that can represent any biases or offsets in the output.\n", + "* $E$: 1-D np.array[float], optional (zeros by default), dimensions: n_states x 1. A constant term, representing any external effects that are not captured by the state and input.\n", + "* $F$: 2-D np.array[float], dimensions: n_es x n_states. The event state matrix, dictating how state variables contribute to the event state.\n", + "* $G$: 1-D np.array[float], optional (zeros by default), dimensions: n_es x 1. A constant term that can represent any biases or offsets in the event state.\n", + "* __inputs__: list[str] - `input` keys\n", + "* __states__: list[str] - `state` keys\n", + "* __outputs__: list[str] - `output` keys\n", + "* __events__: list[str] - `event` keys" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now utilize our `LinearModel` to model the classical physics problem throwing an object into the air. This is a common example model, the non-linear version of which (`progpy.examples.ThrownObject`) has been used frequently throughout the examples. This version of ThrownObject will behave almost identically to the non-linear `ThrownObject`, except it will not have the non-linear effects of air resistance.\n", + "\n", + "We can create a subclass of `LinearModel` to simulate an object thrown, which we will call the `ThrownObject` class. Let's start with some definitions for our model:\n", + "\n", + "**Events**: (2)\n", + "* `falling`: The object is falling\n", + "* `impact`: The object has hit the ground\n", + "\n", + "**Inputs/Loading**: (0)\n", + "* `None`\n", + "\n", + "**States**: (2)\n", + "* `x`: Position in space (m)\n", + "* `v`: Velocity in space (m/s)\n", + "\n", + "**Outputs/Measurements**: (1)\n", + "* `x`: Position in space (m)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, for our keyword arguments:\n", + "\n", + "* __thrower_height : Optional, float__\n", + " * Height of the thrower (m). Default is 1.83 m\n", + "* __throwing_speed : Optional, float__\n", + " * Speed at which the ball is thrown (m/s). Default is 40 m/s" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With our definitions, we can now create the `ThrownObject` model.\n", + "\n", + "First, we need to import the necessary packages." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "from progpy import LinearModel" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we'll define some features of a `ThrownObject` `LinearModel`. Recall that all `LinearModel` classes follow a set of core equations and require some specific properties, as noted earlier. In this next step, we'll define our inputs, states, outputs, and events, along with the $A$, $C$, $E$, and $F$ values.\n", + "\n", + "First, let's consider state transition. For an object thrown into the air without air resistance, velocity would decrease linearly by -9.81 \n", + "$\\dfrac{m}{s^2}$ due to the effect of gravity, as described below:\n", + "\n", + " $$\\frac{dv}{dt} = -9.81$$\n", + "\n", + " Position change is defined by velocity (v), as described below:\n", + " \n", + " $$\\frac{dx}{dt} = v$$\n", + "\n", + "For the above equation x is position not state. 
Combining these equations with the model $\\frac{dx}{dt}$ equation defined above yields the $A$ and $E$ matrix defined below. There is no $B$ defined because this model does not have any inputs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject(LinearModel):\n", + " events = [\"impact\"]\n", + " inputs = []\n", + " states = [\"x\", \"v\"]\n", + " outputs = [\"x\"]\n", + "\n", + " A = np.array([[0, 1], [0, 0]])\n", + " C = np.array([[1, 0]])\n", + " E = np.array([[0], [-9.81]])\n", + " F = None" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that we defined our $A$, $C$, $E$, and $F$ values to fit the dimensions that were stated at the beginning of the notebook! Since the parameter $F$ is not optional, we have to explicitly set the value as `None`.\n", + "\n", + "Next, we'll define some default parameters for our `ThrownObject` model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject(ThrownObject): # Continue the ThrownObject class\n", + " default_parameters = {\n", + " \"thrower_height\": 1.83,\n", + " \"throwing_speed\": 40,\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the following cells, we'll define some class functions necessary to perform prognostics on the model.\n", + "\n", + "The `initialize()` function sets the initial system state. Since we have defined the `x` and `v` values for our `ThrownObject` model to represent position and velocity in space, our initial values would be the `thrower_height` and `throwing_speed` parameters, respectively." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject(ThrownObject):\n", + " def initialize(self, u=None, z=None):\n", + " return self.StateContainer(\n", + " {\"x\": self[\"thrower_height\"], \"v\": self[\"throwing_speed\"]}\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For our `threshold_met()` equation, we will define the function to return `True` for event `falling` when our model has a velocity value less than 0 (object is 'falling') and for event `impact` when our thrown object has a distance from the ground less than or equal to 0 (object is on the ground, or has made 'impact').\n", + "\n", + "`threshold_met()` returns a _dict_ of values, if each entry of the _dict_ is true, then our threshold has been met." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject(ThrownObject):\n", + " def threshold_met(self, x):\n", + " return {\"falling\": x[\"v\"] < 0, \"impact\": x[\"x\"] <= 0}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, for our `event_state()` equation, we will calculate the measurement of progress towards the events. We will normalize our values such that they are in the range of 0 to 1, where 0 means the event has occurred." 
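, + "\n", + "For instance (an illustrative value, not taken from the simulation), if the object is still rising at $v = 20$ m/s with the default `throwing_speed` of 40 m/s, the `falling` event state is $20/40 = 0.5$, i.e. halfway to the point where the fall begins."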
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject(ThrownObject):\n", + " def event_state(self, x):\n", + " x_max = x[\"x\"] + np.square(x[\"v\"]) / (9.81 * 2)\n", + " return {\n", + " \"falling\": np.maximum(x[\"v\"] / self[\"throwing_speed\"], 0),\n", + " \"impact\": np.maximum(x[\"x\"] / x_max, 0) if x[\"v\"] < 0 else 1,\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With these functions created, we can now use the `simulate_to_threshold()` function to simulate the movement of the thrown object in air. Let's run the simulation. For more information, see __[01 Simulation](01_Simulation.ipynb)__." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m = ThrownObject()\n", + "simulated_results = m.simulate_to_threshold(\n", + " print=True, save_freq=1, events=\"impact\", dt=0.1\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Since our model takes in no inputs, we have no need to define a future loading function. However, for most models, there would be inputs, and thus a need for a future loading function. For more information on future loading functions and when to use them, please refer to the future loading section in __[01 Simulation](01_Simulation.ipynb)__.\n", + "\n", + "Let's take a look at the outputs of this model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.outputs.plot(\n", + " title=\"ThrownObject model simulation output\",\n", + " xlabel=\"time (s)\",\n", + " ylabel=\"position (m)\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that that plot resembles a parabola, which represents the position of the ball through space as time progresses. For more information on `LinearModel`, see the [LinearModel](https://nasa.github.io/progpy/api_ref/prog_models/LinearModel.html) documentation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## State Transition Models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the previous section, we defined a new prognostic model using the `LinearModel` class. This can be a powerful tool for defining models that can be described as a linear time series. \n", + "\n", + "Physics-based state transition models that cannot be described linearly are constructed by subclassing `PrognosticsModel`. For more information, refer to the [progpy.PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel) documentation. \n", + "\n", + "To demonstrate this, we'll create a new model class that inherits from `PrognosticsModel`. This inheritance allows the new model to use the analysis and simulation tools from `PrognosticsModel`.\n", + "\n", + "Let's create a simple state-transition model of an object thrown upwards in the air without air resistance. Note that this is the same dynamic system as the linear model example above, but formulated differently.\n", + "\n", + "First, we'll import the necessary packages and classes." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "from progpy import PrognosticsModel" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we'll define our model class. PrognosticsModels require defining [inputs](https://nasa.github.io/progpy/glossary.html#term-input), [states](https://nasa.github.io/progpy/glossary.html#term-state), [outputs](https://nasa.github.io/progpy/glossary.html#term-output), and [event](https://nasa.github.io/progpy/glossary.html#term-event) keys. As in the above example, the states include position (`x`) and velocity(`v`) of the object, the output is position (`x`), and the events are `falling` and `impact`. \n", + "\n", + "We will define this new class as `ThrownObject_ST` to distinguish it as a state-transition model compared to the previous linear model class. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject_ST(PrognosticsModel):\n", + " \"\"\"\n", + " Model that simulates an object thrown into the air without air resistance\n", + " \"\"\"\n", + "\n", + " inputs = [] # no inputs, no way to control\n", + " states = [\n", + " \"x\", # Position (m)\n", + " \"v\", # Velocity (m/s)\n", + " ]\n", + " outputs = [ # Anything we can measure\n", + " \"x\" # Position (m)\n", + " ]\n", + " events = [\n", + " \"falling\", # Event- object is falling\n", + " \"impact\", # Event- object has impacted ground\n", + " ]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we'll add some default parameter definitions. These values can be overwritten by passing parameters into the constructor. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject_ST(ThrownObject_ST):\n", + " default_parameters = {\n", + " \"thrower_height\": 1.83, # default height\n", + " \"throwing_speed\": 40, # default speed\n", + " \"g\": -9.81, # Acceleration due to gravity (m/s^2)\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "All prognostics models require some specific class functions. We'll define those next.\n", + "\n", + "We'll first add the functionality to set the initial state of the system. There are two ways to provide the logic to initialize model state. \n", + "\n", + "1. Provide the initial state in `parameters['x0']`\n", + "2. Provide an `initialize` function \n", + "\n", + "The first method is preferred since defining `parameters['x0']` means we don't need to explicitly define an `initialize` method as these parameter values will already be used as the initial state.\n", + "\n", + "However, there are some cases where the initial state is a function of the input (`u`) or output (`z`) (e.g. a use-case where the input is also a state), so an explicitly defined `initialize` method is required. \n", + "\n", + "In this example, we could set our initial state by simply defining `parameters['x0']`. However, we will use an `initialize` function for ease when using the `derived_params` feature, which will be discussed in the next section. In the code below, note that the function can take arguments for both input `u` and output `z`, though these are optional." 
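, + "\n", + "(For reference, the first option would look roughly like adding `\"x0\": {\"x\": 1.83, \"v\": 40}` to `default_parameters`; this is a hypothetical sketch of the alternative, which we do not use here.)"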
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject_ST(ThrownObject_ST):\n", + " def initialize(self, u=None, z=None):\n", + " return self.StateContainer(\n", + " {\n", + " \"x\": self[\n", + " \"thrower_height\"\n", + " ], # Initial height from which the ball is thrown\n", + " \"v\": self[\"throwing_speed\"], # Velocity at which the ball is thrown\n", + " }\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `PrognosticsModel` class requires that we define how the state transitions throughout time. For continuous models, this is defined with the method `dx`, which calculates the first derivative of the state at a specific time. For discrete systems, this is defined with the method `next_state`, using the state transition equation for the system. When possible, it is recommended to use the continuous (`dx`) form, as some algorithms will only work on continuous models.\n", + "\n", + "Here, we will use the equations for the derivatives of our system (i.e., the continuous form)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject_ST(ThrownObject_ST):\n", + " def dx(self, x, u):\n", + " return self.StateContainer(\n", + " {\"x\": x[\"v\"], \"v\": self[\"g\"]}\n", + " ) # Acceleration of gravity" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we'll define the `output` method, which will calculate the output (i.e., measurable values) given the current state. Here, our output is position (`x`). " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject_ST(ThrownObject_ST):\n", + " def output(self, x):\n", + " return self.OutputContainer({\"x\": x[\"x\"]})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The next method we define is [`event_state`](https://nasa.github.io/progpy/glossary.html#term-event-state). As before, \n", + "`event_state` calculates the progress towards the events. This is normalized to be between 0 and 1, where 1 means there is no progress towards the event and 0 means the event has occurred. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject_ST(ThrownObject_ST):\n", + " def event_state(self, x):\n", + " # Use speed and position to estimate maximum height\n", + " x_max = x[\"x\"] + np.square(x[\"v\"]) / (-self[\"g\"] * 2)\n", + " # 1 until falling begins\n", + " x_max = np.where(x[\"v\"] > 0, x[\"x\"], x_max)\n", + " return {\n", + " \"falling\": max(\n", + " x[\"v\"] / self[\"throwing_speed\"], 0\n", + " ), # Throwing speed is max speed\n", + " \"impact\": max(\n", + " x[\"x\"] / x_max, 0\n", + " ), # 1 until falling begins, then it's fraction of height\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "At this point, we have defined all necessary information for our new model to be complete. There are other methods that can be defined to provide additional configuration, and we'll highlight some of them in the following sections. We can also refer to the [PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html) documentation for more information.\n", + "\n", + "For example, we can optionally include a `threshold_met` equation. 
Without an explicit definition, `threshold_met` will use the event state to define thresholds (threshold = event state == 0). However, this implementation is more efficient, so we will include it.\n", + "\n", + "We will define `threshold_met` in the same way as our linear model example. `threshold_met` will return a _dict_ of values, one for each event. The threshold will be met when all dictionary entries are `True`. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject_ST(ThrownObject_ST):\n", + " def threshold_met(self, x):\n", + " return {\n", + " \"falling\": x[\"v\"] < 0, # Falling occurs when velocity becomes negative\n", + " \"impact\": x[\"x\"]\n", + " <= 0, # Impact occurs when the object hits the ground, i.e. position is <= 0\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We have now created a new `ThrownObject_ST` model. Let's now test our model through simulation. \n", + "\n", + "First, we'll create an instance of the model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m_st = ThrownObject_ST()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll then simulate to impact, as specified in `events`. For more information on how simulation works, refer to __[01 Simulation](01_Simulation.ipynb)__. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Simulate to impact\n", + "event = \"impact\"\n", + "simulated_results = m_st.simulate_to_threshold(\n", + " events=event, dt=0.005, save_freq=1, print=True\n", + ")\n", + "\n", + "# Print result:\n", + "print(\n", + " \"The object hit the ground in {} seconds\".format(\n", + " round(simulated_results.times[-1], 2)\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's now plot the results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.outputs.plot(\n", + " title=\"ThrownObject_ST model simulation output\",\n", + " xlabel=\"time (s)\",\n", + " ylabel=\"position (m)\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can see the parabolic shape of the object being thrown in the air.\n", + "\n", + "We have so far illustrated how to construct new physics-based models by subclassing from [progpy.PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel). Some elements (e.g. `inputs`, `states`, `outputs`, events keys, methods for initialization, `dx` or `next_state`, `output`, and `event_state`) are required. Models can be additionally configured with additional methods and parameters.\n", + "\n", + "In the previous example, we defined each part one piece at a time, recursively subclassing the partially defined class. This was done to illustrate the parts of the model. In actual usage, all methods and properties should be defined together in a single class definition." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Direct Models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the previous sections, we illustrated how to create and use state-transition models, or models that use state transition differential equations to propagate the state forward. 
In this example, we'll explore another type of model implemented within ProgPy: direct models. \n", + "\n", + "Direct models estimate the time of event directly from the system state and future load, rather than through state transitions. This approach is particularly useful for physics-based models where the differential equations of state transitions can be explicitly solved, or for data-driven models that map sensor data directly to the time of an event. When applicable, using a direct model approach provides a more efficient way to estimate the time of an event, especially for events that occur later in the simulation.\n", + "\n", + "To illustrate this concept, we will extend the state-transition model, `ThrownObject_ST`, defined above, to create a new model class, `DirectThrownObject`. The dynamics of a thrown object lend easily to a direct model, since we can solve the differential equations explicitly to estimate the time at which the events occur. \n", + "\n", + "Recall that our physical system is described by the following differential equations: \n", + "\\begin{align*}\n", + "\\frac{dx}{dt} &= v \\\\ \\\\\n", + "\\frac{dv}{dt} &= -g \n", + "\\end{align*}\n", + "\n", + "This can be solved explicity given initial position $x_0$ and initial velocity $v_0$:\n", + "\\begin{align*}\n", + "x(t) &= -\\frac{1}{2} gt^2 + v_0 t + x_0 \\\\ \\\\ \n", + "v(t) &= -gt + v_0\n", + "\\end{align*}\n", + "\n", + "Setting these equations to 0 and solving for time, we get the time at which the object hits the ground and begins falling, respectively. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To construct our direct model, we'll extend the `ThrownObject_ST` model to additionally include the method [time_to_event](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel.time_of_event). This method will calculate the time at which each event occurs (i.e., time when the event threshold is met), based on the equations above. `time_of_event` must be implemented by any direct model. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class DirectThrownObject(ThrownObject_ST):\n", + " def time_of_event(self, x, *args, **kwargs):\n", + " # calculate time when object hits ground given x['x'] and x['v']\n", + " # 0 = x0 + v0*t - 0.5*g*t^2\n", + " g = self[\"g\"]\n", + " t_impact = -(x[\"v\"] + np.sqrt(x[\"v\"] * x[\"v\"] - 2 * g * x[\"x\"])) / g\n", + "\n", + " # 0 = v0 - g*t\n", + " t_falling = -x[\"v\"] / g\n", + "\n", + " return {\"falling\": t_falling, \"impact\": t_impact}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With this, our direct model is created. Note that adding `*args` and `**kwargs` is optional. Having these arguments makes the function interchangeable with other models which may have arguments or keyword arguments. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's test out this capability. To do so, we'll use the `time` package to compare the direct model to our original timeseries model. \n", + "\n", + "Let's start by creating an instance of our timeseries model, calculating the time of event, and timing this computation. Note that for a state transition model, `time_of_event` still returns the time at which `threshold_met` returns true for each event, but this is calculated by simulating to threshold." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import time\n", + "\n", + "m_timeseries = ThrownObject_ST()\n", + "x = m_timeseries.initialize()\n", + "print(\n", + " m_timeseries.__class__.__name__,\n", + " \"(Direct Model)\" if m_timeseries.is_direct else \"(Timeseries Model)\",\n", + ")\n", + "tic = time.perf_counter()\n", + "print(\"Time of event: \", m_timeseries.time_of_event(x, dt=0.05))\n", + "toc = time.perf_counter()\n", + "print(f\"execution: {(toc - tic) * 1000:0.4f} milliseconds\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's do the same using our direct model implementation. In this case, when `time_to_event` is called, the event time will be estimated directly from the state, instead of through simulation to threshold. \n", + "\n", + "Note that a limitation of a direct model is that you cannot get intermediate states (i.e., `save_pts` or `save_freq`) since the time of event is calculated directly. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m_direct = DirectThrownObject()\n", + "x = m_direct.initialize() # Using Initial state\n", + "# Now instead of simulating to threshold, we can estimate it directly from the state, like so\n", + "print(\n", + " \"\\n\",\n", + " m_direct.__class__.__name__,\n", + " \"(Direct Model)\" if m_direct.is_direct else \"(Timeseries Model)\",\n", + ")\n", + "tic = time.perf_counter()\n", + "print(\"Time of event: \", m_direct.time_of_event(x))\n", + "toc = time.perf_counter()\n", + "print(f\"execution: {(toc - tic) * 1000:0.4f} milliseconds\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that execution is significantly faster for the direct model. Furthermore, the result is actually more accurate, since it's not limited by the timestep (see dt section in __[01 Simulation](01_Simulation.ipynb)__). These observations will be even more pronounced for events that occur later in the simulation. \n", + "\n", + "It's important to note that this is a very simple example, as there are no inputs. For models with inputs, future loading must be provided to `time_of_event` (see the future loading section in __[01 Simulation](01_Simulation.ipynb)__). In these cases, most direct models will encode or discretize the future loading profile to use it in a direct estimation of time of event.\n", + "\n", + "In the example provided, we have illustrated how to use a direct model. Direct models are a powerful tool for estimating the time of an event directly from the system state. By avoiding the process of state transitions, direct models can provide more efficient event time estimates. Additionally, the direct model approach is not limited to physics-based models. It can also be applied to data-driven models that can map sensor data directly to the time of an event. \n", + "\n", + "In conclusion, direct models offer an efficient and versatile approach for prognostics modeling, enabling faster and more direct estimations of event times. 
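\n", + "\n", + "As a quick standalone check of the closed-form expressions above (a sketch using the default parameter values, separate from the model code):\n", + "\n", + "```python\n", + "import numpy as np\n", + "\n", + "x0, v0, g = 1.83, 40.0, -9.81  # default thrower_height, throwing_speed, g\n", + "t_falling = -v0 / g  # ~4.08 s\n", + "t_impact = -(v0 + np.sqrt(v0 * v0 - 2 * g * x0)) / g  # ~8.2 s\n", + "print(t_falling, t_impact)\n", + "# Substituting t_impact back into x(t) should give (approximately) zero, i.e. the ground\n", + "print(0.5 * g * t_impact**2 + v0 * t_impact + x0)\n", + "```\n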
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Advanced Features" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Derived Parameters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the previous section, we constructed a new model from scratch by subclassing from [progpy.PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel) and specifying all of the necessary model components. An additional optional feature of `PrognosticsModel` is derived parameters, illustrated below. \n", + "\n", + "A derived parameter is a parameter that is a function of another parameter. For example, in the case of a thrown object, one could assume that throwing speed is a function of thrower height, with taller throwing height resulting in faster throwing speeds. In the electrochemistry battery model (see __[03 Included Models](03_Existing%20Models.ipynb)__), there are parameters for the maximum and minimum charge at the surface and bulk, and these are dependent on the capacity of the battery (i.e. another parameter, `qMax`). When such derived parameters exist, they must be updated whenever the parameters they depend on are updated. In `PrognosticsModels`, this is achieved with the `derived_params` feature. \n", + "\n", + "This feature can also be used to cache combinations of parameters that are used frequently in state transition or other model methods. Creating lumped parameters using `derived_params` causes them to be calculated once when configuring, instead of each time step in simulation or prediction. \n", + "\n", + "For this example, we will use the `ThrownObject_ST` model created in a previous section. We will extend this model to include a derived parameter, namely `throwing_speed` to be dependent on `thrower_height`.\n", + "\n", + "To implement this, we must first define a function for the relationship between the two parameters. We'll assume that `throwing_speed` is a linear function of `thrower_height`. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def update_thrown_speed(params):\n", + " return {\"throwing_speed\": params[\"thrower_height\"] * 21.85}\n", + " # One or more parameters can be changed in these functions, and parameters that are changed are returned in the dictionary" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we'll define the parameter callbacks, so that `throwing_speed` is updated appropriately any time that `thrower_height` changes. The following effectively tells the derived callbacks feature to call the `update_thrown_speed` function whenever the `thrower_height` changes. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject_ST(ThrownObject_ST):\n", + " param_callbacks = {\"thrower_height\": [update_thrown_speed]}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also have more than one function be called when a single parameter is changed. We could do this by adding the additional callbacks to the list (e.g., `thrower_height`: [`update_thrown_speed`, `other_fcn`])\n", + "\n", + "We have now added the capability for `throwing_speed` to be a derived parameter. Let's try it out. First, we'll create an instance of our class and print out the default parameters. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "obj = ThrownObject_ST()\n", + "print(\n", + " \"Default Settings:\\n\\tthrower_height: {}\\n\\tthrowing_speed: {}\".format(\n", + " obj[\"thrower_height\"], obj[\"throwing_speed\"]\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's change the thrower height. If our derived parameters work correctly, the thrower speed should change accordingly. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "obj[\"thrower_height\"] = 1.75 # Our thrower is 1.75 m tall\n", + "print(\n", + " \"\\nUpdated Settings:\\n\\tthrower_height: {}\\n\\tthowing_speed: {}\".format(\n", + " obj[\"thrower_height\"], obj[\"throwing_speed\"]\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we can see, when the thrower height was changed, the throwing speed was re-calculated too. \n", + "\n", + "In this example, we illustrated how to use the `derived_params` feature, which allows a parameter to be a function of another parameter. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Matrix Data Access" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the above models, we have used dictionaries to represent the states. For example, in the implementation of `ThrownObject_ST` above, see how `dx` is defined with a `StateContainer` dictionary. While all models can be constructed using dictionaries in this way, some dynamic systems allow for the state of the system to be represented with a matrix. For such use-cases, ProgPy has an advanced matrix data access feature that provides a more efficient way to define these models.\n", + "\n", + "In ProgPy's implementation, the provided model. `StateContainer`, `InputContainer`, and `OutputContainer` can be treated as dictionaries but use an underlying matrix. This is important for some applications like surrogate and machine-learned models where the state is represented by a tensor. ProgPy's matrix data access feature allows the matrices to be used directly. Simulation functions propagate the state using the matrix form, preventing the inefficiency of having to convert to and from dictionaries. Additionally, this implementation is faster than recreating the `StateContainer` each time, especially when updating in place.\n", + "\n", + "In this example, we'll illustrate how to use the matrix data access feature. We'll continue with our `ThrownObject` system, and create a model to simulate this using matrix notation (instead of dictionary notation as in the standard model, seen above in `ThrownObject_ST`). The implementation of the model is comparable to a standard model, except that it uses matrix operations within each function, as seen below. \n", + "\n", + "First, the necessary imports." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import time" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To use the matrix data access feature, we'll subclass from our state-transition model defined above, `ThrownObject_ST`. Our new model will therefore inherit the default parameters and methods for initialization, output, threshold met, and event state. 
\n", + "\n", + "To use the matrix data access feature, we'll use matrices to define how the state transitions. Since we are working with a discrete version of the system now, we'll define the `next_state` method, and this will override the `dx` method in the parent class. \n", + "\n", + "In the following, we will use the matrix version for each variable, accessed with `.matrix`. We implement this within `next_state`, but this feature can also be used in other functions. Here, both `x.matrix` and `u.matrix` are column vectors, and `u.matrix` is in the same order as `model.inputs`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ThrownObject_MM(ThrownObject_ST):\n", + " def next_state(self, x, u, dt):\n", + " A = np.array([[0, 1], [0, 0]]) # State transition matrix\n", + " B = np.array([[0], [self[\"g\"]]]) # Acceleration due to gravity\n", + " x.matrix += (np.matmul(A, x.matrix) + B) * dt\n", + "\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Our model is now specified. Let's try simulating with it.\n", + "\n", + "Let's create an instance of the model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m_matrix = ThrownObject_MM()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's simulate to threshold. We'll also time the simulation so we can compare with the non-matrix state-transition model below. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tic_matrix = time.perf_counter()\n", + "\n", + "# Simulate to threshold\n", + "m_matrix.simulate_to_threshold(print=True, events=\"impact\", dt=0.1, save_freq=1)\n", + "\n", + "toc_matrix = time.perf_counter()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Our matrix notation was successful in simulating the thrown object's behavior throughout time. \n", + "\n", + "Finally, let's simulate the non-matrix version to compare computation time. \n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tic_st = time.perf_counter()\n", + "m_st.simulate_to_threshold(print=True, events=\"impact\", dt=0.1, save_freq=1)\n", + "toc_st = time.perf_counter()\n", + "\n", + "print(f\"Matrix execution: {(toc_matrix - tic_matrix) * 1000:0.4f} milliseconds\")\n", + "print(f\"Non-matrix execution: {(toc_st - tic_st) * 1000:0.4f} milliseconds\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we can see, for this system, using the matrix data access feature is computationally faster than a standard state-transition matrix that uses dictionaries.\n", + "\n", + "As illustrated here, the matrix data access feature is an advanced capability that represents the state of a system using matrices. This can provide efficiency for use-cases where the state is easily represented by a tensor and operations are defined by matrices." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### State Limits" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In real-world physical systems, there are often constraints on what values the states can take. 
For example, in the case of a thrown object, if we define our reference frame with the ground at a position of $x=0$, then the position of the object should only be greater than or equal to 0, and should never take on negative values. In ProgPy, we can enforce constraints on the range of each state for a state-transition model using the [state limits](https://nasa.github.io/progpy/prog_models_guide.html#state-limits) attribute. \n", + "\n", + "To illustrate the use of `state_limits`, we'll use our thrown object model `ThrownObject_ST`, created in an above section. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m_limits = ThrownObject_ST()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before adding state limits, let's take a look at the standard model without state limits. We'll consider the event of `impact`, and simulate the object to threshold." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "event = \"impact\"\n", + "simulated_results = m_limits.simulate_to_threshold(events=event, dt=0.005, save_freq=1)\n", + "\n", + "print(\"Example: No State Limits\")\n", + "for i, state in enumerate(simulated_results.states):\n", + " print(f\"State {i}: {state}\")\n", + "print()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that at the end of the simulation, the object's position (`x`) is negative. This doesn't make sense physically, since the object cannot fall below ground level (at $x=0$).\n", + "\n", + "To avoid this, and keep the state in a realistic range, we can change the `state_limits` attribute of the model. The `state_limits` attribute is a dictionary that contains the state limits for each state. The keys of the dictionary are the state names, and the values are tuples that contain the lower and upper limits of the state. \n", + "\n", + "In our Thrown Object model, our states are position, which can range from 0 to infinity, and velocity, which we'll limit to not exceed the speed of light." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Import inf\n", + "from math import inf\n", + "\n", + "m_limits.state_limits = {\n", + " # object position may not go below ground height\n", + " \"x\": (0, inf),\n", + " # object velocity may not exceed the speed of light\n", + " \"v\": (-299792458, 299792458),\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we've specified the ranges for our state values, let's try simulating again. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "event = \"impact\"\n", + "simulated_results = m_limits.simulate_to_threshold(events=event, dt=0.005, save_freq=1)\n", + "\n", + "print(\"Example: With State Limits\")\n", + "for i, state in enumerate(simulated_results.states):\n", + " print(f\"State {i}: {state}\")\n", + "print()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that now the position (`x`) becomes 0 but never reaches a negative value. This is because we have defined a state limit for the `x` state that prevents it from going below 0. Also note that a warning is provided to notify the user that a state value was limited. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's try a more complicated example. 
This time, we'll try setting the initial position value to be a number outside of its bounds. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "x0 = m_limits.initialize(u={}, z={})\n", + "x0[\"x\"] = -1 # Initial position value set to an unrealistic value of -1\n", + "\n", + "simulated_results = m_limits.simulate_to_threshold(\n", + " events=event, dt=0.005, save_freq=1, x=x0\n", + ")\n", + "\n", + "# Print states\n", + "print(\"Example 2: With -1 as initial x value\")\n", + "for i, state in enumerate(simulated_results.states):\n", + " print(\"State \", i, \": \", state)\n", + "print()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that the simulation stops after just two iterations. In this case, the initial position value is outside the state limit. On the first iteration, the position value is therefore adjusted to be within the appropriate range of 0 to $\\infty$. Since we are simulating to impact, which is defined as when position is 0, the threshold is immediately satisfied and the simulation stops. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, note that limits can also be applied manually using the `apply_limits` function. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "x = {\"x\": -5, \"v\": 3e8} # Too fast and below the ground\n", + "print(\"\\t Pre-limit: {}\".format(x))\n", + "\n", + "x = m_limits.apply_limits(x)\n", + "print(\"\\t Post-limit: {}\".format(x))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In conclusion, setting appropriate [state limits](https://nasa.github.io/progpy/prog_models_guide.html#state-limits) is crucial in creating realistic and accurate state-transition models. It ensures that the model's behavior stays within the constraints of the physical system. The limits should be set based on the physical or practical constraints of the system being modeled. \n", + "\n", + "As a final note, state limits are especially important for state estimation (to be discussed in the State Estimation section), as it will force the state estimator to only consider states that are possible or feasible. State estimation will be described in more detail in section __[07 State Estimation](07_State%20Estimation.ipynb)__. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Custom Events" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the examples above, we have focused on the simple event of a thrown object hitting the ground or reaching `impact`. In this section, we highlight additional uses of ProgPy's generalizable concept of `events`. \n", + "\n", + "The term [events](https://nasa.github.io/progpy/prog_models_guide.html#events) is used to describe something to be predicted. Generally in the PHM community, these are referred to as End of Life (`EOL`). However, they can be much more. \n", + "\n", + "In ProgPy, events can be anything that needs to be predicted. Systems will often have multiple failure modes, and each of these modes can be represented by a separate event. Additionally, events can also be used to predict other events of interest other than failure, such as special system states or warning thresholds. Thus, `events` in ProgPy can represent End of Life (`EOL`), End of Mission (`EOM`), warning thresholds, or any Event of Interest (`EOI`). 
\n", + "\n", + "There are a few components of the model that must be specified in order to define events:\n", + "\n", + "1. The `events` property defines the expected events \n", + "\n", + "2. The `threshold_met` method defines the conditions under which an event occurs \n", + "\n", + "3. The `event_state` method returns an estimate of progress towards the threshold \n", + "\n", + "Note that because of the interconnected relationship between `threshold_met` and `event_state`, it is only required to define one of these. However, there are frequently computational advantages to specifying both. \n", + "\n", + "To illustrate this concept, we will use the `BatteryElectroChemEOD` model (see section 03. Included Models). In the standard implementation of this model, the defined event is `EOD` or End of Discharge. This occurs when the voltage drops below a pre-defined threshold value. The State-of-Charge (SOC) of the battery is the event state for the EOD event. Recall that event states (and therefore SOC) vary between 0 and 1, where 1 is healthy and 0 signifies the event has occurred. \n", + "\n", + "Suppose we have the requirement that our battery must not fall below 5% State-of-Charge. This would correspond to an `EOD` event state of 0.05. Additionally, let's add events for two warning thresholds, a $\\text{\\textcolor{yellow}{yellow}}$ threshold at 15% SOC and a $\\text{\\textcolor{red}{red}}$ threshold at 10% SOC. \n", + "\n", + "To define the model, we'll start with the necessary imports." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "from progpy.loading import Piecewise\n", + "from progpy.models import BatteryElectroChemEOD" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's define our threshold values. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "YELLOW_THRESH = 0.15 # 15% SOC\n", + "RED_THRESH = 0.1 # 10% SOC\n", + "THRESHOLD = 0.05 # 5% SOC" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we'll create our model by subclassing from the `BatteryElectroChemEOD` model. First, we'll re-define `events` to include three new events for our two warnings and new threshold value, as well as the event `EOD` from the parent class." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class BattNewEvent(BatteryElectroChemEOD):\n", + " events = BatteryElectroChemEOD.events + [\n", + " \"EOD_warn_yellow\",\n", + " \"EOD_warn_red\",\n", + " \"EOD_requirement_threshold\",\n", + " ]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we'll override the `event_state` method to additionally include calculations for progress towards each of our new events. We'll add yellow, red, and failure states by scaling the EOD state. We scale so that the threshold SOC is 0 at their associated events, while SOC of 1 is still 1. For example, for yellow, we want `EOD_warn_yellow` to be 1 when SOC is 1, and 0 when SOC is 0.15 or lower. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class BattNewEvent(BattNewEvent):\n", + " def event_state(self, state):\n", + " # Get event state from parent\n", + " event_state = super().event_state(state)\n", + "\n", + " # Add yellow, red, and failure states by scaling EOD state\n", + " event_state[\"EOD_warn_yellow\"] = (event_state[\"EOD\"] - YELLOW_THRESH) / (\n", + " 1 - YELLOW_THRESH\n", + " )\n", + " event_state[\"EOD_warn_red\"] = (event_state[\"EOD\"] - RED_THRESH) / (\n", + " 1 - RED_THRESH\n", + " )\n", + " event_state[\"EOD_requirement_threshold\"] = (event_state[\"EOD\"] - THRESHOLD) / (\n", + " 1 - THRESHOLD\n", + " )\n", + "\n", + " return event_state" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we'll override the `threshold_met` method to define when each event occurs. Based on the scaling in `event_state` each event is reached when the corresponding `event_state` value is less than or equal to 0. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class BattNewEvent(BattNewEvent):\n", + " def threshold_met(self, x):\n", + " # Get threshold met from parent\n", + " t_met = super().threshold_met(x)\n", + "\n", + " # Add yell and red states from event_state\n", + " event_state = self.event_state(x)\n", + " t_met[\"EOD_warn_yellow\"] = event_state[\"EOD_warn_yellow\"] <= 0\n", + " t_met[\"EOD_warn_red\"] = event_state[\"EOD_warn_red\"] <= 0\n", + " t_met[\"EOD_requirement_threshold\"] = (\n", + " event_state[\"EOD_requirement_threshold\"] <= 0\n", + " )\n", + "\n", + " return t_met" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With this, we have defined the three key model components for defining new events. \n", + "\n", + "Let's test out the model. First, create an instance of it. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m = BattNewEvent()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Recall that the battery model takes input of current. We will use a piecewise loading scheme (see 01. Simulation)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Variable (piecewise) future loading scheme\n", + "future_loading = Piecewise(\n", + " m.InputContainer, [600, 900, 1800, 3000], {\"i\": [2, 1, 4, 2, 3]}\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can simulate to threshold and plot the results. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "simulated_results = m.simulate_to_threshold(future_loading, events=\"EOD\", print=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "simulated_results.event_states.plot(\n", + " xlabel=\"time (s)\", title=\"BattNewEvent model simulation EOD event state\"\n", + ")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, we can see the SOC plotted for the different events throughout time. The yellow warning (15% SOC) reaches threshold first, followed by the red warning (10% SOC), new EOD threshold (5% SOC), and finally the original EOD value. 
\n", + "\n", + "In this section, we have illustrated how to define custom [events](https://nasa.github.io/progpy/prog_models_guide.html#events) for prognostics models. Events can be used to define anything that a user is interested in predicting, including common values like Remaining Useful Life (RUL) and End of Discharge (EOD), as well as other values like special intermediate states or warning thresholds. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Serialization " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "ProgPy includes a feature to serialize models, which we highlight in this section. \n", + "\n", + "Model serialization has a variety of purposes. For example, serialization allows us to save a specific model or model configuration to a file to be loaded later, or can aid us in sending a model to another machine over a network connection. Some users maintain a directory or repository of configured models representing specific systems in their stock.\n", + "\n", + "In this section, we'll show how to serialize and deserialize model objects using `pickle` and `JSON` methods. \n", + "\n", + "First, we'll import the necessary modules." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import pickle\n", + "import numpy as np\n", + "from progpy.models import BatteryElectroChemEOD\n", + "from progpy.loading import Piecewise" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For this example, we'll use the `BatteryElectroChemEOD` model. We'll start by creating a model object. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "batt = BatteryElectroChemEOD()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First, we'll serialize the model in two different ways using 1) `pickle` and 2) `JSON`. Then, we'll plot the results from simulating the deserialized models to show equivalence of the methods. \n", + "\n", + "To save using the `pickle` package, we'll serialize the model using the `dump` method. Once saved, we can then deserialize using the `load` method. In practice, deserializing will likely occur in a different file or in a later use-case, but here we deserialize to show equivalence of the saved model. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pickle.dump(batt, open(\"save_pkl.pkl\", \"wb\")) # Serialize model\n", + "load_pkl = pickle.load(open(\"save_pkl.pkl\", \"rb\")) # Deserialize model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we'll serialize using the `to_json` method. We deserialize by calling the model directly with the serialized result using the `from_json` method." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "save_json = batt.to_json() # Serialize model\n", + "json_1 = BatteryElectroChemEOD.from_json(save_json) # Deserialize model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that the serialized result can also be saved to a text file and uploaded for later use. 
We demonstrate this below:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "txtFile = open(\"save_json.txt\", \"w\")\n", + "txtFile.write(save_json)\n", + "txtFile.close()\n", + "\n", + "with open(\"save_json.txt\") as infile:\n", + " load_json = infile.read()\n", + "\n", + "json_2 = BatteryElectroChemEOD.from_json(load_json)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We have now serialized and deserialized the model using `pickle` and `JSON` methods. Let's compare the resulting models. To do so, we'll use ProgPy's [simulation](https://nasa.github.io/progpy/prog_models_guide.html#simulation) to simulate the model to threshold and compare the results. \n", + "\n", + "First, we'll need to define our [future loading profile](https://nasa.github.io/progpy/prog_models_guide.html#future-loading) using the PiecewiseLoad class. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Variable (piecewise) future loading scheme\n", + "future_loading = Piecewise(\n", + " batt.InputContainer, [600, 1000, 1500, 3000], {\"i\": [3, 2, 1.5, 4]}\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's simulate each model to threshold using the `simulate_to_threshold` method. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Original model\n", + "results_orig = batt.simulate_to_threshold(future_loading, save_freq=1)\n", + "# Pickled version\n", + "results_pkl = load_pkl.simulate_to_threshold(future_loading, save_freq=1)\n", + "# JSON versions\n", + "results_json_1 = json_1.simulate_to_threshold(future_loading, save_freq=1)\n", + "results_json_2 = json_2.simulate_to_threshold(future_loading, save_freq=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, let's plot the results for comparison." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "voltage_orig = [\n", + " results_orig.outputs[iter][\"v\"] for iter in range(len(results_orig.times))\n", + "]\n", + "voltage_pkl = [results_pkl.outputs[iter][\"v\"] for iter in range(len(results_pkl.times))]\n", + "voltage_json_1 = [\n", + " results_json_1.outputs[iter][\"v\"] for iter in range(len(results_json_1.times))\n", + "]\n", + "voltage_json_2 = [\n", + " results_json_2.outputs[iter][\"v\"] for iter in range(len(results_json_2.times))\n", + "]\n", + "\n", + "plt.plot(results_orig.times, voltage_orig, \"-b\", label=\"Original surrogate\")\n", + "plt.plot(results_pkl.times, voltage_pkl, \"--r\", label=\"Pickled serialized surrogate\")\n", + "plt.plot(\n", + " results_json_1.times, voltage_json_1, \"-.g\", label=\"First JSON serialized surrogate\"\n", + ")\n", + "plt.plot(\n", + " results_json_2.times,\n", + " voltage_json_2,\n", + " \"--y\",\n", + " label=\"Second JSON serialized surrogate\",\n", + ")\n", + "plt.legend()\n", + "plt.title(\"Serialized model simulation outputs\")\n", + "plt.xlabel(\"time (s)\")\n", + "plt.ylabel(\"voltage (V)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "All of the voltage curves overlap, showing that the different serialization methods produce the same results. \n", + "\n", + "Additionally, we can compare the output arrays directly, to ensure equivalence. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Check if the arrays are the same\n", + "are_arrays_same = (\n", + " np.array_equal(voltage_orig, voltage_pkl)\n", + " and np.array_equal(voltage_orig, voltage_json_1)\n", + " and np.array_equal(voltage_orig, voltage_json_2)\n", + ")\n", + "\n", + "print(\n", + " f\"The simulated results from the original and serialized models are {'identical. This means that our serialization works!' if are_arrays_same else 'not identical. This means that our serialization does not work.'}\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To conclude, we have shown how to serialize models in ProgPy using both `pickle` and `JSON` methods. Understanding how to serialize and deserialize models can be a powerful tool for prognostics developers. It enables the saving of models to a disk and the re-loading of these models back into memory at a later time. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simplified Battery Model Example" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is an example of a somewhat more complicated model, in this case a battery. We will be implementing the simplified battery model introduced by [Gina Sierra, et. al.](https://www.sciencedirect.com/science/article/pii/S0951832018301406)\n", + "\n", + "First, we will import `PrognosticsModel`, which the parent class for all ProgPy models." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy import PrognosticsModel" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### State Transition" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The first step to creating a physics-based model is implementing state transition. From the paper we see one state (SOC) and one state transition equation:\n", + "\n", + "$$SOC(k+1) = SOC(k) - P(k)*\\Delta t * E_{crit}^{-1} + w_2(k)$$\n", + "\n", + "where $k$ is discrete time. The $w$ term is process noise. This can be omitted, since it's handled by ProgPy. \n", + "\n", + "In this equation we see one input ($P$, power). Note that the previous battery model uses current, where this uses power. With this information, we can start defining our model. First, we start by declaring our inputs and states:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(PrognosticsModel):\n", + " inputs = [\"P\"]\n", + " states = [\"SOC\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next we define parameters. In this case the parameters are the initial `SOC` state (1) and the `E_crit` (Internal Total Energy). 
We get the value for $E_{crit}$ from the paper.\n", + "\n", + "***Note:** This won't actually subclass in practice, but it's used to break apart model definition into chunks.*" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " default_parameters = {\n", + " \"E_crit\": 202426.858, # Internal Total Energy\n", + " \"x0\": {\n", + " \"SOC\": 1, # State of Charge\n", + " },\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We know that SOC will always be between 0 and 1, so we can specify that explicitly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " state_limits = {\n", + " \"SOC\": (0.0, 1.0),\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we define the state transition equation. There are two methods for doing this: `dx` (for continuous) and `next_state` (for discrete). We will use the `dx` function since the model is continuous." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " def dx(self, x, u):\n", + " return self.StateContainer({\"SOC\": -u[\"P\"] / self[\"E_crit\"]})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Outputs" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that state transition is defined, the next step is to define the outputs of the function. From the paper we have the following output equations:\n", + "\n", + "$$v(k) = v_{oc}(k) - i(k) * R_{int} + \\eta (k)$$\n", + "\n", + "$$v_{oc}(k) = v_L - \\lambda ^ {\\gamma * SOC(k)} - \\mu * e ^ {-\\beta * \\sqrt{SOC(k)}}$$\n", + "\n", + "$$i(k) = \\frac{v_{oc}(k) - \\sqrt{v_{oc}(k)^2 - 4 * R_{int} * P(k)}}{2 * R_{int}(k)}$$" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There is one output here (v, voltage), the same one input (P, Power), and a few lumped parameters: $v_L$, $\\lambda$, $\\gamma$, $\\mu$, $\\beta$, and $R_{int}$. The default parameters are found in the paper.\n", + "\n", + "$\\eta$ is the measurement noise, which ProgPy handles, so that's omitted from the equation below.\n", + "\n", + "***Note**: There is a typo in the paper where the sign of the second term in the $v_{oc}$ term. It should be negative (like above), but is reported as positive in the paper.*\n", + "\n", + "We can update the definition of the model to include this output and parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " outputs = [\"v\"]\n", + "\n", + " default_parameters = {\n", + " \"E_crit\": 202426.858,\n", + " \"v_L\": 11.148,\n", + " \"lambda\": 0.046,\n", + " \"gamma\": 3.355,\n", + " \"mu\": 2.759,\n", + " \"beta\": 8.482,\n", + " \"R_int\": 0.027,\n", + " \"x0\": {\n", + " \"SOC\": 1,\n", + " },\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The input ($P(k)$) is also used in the output equation, which means it's part of the state of the system. We will update the states in `next_state`. 
\n", + "\n", + "Remember that in the earlier example, we defined the state transition with ProgPy's `dx` method because the model was continuous. Here, with the addition of power, the model becomes discrete, so we must now use ProgPy's `next_state` method to define state transition." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " states = [\"SOC\", \"P\"]\n", + "\n", + " def next_state(self, x, u, dt):\n", + " x[\"SOC\"] = x[\"SOC\"] - u[\"P\"] * dt / self[\"E_crit\"]\n", + " x[\"P\"] = u[\"P\"]\n", + "\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will also add a default `P` state." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " default_parameters = {\n", + " \"E_crit\": 202426.858,\n", + " \"v_L\": 11.148,\n", + " \"lambda\": 0.046,\n", + " \"gamma\": 3.355,\n", + " \"mu\": 2.759,\n", + " \"beta\": 8.482,\n", + " \"R_int\": 0.027,\n", + " \"x0\": {\n", + " \"SOC\": 1,\n", + " \"P\": 0.01, # Added P\n", + " },\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we're ready to define the output equations." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import math\n", + "\n", + "\n", + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " def output(self, x):\n", + " v_oc = (\n", + " self[\"v_L\"]\n", + " - self[\"lambda\"] ** (self[\"gamma\"] * x[\"SOC\"])\n", + " - self[\"mu\"] * math.exp(-self[\"beta\"] * math.sqrt(x[\"SOC\"]))\n", + " )\n", + " i = (v_oc - math.sqrt(v_oc**2 - 4 * self[\"R_int\"] * x[\"P\"])) / (\n", + " 2 * self[\"R_int\"]\n", + " )\n", + " v = v_oc - i * self[\"R_int\"]\n", + " return self.OutputContainer({\"v\": v})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Events\n", + "Finally we can define events. This is an easy case because our event state (`SOC`) is part of the model state. So we will simply define a single event (`EOD`: End of Discharge), where `SOC` is progress towards that event." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " events = [\"EOD\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then for our event state, we simply extract the relevant state" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivalentCircuit(SimplifiedEquivalentCircuit):\n", + " def event_state(self, x):\n", + " return {\"EOD\": x[\"SOC\"]}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The threshold of the event is defined as the state where the event state (`EOD`) is 0.\n", + "\n", + "We've now defined a complete model. Now it's ready to be used for state estimation or prognostics, like any model distributed with ProgPy.\n", + "\n", + "Note that this model can be extended by changing the parameters `ecrit` and `r` to steady states. This will help the model account for the effects of aging, since they will be estimated with each state estimation step." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In these examples, we have described how to create new physics-based models. We have illustrated how to construct a generic physics-based model, as well as highlighted some specific types of models including linear models and direct models. We highlighted the matrix data access feature for using matrix operations more efficiently. Additionally, we discussed a few important components of any prognostics model including derived parameters, state limits, and events. \n", + "\n", + "With these tools, users are well-equipped to build their own prognostics models for their specific physics-based use-cases. In the next notebook __[05 Data Driven](05_Data%20Driven.ipynb)__, we'll discuss how to create data-driven models." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.11.0 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.0" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/_downloads/82813b0c0a64173923320c20994e0946/vectorized.py b/docs/_downloads/82813b0c0a64173923320c20994e0946/vectorized.py index 223d6369..93ce8acb 100644 --- a/docs/_downloads/82813b0c0a64173923320c20994e0946/vectorized.py +++ b/docs/_downloads/82813b0c0a64173923320c20994e0946/vectorized.py @@ -8,29 +8,43 @@ from progpy.models.thrown_object import ThrownObject from numpy import array, all + def run_example(): # Step 1: Setup object m = ThrownObject() + def future_load(t, x=None): return {} # No load for thrown objects # Step 2: Setup vectorized initial state # For this example we are saying there are 4 throwers of various strengths and heights - first_state = { - 'x': array([1.75, 1.8, 1.85, 1.9]), - 'v': array([35, 39, 22, 47]) - } + first_state = {"x": array([1.75, 1.8, 1.85, 1.9]), "v": array([35, 39, 22, 47])} # Step 3: Simulate to threshold # Here we are simulating till impact using the first state defined above - (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, x = first_state, threshold_keys=['impact'], print = True, dt=0.1, save_freq=2) + (times, inputs, states, outputs, event_states) = m.simulate_to_threshold( + future_load, + x=first_state, + threshold_keys=["impact"], + print=True, + dt=0.1, + save_freq=2, + ) # Now lets do the same thing but only stop when all hit the ground def thresholds_met_eqn(thresholds_met): - return all(thresholds_met['impact']) # Stop when all impact ground + return all(thresholds_met["impact"]) # Stop when all impact ground + + simulated_results = m.simulate_to_threshold( + future_load, + x=first_state, + thresholds_met_eqn=thresholds_met_eqn, + print=True, + dt=0.1, + save_freq=2, + ) - simulated_results = m.simulate_to_threshold(future_load, x = first_state, thresholds_met_eqn=thresholds_met_eqn, print = True, dt=0.1, save_freq=2) -# This allows the module to be executed directly -if __name__=='__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git 
a/docs/_downloads/847c5bdc6f78e586d5a56a90de4f1d95/playback.py b/docs/_downloads/847c5bdc6f78e586d5a56a90de4f1d95/playback.py new file mode 100644 index 00000000..d4993013 --- /dev/null +++ b/docs/_downloads/847c5bdc6f78e586d5a56a90de4f1d95/playback.py @@ -0,0 +1,168 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. + +""" +This example performs state estimation and prediction using playback data. + +Method: An instance of the BatteryCircuit model in progpy is created, the state estimation is set up by defining a state_estimator, and the prediction method is set up by defining a predictor. + Prediction is then performed using playback data. For each data point: + 1) The necessary data is extracted (time, current load, output values) and corresponding values defined (t, i, and z) + 2) The current state estimate is performed and samples are drawn from this distribution + 3) Prediction performed to get future states (with uncertainty) and the times at which the event threshold will be reached + +Results: + i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction + ii) Time event is predicted to occur (with uncertainty) + iii) Various prediction metrics + iv) Figures illustrating results +""" + +import csv +import numpy as np +from progpy.predictors import ToEPredictionProfile +from progpy.uncertain_data.multivariate_normal_dist import MultivariateNormalDist + +from progpy.models import BatteryCircuit as Battery +# VVV Uncomment this to use Electro Chemistry Model VVV +# from progpy.models import BatteryElectroChem as Battery + +from progpy.state_estimators import UnscentedKalmanFilter as StateEstimator +# VVV Uncomment this to use UnscentedKalmanFilter instead VVV +# from progpy.state_estimators import ParticleFilter as StateEstimator + +from progpy.predictors import UnscentedTransformPredictor as Predictor +# VVV Uncomment this to use MonteCarloPredictor instead +# from progpy.predictors import MonteCarlo as Predictor + +# Constants +NUM_SAMPLES = 20 +NUM_PARTICLES = 1000 # For state estimator (if using ParticleFilter) +TIME_STEP = 1 +PREDICTION_UPDATE_FREQ = 50 # Number of steps between prediction update +PLOT = True +PROCESS_NOISE = 1e-4 # Percentage process noise +MEASUREMENT_NOISE = 1e-4 # Percentage measurement noise +X0_COV = 1 # Covariance percentage with initial state +GROUND_TRUTH = {"EOD": 2780} +ALPHA = 0.05 +BETA = 0.90 +LAMBDA_VALUE = 1500 + + +def run_example(): + # Setup Model + batt = Battery() + + # Initial state + x0 = batt.initialize() + batt.parameters["process_noise"] = { + key: PROCESS_NOISE * value for key, value in x0.items() + } + z0 = batt.output(x0) + batt.parameters["measurement_noise"] = { + key: MEASUREMENT_NOISE * value for key, value in z0.items() + } + x0 = MultivariateNormalDist( + x0.keys(), + list(x0.values()), + np.diag([max(1e-9, X0_COV * abs(x)) for x in x0.values()]), + ) + + # Setup State Estimation + filt = StateEstimator(batt, x0, num_particles=NUM_PARTICLES) + + # Setup Prediction + load = batt.InputContainer({"i": 2.35}) + + def future_loading(t, x=None): + return load + + Q = np.diag([batt.parameters["process_noise"][key] for key in batt.states]) + R = np.diag([batt.parameters["measurement_noise"][key] for key in batt.outputs]) + mc = Predictor(batt, Q=Q, R=R) + + # Run Playback + step = 0 + profile = ToEPredictionProfile() + + with open("examples/data_const_load.csv", "r") as f: + reader = 
csv.reader(f) + next(reader) # Skip header + for row in reader: + step += 1 + print("{} s: {} W, {} C, {} V".format(*row)) + t = float(row[0]) + i = {"i": float(row[1]) / float(row[3])} + z = {"t": float(row[2]), "v": float(row[3])} + + # State Estimation Step + filt.estimate(t, i, z) + eod = batt.event_state(filt.x.mean)["EOD"] + print(" - Event State: ", eod) + + # Prediction Step (every PREDICTION_UPDATE_FREQ steps) + if step % PREDICTION_UPDATE_FREQ == 0: + mc_results = mc.predict( + filt.x, future_loading, t0=t, n_samples=NUM_SAMPLES, dt=TIME_STEP + ) + metrics = mc_results.time_of_event.metrics() + print( + " - ToE: {} (sigma: {})".format( + metrics["EOD"]["mean"], metrics["EOD"]["std"] + ) + ) + profile.add_prediction(t, mc_results.time_of_event) + + # Calculating Prognostic Horizon once the loop completes + from progpy.uncertain_data.uncertain_data import UncertainData + from progpy.metrics import samples as metrics + + def criteria_eqn(tte: UncertainData, ground_truth_tte: dict) -> dict: + """ + Sample criteria equation for playback. + # UPDATE THIS CRITERIA EQN AND WHAT IS CALCULATED + + Args: + tte : UncertainData + Time to event in UncertainData format. + ground_truth_tte : dict + Dictionary of ground truth of time to event. + """ + + # Set an alpha value + bounds = {} + for key, value in ground_truth_tte.items(): + # Set bounds for precentage_in_bounds by adding/subtracting to the ground_truth + alpha_calc = value * ALPHA + bounds[key] = [ + value - alpha_calc, + value + alpha_calc, + ] # Construct bounds for all events + percentage_in_bounds = tte.percentage_in_bounds(bounds) + + # Verify if percentage in bounds for this ground truth meets beta distribution percentage limit + return { + key: percentage_in_bounds[key] > BETA + for key in percentage_in_bounds.keys() + } + + # Generate plots for playback example + playback_plots = profile.plot(GROUND_TRUTH, ALPHA, True) + + # Calculate prognostic horizon with ground truth, and print + ph = profile.prognostic_horizon(criteria_eqn, GROUND_TRUTH) + print(f"Prognostic Horizon for 'EOD': {ph['EOD']}") + + # Calculate alpha lambda with ground truth, lambda, alpha, and beta, and print + al = profile.alpha_lambda(GROUND_TRUTH, LAMBDA_VALUE, ALPHA, BETA) + print(f"Alpha Lambda for 'EOD': {al['EOD']}") + + # Calculate cumulative relative accuracy with ground truth, and print + cra = profile.cumulative_relative_accuracy(GROUND_TRUTH) + print(f"Cumulative Relative Accuracy for 'EOD': {cra['EOD']}") + + input("Press any key to exit") + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/863933ac0f3e7d454c420a01034525b3/linear_model.py b/docs/_downloads/863933ac0f3e7d454c420a01034525b3/linear_model.py index dc648bb8..56f49520 100644 --- a/docs/_downloads/863933ac0f3e7d454c420a01034525b3/linear_model.py +++ b/docs/_downloads/863933ac0f3e7d454c420a01034525b3/linear_model.py @@ -2,14 +2,15 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -This example shows the use of the LinearModel class, a subclass of PrognosticsModel for models that can be described as a linear time series. +This example shows the use of the LinearModel class, a subclass of PrognosticsModel for models that can be described as a linear time series. 
The model is used in a simulation, and the state is printed every second """ import numpy as np from prog_models import LinearModel - + + class ThrownObject(LinearModel): """ Model that simulates an object thrown into the air without air resistance @@ -30,8 +31,8 @@ class ThrownObject(LinearModel): Keyword Args ------------ process_noise : Optional, float or dict[Srt, float] - Process noise (applied at dx/next_state). - Can be number (e.g., .2) applied to every state, a dictionary of values for each + Process noise (applied at dx/next_state). + Can be number (e.g., .2) applied to every state, a dictionary of values for each state (e.g., {'x1': 0.2, 'x2': 0.3}), or a function (x) -> x process_noise_dist : Optional, str distribution for process noise (e.g., normal, uniform, triangular) @@ -51,60 +52,75 @@ class ThrownObject(LinearModel): inputs = [] # no inputs, no way to control states = [ - 'x', # Position (m) - 'v' # Velocity (m/s) - ] + "x", # Position (m) + "v", # Velocity (m/s) + ] outputs = [ - 'x' # Position (m) + "x" # Position (m) ] events = [ - 'impact' # Event- object has impacted ground + "impact" # Event- object has impacted ground ] - # These are the core of the linear model. + # These are the core of the linear model. # Linear models defined by the following equations: # * dx/dt = Ax + Bu + E # * z = Cx + D # * event states = Fx + G - A = np.array([[0, 1], [0, 0]]) # dx/dt = Ax + Bu + E - E = np.array([[0], [-9.81]]) # Acceleration due to gravity (m/s^2) - C = np.array([[1, 0]]) # z = Cx + D - F = None # Will override method + A = np.array([[0, 1], [0, 0]]) # dx/dt = Ax + Bu + E + E = np.array([[0], [-9.81]]) # Acceleration due to gravity (m/s^2) + C = np.array([[1, 0]]) # z = Cx + D + F = None # Will override method # The Default parameters. Overwritten by passing parameters dictionary into constructor default_parameters = { - 'thrower_height': 1.83, # m - 'throwing_speed': 40, # m/s - 'g': -9.81 # Acceleration due to gravity in m/s^2 + "thrower_height": 1.83, # m + "throwing_speed": 40, # m/s + "g": -9.81, # Acceleration due to gravity in m/s^2 } def initialize(self, u=None, z=None): - return self.StateContainer({ - 'x': self.parameters['thrower_height'], # Thrown, so initial altitude is height of thrower - 'v': self.parameters['throwing_speed'] # Velocity at which the ball is thrown - this guy is a professional baseball pitcher - }) - + return self.StateContainer( + { + "x": self.parameters[ + "thrower_height" + ], # Thrown, so initial altitude is height of thrower + "v": self.parameters[ + "throwing_speed" + ], # Velocity at which the ball is thrown - this guy is a professional baseball pitcher + } + ) + # This is actually optional. Leaving thresholds_met empty will use the event state to define thresholds. # Threshold = Event State == 0. 
However, this implementation is more efficient, so we included it def threshold_met(self, x): - return { - 'falling': x['v'] < 0, - 'impact': x['x'] <= 0 - } + return {"falling": x["v"] < 0, "impact": x["x"] <= 0} - def event_state(self, x): - x_max = x['x'] + np.square(x['v'])/(-self.parameters['g']*2) # Use speed and position to estimate maximum height + def event_state(self, x): + x_max = x["x"] + np.square(x["v"]) / ( + -self.parameters["g"] * 2 + ) # Use speed and position to estimate maximum height return { - 'falling': np.maximum(x['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed - 'impact': np.maximum(x['x']/x_max,0) if x['v'] < 0 else 1 # 1 until falling begins, then it's fraction of height + "falling": np.maximum( + x["v"] / self.parameters["throwing_speed"], 0 + ), # Throwing speed is max speed + "impact": np.maximum(x["x"] / x_max, 0) + if x["v"] < 0 + else 1, # 1 until falling begins, then it's fraction of height } + def run_example(): m = ThrownObject() + def future_loading(t, x=None): - return m.InputContainer({}) # No loading - m.simulate_to_threshold(future_loading, print = True, save_freq=1, threshold_keys='impact', dt=0.1) + return m.InputContainer({}) # No loading + + m.simulate_to_threshold( + future_loading, print=True, save_freq=1, threshold_keys="impact", dt=0.1 + ) + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/8845b048e23550d7cd03464611023e4b/chaos.py b/docs/_downloads/8845b048e23550d7cd03464611023e4b/chaos.py index c40e3b92..eaf2a673 100644 --- a/docs/_downloads/8845b048e23550d7cd03464611023e4b/chaos.py +++ b/docs/_downloads/8845b048e23550d7cd03464611023e4b/chaos.py @@ -1,5 +1,5 @@ # Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. -# This ensures that the directory containing examples is in the python search directories +# This ensures that the directory containing examples is in the python search directories """ This example demonstrates the Polynomial Choas Expansion (PCE) Surrogate Direct Model functionality. PCE is a method by which the behavior of a model can be approximated by a polynomial. In this case the relationship between future loading and time of event. The result is a direct surrogate model that can be used to estimate time of event given a loading profile, without requiring the original model to be simulated. The resulting estimation is MUCH faster than simulating the model. @@ -16,15 +16,16 @@ from progpy.data_models import PCE import scipy as sp + def run_example(): # First lets define some constants # Time step used in simulation - DT = 0.5 + DT = 0.5 # The number of samples to used in the PCE # Larger gives a better approximation, but takes longer to generate - N_SAMPLES = 100 + N_SAMPLES = 100 # The distribution of the input current # This defines the expected values for the input @@ -32,27 +33,29 @@ def run_example(): # With a uniform distribution (i.e., no value in that range is more likely than any other) INPUT_CURRENT_DIST = cp.Uniform(3, 8) # Note: These discharge rates are VERY high. This is only for demonstration purposes. 
- # The high discharge rate will accelerate the degradation of the battery, + # The high discharge rate will accelerate the degradation of the battery, # which will cause the example to run faster # Step 1: Define base model # First let's define the base model that we're creating a surrogate for. - m = BatteryElectroChemEOD(process_noise = 0) + m = BatteryElectroChemEOD(process_noise=0) x0 = m.initialize() # Initial State - + # Step 2: Build surrogate # Next we build the surrogate model from the base model # To build the model we pass in the distributions of possible values for each input. # We also provide the max_time. This is the maximum time that the surrogate will be used for. # We dont expect any battery to last more than 4000 seconds given the high discharge curves we're passing in. - m_surrogate = PCE.from_model(m, - x0, # Model State - {'i': INPUT_CURRENT_DIST}, # Distribution of inputs - dt=DT, - times = [i*1000 for i in range(5)], - N = N_SAMPLES) + m_surrogate = PCE.from_model( + m, + x0, # Model State + {"i": INPUT_CURRENT_DIST}, # Distribution of inputs + dt=DT, + times=[i * 1000 for i in range(5)], + N=N_SAMPLES, + ) # The result (m_surrogate) is a model that can be used to VERY quickly estimate time_of_event for a new loading profile. - + # Note: this is only valid for the initial state (x0) of the battery. # To train for another state pass in the parameter x (type StateContainer). # e.g. m_surrogate = PCE.from_model(m, SOME_OTHER_STATE, ...) @@ -71,25 +74,30 @@ def run_example(): def future_loading(t, x=None): return m.InputContainer(interpolator(t)[np.newaxis].T) - TEST_SAMPLES = m_surrogate.parameters['J'].sample(size=N_TEST_CASES, rule='latin_hypercube') + TEST_SAMPLES = m_surrogate.parameters["J"].sample( + size=N_TEST_CASES, rule="latin_hypercube" + ) for i in range(N_TEST_CASES): # Generate a new loading profile - interpolator = sp.interpolate.interp1d(m_surrogate.parameters['times'], TEST_SAMPLES[:, i]) - + interpolator = sp.interpolate.interp1d( + m_surrogate.parameters["times"], TEST_SAMPLES[:, i] + ) + # Estimate time of event from ground truth (original model) and surrogate - gt_results[i] = m.time_of_event(x0, future_loading, dt = DT)['EOD'] - surrogate_results[i] = m_surrogate.time_of_event(x0, future_loading)['EOD'] + gt_results[i] = m.time_of_event(x0, future_loading, dt=DT)["EOD"] + surrogate_results[i] = m_surrogate.time_of_event(x0, future_loading)["EOD"] # Plot results # Note here that the approximation is very good, but not perfect # Approximation would be even better with more samples plt.scatter(gt_results, surrogate_results) max_val = max(max(gt_results), max(surrogate_results)) - plt.plot([0, max_val], [0, max_val], 'k--') + plt.plot([0, max_val], [0, max_val], "k--") plt.xlabel("Ground Truth (s)") plt.ylabel("PCE (s)") plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/8a6417aab284708af62d3e7aa65593f6/state_limits.py b/docs/_downloads/8a6417aab284708af62d3e7aa65593f6/state_limits.py index d6e26b64..73a847fb 100644 --- a/docs/_downloads/8a6417aab284708af62d3e7aa65593f6/state_limits.py +++ b/docs/_downloads/8a6417aab284708af62d3e7aa65593f6/state_limits.py @@ -2,68 +2,81 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating when and how to identify model state limits. +Example demonstrating when and how to identify model state limits. 
""" from progpy.models.thrown_object import ThrownObject from math import inf + def run_example(): # Demo model # Step 1: Create instance of model (without drag) - m = ThrownObject( cd = 0 ) + m = ThrownObject(cd=0) - # Step 2: Setup for simulation + # Step 2: Setup for simulation def future_load(t, x=None): return {} # add state limits m.state_limits = { # object may not go below ground height - 'x': (0, inf), - + "x": (0, inf), # object may not exceed the speed of light - 'v': (-299792458, 299792458) + "v": (-299792458, 299792458), } # Step 3: Simulate to impact - event = 'impact' - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1) - + event = "impact" + simulated_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], dt=0.005, save_freq=1 + ) + # Print states - print('Example 1') + print("Example 1") for i, state in enumerate(simulated_results.states): - print(f'State {i}: {state}') + print(f"State {i}: {state}") print() # Let's try setting x to a number outside of its bounds - x0 = m.initialize(u = {}, z = {}) - x0['x'] = -1 + x0 = m.initialize(u={}, z={}) + x0["x"] = -1 - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1, x = x0) + simulated_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], dt=0.005, save_freq=1, x=x0 + ) # Print states - print('Example 2') + print("Example 2") for i, state in enumerate(simulated_results.states): - print('State ', i, ': ', state) + print("State ", i, ": ", state) print() # Let's see what happens when the objects speed aproaches its limit - x0 = m.initialize(u = {}, z = {}) - x0['x'] = 1000000000 - x0['v'] = 0 - m.parameters['g'] = -50000000 - - print('Example 3') - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=0.3, x = x0, print = True, progress = False) + x0 = m.initialize(u={}, z={}) + x0["x"] = 1000000000 + x0["v"] = 0 + m.parameters["g"] = -50000000 + + print("Example 3") + simulated_results = m.simulate_to_threshold( + future_load, + threshold_keys=[event], + dt=0.005, + save_freq=0.3, + x=x0, + print=True, + progress=False, + ) # Note that the limits can also be applied manually using the apply_limits function - print('limiting states') - x = {'x': -5, 'v': 3e8} # Too fast and below the ground - print('\t Pre-limit: {}'.format(x)) + print("limiting states") + x = {"x": -5, "v": 3e8} # Too fast and below the ground + print("\t Pre-limit: {}".format(x)) x = m.apply_limits(x) - print('\t Post-limit: {}'.format(x)) + print("\t Post-limit: {}".format(x)) + -# This allows the module to be executed directly -if __name__=='__main__': - run_example() \ No newline at end of file +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/8b8068cadf898f26bec76d85c19e7d58/playback.py b/docs/_downloads/8b8068cadf898f26bec76d85c19e7d58/playback.py index 31401168..d4993013 100644 --- a/docs/_downloads/8b8068cadf898f26bec76d85c19e7d58/playback.py +++ b/docs/_downloads/8b8068cadf898f26bec76d85c19e7d58/playback.py @@ -1,15 +1,15 @@ # Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. """ -This example performs state estimation and prediction using playback data. - +This example performs state estimation and prediction using playback data. 
+ Method: An instance of the BatteryCircuit model in progpy is created, the state estimation is set up by defining a state_estimator, and the prediction method is set up by defining a predictor. Prediction is then performed using playback data. For each data point: 1) The necessary data is extracted (time, current load, output values) and corresponding values defined (t, i, and z) 2) The current state estimate is performed and samples are drawn from this distribution 3) Prediction performed to get future states (with uncertainty) and the times at which the event threshold will be reached - -Results: + +Results: i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction ii) Time event is predicted to occur (with uncertainty) iii) Various prediction metrics @@ -35,73 +35,90 @@ # Constants NUM_SAMPLES = 20 -NUM_PARTICLES = 1000 # For state estimator (if using ParticleFilter) +NUM_PARTICLES = 1000 # For state estimator (if using ParticleFilter) TIME_STEP = 1 -PREDICTION_UPDATE_FREQ = 50 # Number of steps between prediction update +PREDICTION_UPDATE_FREQ = 50 # Number of steps between prediction update PLOT = True -PROCESS_NOISE = 1e-4 # Percentage process noise -MEASUREMENT_NOISE = 1e-4 # Percentage measurement noise -X0_COV = 1 # Covariance percentage with initial state -GROUND_TRUTH = {'EOD':2780} +PROCESS_NOISE = 1e-4 # Percentage process noise +MEASUREMENT_NOISE = 1e-4 # Percentage measurement noise +X0_COV = 1 # Covariance percentage with initial state +GROUND_TRUTH = {"EOD": 2780} ALPHA = 0.05 BETA = 0.90 LAMBDA_VALUE = 1500 + def run_example(): # Setup Model batt = Battery() # Initial state x0 = batt.initialize() - batt.parameters['process_noise'] = {key: PROCESS_NOISE * value for key, value in x0.items()} + batt.parameters["process_noise"] = { + key: PROCESS_NOISE * value for key, value in x0.items() + } z0 = batt.output(x0) - batt.parameters['measurement_noise'] = {key: MEASUREMENT_NOISE * value for key, value in z0.items()} - x0 = MultivariateNormalDist(x0.keys(), list(x0.values()), np.diag([max(1e-9, X0_COV * abs(x)) for x in x0.values()])) + batt.parameters["measurement_noise"] = { + key: MEASUREMENT_NOISE * value for key, value in z0.items() + } + x0 = MultivariateNormalDist( + x0.keys(), + list(x0.values()), + np.diag([max(1e-9, X0_COV * abs(x)) for x in x0.values()]), + ) # Setup State Estimation - filt = StateEstimator(batt, x0, num_particles = NUM_PARTICLES) + filt = StateEstimator(batt, x0, num_particles=NUM_PARTICLES) # Setup Prediction - load = batt.InputContainer({'i': 2.35}) + load = batt.InputContainer({"i": 2.35}) + def future_loading(t, x=None): return load - Q = np.diag([batt.parameters['process_noise'][key] for key in batt.states]) - R = np.diag([batt.parameters['measurement_noise'][key] for key in batt.outputs]) - mc = Predictor(batt, Q = Q, R = R) + + Q = np.diag([batt.parameters["process_noise"][key] for key in batt.states]) + R = np.diag([batt.parameters["measurement_noise"][key] for key in batt.outputs]) + mc = Predictor(batt, Q=Q, R=R) # Run Playback step = 0 profile = ToEPredictionProfile() - - with open('examples/data_const_load.csv', 'r') as f: + + with open("examples/data_const_load.csv", "r") as f: reader = csv.reader(f) - next(reader) # Skip header + next(reader) # Skip header for row in reader: step += 1 print("{} s: {} W, {} C, {} V".format(*row)) t = float(row[0]) - i = {'i': float(row[1])/float(row[3])} - z = {'t': float(row[2]), 'v': float(row[3])} + i = {"i": float(row[1]) / float(row[3])} + z = {"t": 
float(row[2]), "v": float(row[3])} # State Estimation Step filt.estimate(t, i, z) - eod = batt.event_state(filt.x.mean)['EOD'] + eod = batt.event_state(filt.x.mean)["EOD"] print(" - Event State: ", eod) # Prediction Step (every PREDICTION_UPDATE_FREQ steps) - if (step%PREDICTION_UPDATE_FREQ == 0): - mc_results = mc.predict(filt.x, future_loading, t0 = t, n_samples=NUM_SAMPLES, dt=TIME_STEP) + if step % PREDICTION_UPDATE_FREQ == 0: + mc_results = mc.predict( + filt.x, future_loading, t0=t, n_samples=NUM_SAMPLES, dt=TIME_STEP + ) metrics = mc_results.time_of_event.metrics() - print(' - ToE: {} (sigma: {})'.format(metrics['EOD']['mean'], metrics['EOD']['std'])) + print( + " - ToE: {} (sigma: {})".format( + metrics["EOD"]["mean"], metrics["EOD"]["std"] + ) + ) profile.add_prediction(t, mc_results.time_of_event) # Calculating Prognostic Horizon once the loop completes from progpy.uncertain_data.uncertain_data import UncertainData from progpy.metrics import samples as metrics - def criteria_eqn(tte : UncertainData, ground_truth_tte : dict) -> dict: + def criteria_eqn(tte: UncertainData, ground_truth_tte: dict) -> dict: """ - Sample criteria equation for playback. + Sample criteria equation for playback. # UPDATE THIS CRITERIA EQN AND WHAT IS CALCULATED Args: @@ -110,17 +127,23 @@ def criteria_eqn(tte : UncertainData, ground_truth_tte : dict) -> dict: ground_truth_tte : dict Dictionary of ground truth of time to event. """ - + # Set an alpha value bounds = {} for key, value in ground_truth_tte.items(): # Set bounds for precentage_in_bounds by adding/subtracting to the ground_truth alpha_calc = value * ALPHA - bounds[key] = [value - alpha_calc, value + alpha_calc] # Construct bounds for all events + bounds[key] = [ + value - alpha_calc, + value + alpha_calc, + ] # Construct bounds for all events percentage_in_bounds = tte.percentage_in_bounds(bounds) - + # Verify if percentage in bounds for this ground truth meets beta distribution percentage limit - return {key: percentage_in_bounds[key] > BETA for key in percentage_in_bounds.keys()} + return { + key: percentage_in_bounds[key] > BETA + for key in percentage_in_bounds.keys() + } # Generate plots for playback example playback_plots = profile.plot(GROUND_TRUTH, ALPHA, True) @@ -137,8 +160,9 @@ def criteria_eqn(tte : UncertainData, ground_truth_tte : dict) -> dict: cra = profile.cumulative_relative_accuracy(GROUND_TRUTH) print(f"Cumulative Relative Accuracy for 'EOD': {cra['EOD']}") - input('Press any key to exit') + input("Press any key to exit") + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/8db66840e6962d40de46c89261779dea/10_Prognostics Server.ipynb b/docs/_downloads/8db66840e6962d40de46c89261779dea/10_Prognostics Server.ipynb new file mode 100644 index 00000000..f7300291 --- /dev/null +++ b/docs/_downloads/8db66840e6962d40de46c89261779dea/10_Prognostics Server.ipynb @@ -0,0 +1,568 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Prognostics Server (prog_server)\n", + "\n", + "The ProgPy Server (`prog_server`) is a simplified implementation of a Service-Oriented Architecture (SOA) for performing prognostics (estimation of time until events and future system states) of engineering systems. `prog_server` is a wrapper around the ProgPy package, allowing one or more users to access the features of these packages through a REST API. 
The package is intended to be used as a research tool to prototype and benchmark Prognostics As-A-Service (PaaS) architectures and work on the challenges facing such architectures, including Generality, Communication, Security, Environmental Complexity, Utility, and Trust.\n", + "\n", + "The ProgPy Server is actually two packages, `prog_server` and `prog_client`. The `prog_server` package is a prognostics server that provides the REST API. The `prog_client` package is a python client that provides functions to interact with the server via the REST API.\n", + "\n", + "**TODO(CT): IMAGE- server with clients**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "* [Installing](#Installing)\n", + "* [Starting prog_server](#Starting-prog_server)\n", + " * [Command Line](#Command-Line)\n", + " * [Programmtically](#Programatically)\n", + "* [Using prog_server with prog_client](#Using-prog_server-with-prog_client)\n", + " * [Online Prognostics Example](#Online-Prognostics-Example)\n", + " * [Option Scoring Example](#Option-scoring-example)\n", + "* [Using prog_server with REST Interface](#Using-prog_server-with-REST-Interface)\n", + "* [Custom Models](#Custom-Models)\n", + "* [Closing prog_server](#Closing-prog_server)\n", + "* [Conclusion](#Conclusion)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Installing\n", + "\n", + "`prog_server` can be installed using pip\n", + "\n", + "```console\n", + "$ pip install prog_server\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Starting prog_server" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`prog_server` can be started through the command line or programatically (i.e., in a python script). Once the server is started, it will take a short time to initialize. Then, it will start receiving requests for sessions from clients using `prog_client`, or interacting directly using the REST interface." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Command Line\n", + "Generally, you can start `prog_server` by running the module, like this:\n", + "\n", + "```console\n", + "$ python -m prog_server\n", + "```\n", + "\n", + "Note that you can force the server to start in debug mode using the `debug` flag. For example, `python -m prog_server --debug`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Programatically\n", + "There are two methods to start the `prog_server` programatically in python. The first, below, is non-blocking and allows users to perform other functions while the server is running." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import prog_server\n", + "\n", + "prog_server.start()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When starting a server, users can also provide arguments to customize the way the server runs. Here are the main arguments used:\n", + "\n", + "* `host` (str): Server host address. Defaults to ‘127.0.0.1’\n", + "* `port` (int): Server port address. Defaults to 8555\n", + "* `debug` (bool): If the server is to be started in debug mode\n", + "\n", + "Now `prog_server` is ready to start receiving session requests from users. 
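For reference, a sketch of the same call with these arguments passed explicitly; the values are arbitrary, and it is not meant to be run while a server is already active:

```python
import prog_server

# Start a server on an explicit host/port in debug mode.
# Only needed when the defaults above are not suitable.
prog_server.start(host="127.0.0.1", port=8080, debug=True)
```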
The server can also be stopped using the `stop()` function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prog_server.stop()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`prog_server` can also be started in blocked mode using the following command:\n", + "\n", + "```python\n", + ">>> prog_server.run()\n", + "```\n", + "\n", + "We will not execute it here, because it would block execution in this notebook until we force quit.\n", + "\n", + "For details on all supported arguments, see the [API Doc](https://nasa.github.io/progpy/api_ref/prog_server/prog_server.html#prog_server.start).\n", + "\n", + "The basis of `prog_server` is the session. Each user creates one or more session. These sessions are each a request for prognostic services. Then the user can interact with the open session. You'll see examples of this in the future sections.\n", + "\n", + "Let's restart the server again so it can be used with the below examples." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prog_server.start()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using prog_server with prog_client\n", + "\n", + "For users using python, `prog_server` can be interacted with using the `prog_client` package distributed with ProgPy. This section describes a few examples using `prog_client` and `prog_server` together.\n", + "\n", + "We will first import the needed package." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import prog_client" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Online Prognostics Example\n", + "This example creates a session with the server to run prognostics for a Thrown Object, a simplified model of an object thrown into the air. Data is then sent to the server and a prediction is requested. The prediction is then displayed.\n", + "\n", + "**Note: before running this example, make sure `prog_server` is running.**\n", + "\n", + "The first step is to open a session with the server. This starts a session for prognostics with the ThrownObject model, with default parameters. The prediction configuration is updated to have a save frequency of every 1 second." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session = prog_client.Session(\"ThrownObject\", pred_cfg={\"save_freq\": 1})\n", + "print(session) # Printing the Session Information" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you were to re-run the lines above, it would start a new session, with a new number.\n", + "\n", + "Next, we need to prepare the data we will use for this example. The data is a dictionary, and the keys are the names of the inputs and outputs in the model with format (time, value).\n", + "\n", + "Note that in an actual application, the data would be received from a sensor or other source. The structure below is used to emulate the sensor." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "example_data = [\n", + " (0, {\"x\": 1.83}),\n", + " (0.1, {\"x\": 5.81}),\n", + " (0.2, {\"x\": 9.75}),\n", + " (0.3, {\"x\": 13.51}),\n", + " (0.4, {\"x\": 17.20}),\n", + " (0.5, {\"x\": 20.87}),\n", + " (0.6, {\"x\": 24.37}),\n", + " (0.7, {\"x\": 27.75}),\n", + " (0.8, {\"x\": 31.09}),\n", + " (0.9, {\"x\": 34.30}),\n", + " (1.0, {\"x\": 37.42}),\n", + " (1.1, {\"x\": 40.43}),\n", + " (1.2, {\"x\": 43.35}),\n", + " (1.3, {\"x\": 46.17}),\n", + " (1.4, {\"x\": 48.91}),\n", + " (1.5, {\"x\": 51.53}),\n", + " (1.6, {\"x\": 54.05}),\n", + " (1.7, {\"x\": 56.50}),\n", + " (1.8, {\"x\": 58.82}),\n", + " (1.9, {\"x\": 61.05}),\n", + " (2.0, {\"x\": 63.20}),\n", + " (2.1, {\"x\": 65.23}),\n", + " (2.2, {\"x\": 67.17}),\n", + " (2.3, {\"x\": 69.02}),\n", + " (2.4, {\"x\": 70.75}),\n", + " (2.5, {\"x\": 72.40}),\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, we can start sending the data to the server, checking periodically to see if there is a completed prediction." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from time import sleep\n", + "\n", + "LAST_PREDICTION_TIME = None\n", + "for i in range(len(example_data)):\n", + " # Send data to server\n", + " print(f\"{example_data[i][0]}s: Sending data to server... \", end=\"\")\n", + " session.send_data(time=example_data[i][0], **example_data[i][1])\n", + "\n", + " # Check for a prediction result\n", + " status = session.get_prediction_status()\n", + " if LAST_PREDICTION_TIME != status[\"last prediction\"]:\n", + " # New prediction result\n", + " LAST_PREDICTION_TIME = status[\"last prediction\"]\n", + " print(\"Prediction Completed\")\n", + "\n", + " # Get prediction\n", + " # Prediction is returned as a type uncertain_data, so you can manipulate it like that datatype.\n", + " # See https://nasa.github.io/prog_algs/uncertain_data.html\n", + " t, prediction = session.get_predicted_toe()\n", + " print(f\"Predicted ToE (using state from {t}s): \")\n", + " print(prediction.mean)\n", + "\n", + " # Get Predicted future states\n", + " # You can also get the predicted future states of the model.\n", + " # States are saved according to the prediction configuration parameter 'save_freq' or 'save_pts'\n", + " # In this example we have it setup to save every 1 second.\n", + " # Return type is UnweightedSamplesPrediction (since we're using the monte carlo predictor)\n", + " # See https://nasa.github.io/prog_algs\n", + " t, event_states = session.get_predicted_event_state()\n", + " print(f\"Predicted Event States (using state from {t}s): \")\n", + " es_means = [\n", + " (event_states.times[i], event_states.snapshot(i).mean)\n", + " for i in range(len(event_states.times))\n", + " ]\n", + " for time, es_mean in es_means:\n", + " print(f\"\\t{time}s: {es_mean}\")\n", + "\n", + " # Note: you can also get the predicted future states of the model (see get_predicted_states()) or performance parameters (see get_predicted_performance_metrics())\n", + "\n", + " else:\n", + " print(\"No prediction yet\")\n", + " # No updated prediction, send more data and check again later.\n", + " sleep(0.1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that the prediction wasn't updated every time step. 
It takes a bit of time to perform a prediction.\n", + "\n", + "Note that we can also get the model from `prog_server` to work with directly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model = session.get_model()\n", + "\n", + "print(model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Option Scoring Example\n", + "\n", + "This example creates a session with the server to run prognostics for a `BatteryCircuit` model. Three options with different loading profiles are compared by creating a session for each option and comparing the resulting prediction metrics.\n", + "\n", + "First step is to prepare load profiles to compare. Each load profile has format `Array[Dict]`. Where each dict is in format `{TIME: LOAD}`, where `TIME` is the start of that loading in seconds. `LOAD` is a dict with keys corresponding to model.inputs. Note that the dict must be in order of increasing time.\n", + "\n", + "Here we introduce 3 load profiles to be used with simulation:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plan0 = {0: {\"i\": 2}, 600: {\"i\": 1}, 900: {\"i\": 4}, 1800: {\"i\": 2}, 3000: {\"i\": 3}}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plan1 = {0: {\"i\": 3}, 900: {\"i\": 2}, 1000: {\"i\": 3.5}, 2000: {\"i\": 2.5}, 2300: {\"i\": 3}}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plan2 = {\n", + " 0: {\"i\": 1.25},\n", + " 800: {\"i\": 2},\n", + " 1100: {\"i\": 2.5},\n", + " 2200: {\"i\": 6},\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "LOAD_PROFILES = [plan0, plan1, plan2]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The next step is to open a session with the battery circuit model for each of the 3 plans. We are specifying a time of interest of 2000 seconds (for the sake of a demo). This could be the end of a mission/session, or some inspection time." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sessions = [\n", + " prog_client.Session(\n", + " \"BatteryCircuit\",\n", + " pred_cfg={\"save_pts\": [2000], \"save_freq\": 1e99, \"n_samples\": 15},\n", + " load_est=\"Variable\",\n", + " load_est_cfg=LOAD_PROFILES[i],\n", + " )\n", + " for i in range(len(LOAD_PROFILES))\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's wait for prognostics to be complete." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for session in sessions:\n", + " sessions_in_progress = True\n", + " while sessions_in_progress:\n", + " sessions_in_progress = False\n", + " status = session.get_prediction_status()\n", + " if status[\"in progress\"] != 0:\n", + " print(f\"\\tSession {session.session_id} is still in progress\")\n", + " sessions_in_progress = True\n", + " sleep(5)\n", + " print(f\"\\tSession {session.session_id} complete\")\n", + "print(\"All sessions complete\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that the sessions are complete, we can get the results." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = [session.get_predicted_toe()[1] for session in sessions]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's compare results. Let's look at the mean Time to Event (`ToE`):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Mean ToE:\")\n", + "best_toe = 0\n", + "best_plan = None\n", + "for i in range(len(results)):\n", + " mean_toe = results[i].mean[\"EOD\"]\n", + " print(f\"\\tOption {i}: {mean_toe:0.2f}s\")\n", + " if mean_toe > best_toe:\n", + " best_toe = mean_toe\n", + " best_plan = i\n", + "print(f\"Best option using method 1: Option {best_plan}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As a second metric, let's look at the `SOC` at our point of interest (2000 seconds)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "best_soc = 0\n", + "best_plan = None\n", + "soc = [session.get_predicted_event_state()[1] for session in sessions]\n", + "for i in range(len(soc)):\n", + " mean_soc = soc[i].snapshot(-1).mean[\"EOD\"]\n", + " print(f\"\\tOption {i}: {mean_soc:0.3f} SOC\")\n", + " if mean_soc > best_soc:\n", + " best_soc = mean_soc\n", + " best_plan = i\n", + "print(f\"Best option using method 2: Option {best_plan}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Other metrics can be used as well, like probability of mission success given a certain mission time, uncertainty in `ToE` estimate, final state at end of mission, among others." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using prog_server with REST Interface\n", + "\n", + "Communication with ProgPy is through a REST interface. The REST API is described here: [prog_server REST API](https://app.swaggerhub.com/apis-docs/teubert/prog_server/).\n", + "\n", + "Most programming languages have a way of interacting with REST APIs (either native or through a package/library). `curl` requests can also be used by command line or apps like Postman." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom Models\n", + "**A version of this section will be added in release v1.9** " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Closing prog_server\n", + "When you're done using prog_server, make sure you turn off the server." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prog_server.stop()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this section, we have demonstrated how to use the ProgPy server, including `prog_server` and `prog_client`. This is the last notebook in the ProgPy tutorial series.\n", + "\n", + "For more information about ProgPy in general, check out the __[00 Intro](00_Intro.ipynb)__ notebook and [ProgPy documentation](https://nasa.github.io/progpy/index.html)." 
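Returning to the option-scoring example, one of those additional metrics, probability of mission success, could be estimated directly from the ToE predictions. A sketch, assuming a 2000 s mission and that the returned `UncertainData` objects support `percentage_in_bounds`:

```python
from math import inf

# Fraction of each option's ToE prediction that falls beyond the mission time,
# used here as a rough probability of completing a 2000 s mission.
MISSION_TIME = 2000
for i, toe in enumerate(results):
    p_success = toe.percentage_in_bounds({"EOD": [MISSION_TIME, inf]})["EOD"]
    print(f"Option {i}: estimated probability of mission success = {p_success:.2f}")
```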
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.11.0 ('env': venv)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "71ccad9e81d0b15f7bb5ef75e2d2ca570011b457fb5a41421e3ae9c0e4c33dfc" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/_downloads/8e88b82d4689eed3ccf37d5a3def248e/predict_specific_event.py b/docs/_downloads/8e88b82d4689eed3ccf37d5a3def248e/predict_specific_event.py index 96d7d0f8..87b0b6c0 100644 --- a/docs/_downloads/8e88b82d4689eed3ccf37d5a3def248e/predict_specific_event.py +++ b/docs/_downloads/8e88b82d4689eed3ccf37d5a3def248e/predict_specific_event.py @@ -7,12 +7,14 @@ from prog_algs import state_estimators, predictors from progpy.models.thrown_object import ThrownObject + def run_example(): ## Setup m = ThrownObject() initial_state = m.initialize() - load = m.InputContainer({}) # Optimization - create once - def future_loading(t, x = None): + load = m.InputContainer({}) # Optimization - create once + + def future_loading(t, x=None): return load ## State Estimation - perform a single ukf state estimate step @@ -24,19 +26,22 @@ def future_loading(t, x = None): pred = predictors.UnscentedTransformPredictor(m) # Predict with a step size of 0.1 - mc_results = pred.predict(filt.x, future_loading, dt=0.1, save_freq= 1, events=['impact']) + mc_results = pred.predict( + filt.x, future_loading, dt=0.1, save_freq=1, events=["impact"] + ) # Print Results for i, time in enumerate(mc_results.times): - print('\nt = {}'.format(time)) - print('\tu = {}'.format(mc_results.inputs.snapshot(i).mean)) - print('\tx = {}'.format(mc_results.states.snapshot(i).mean)) - print('\tz = {}'.format(mc_results.outputs.snapshot(i).mean)) - print('\tevent state = {}'.format(mc_results.states.snapshot(i).mean)) + print("\nt = {}".format(time)) + print("\tu = {}".format(mc_results.inputs.snapshot(i).mean)) + print("\tx = {}".format(mc_results.states.snapshot(i).mean)) + print("\tz = {}".format(mc_results.outputs.snapshot(i).mean)) + print("\tevent state = {}".format(mc_results.states.snapshot(i).mean)) # Note only impact event is shown here - print('\nToE:', mc_results.time_of_event.mean) + print("\nToE:", mc_results.time_of_event.mean) + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/90ec5abc077be09b4e81e355b1489fc5/new_model.ipynb b/docs/_downloads/90ec5abc077be09b4e81e355b1489fc5/new_model.ipynb index f49a2219..9a1c279c 100644 --- a/docs/_downloads/90ec5abc077be09b4e81e355b1489fc5/new_model.ipynb +++ b/docs/_downloads/90ec5abc077be09b4e81e355b1489fc5/new_model.ipynb @@ -1,54 +1,189 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample defining and testing a new model. 
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from prog_models import PrognosticsModel\n\n\nclass ThrownObject(PrognosticsModel):\n \"\"\"\n Model that similates an object thrown into the air without air resistance\n \"\"\"\n\n inputs = [] # no inputs, no way to control\n states = [\n 'x', # Position (m) \n 'v' # Velocity (m/s)\n ]\n outputs = [ # Anything we can measure\n 'x' # Position (m)\n ]\n events = [\n 'falling', # Event- object is falling\n 'impact' # Event- object has impacted ground\n ]\n\n # The Default parameters. Overwritten by passing parameters dictionary into constructor\n default_parameters = {\n 'thrower_height': 1.83, # m\n 'throwing_speed': 40, # m/s\n 'g': -9.81, # Acceleration due to gravity in m/s^2\n 'process_noise': 0.0 # amount of noise in each step\n }\n\n def initialize(self, u, z):\n self.max_x = 0.0\n return self.StateContainer({\n 'x': self.parameters['thrower_height'], # Thrown, so initial altitude is height of thrower\n 'v': self.parameters['throwing_speed'] # Velocity at which the ball is thrown - this guy is a professional baseball pitcher\n })\n \n def dx(self, x, u):\n return self.StateContainer({'x': x['v'],\n 'v': self.parameters['g']}) # Acceleration of gravity\n\n def output(self, x):\n return self.OutputContainer({'x': x['x']})\n\n # This is actually optional. Leaving thresholds_met empty will use the event state to define thresholds.\n # Threshold = Event State == 0. However, this implementation is more efficient, so we included it\n def threshold_met(self, x):\n return {\n 'falling': x['v'] < 0,\n 'impact': x['x'] <= 0\n }\n\n def event_state(self, x): \n self.max_x = max(self.max_x, x['x']) # Maximum altitude\n return {\n 'falling': max(x['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed\n 'impact': max(x['x']/self.max_x,0) # 1 until falling begins, then it's fraction of height\n }\n\ndef run_example():\n # Demo model\n # Step 1: Create instance of model\n m = ThrownObject()\n\n # Step 2: Setup for simulation \n def future_load(t, x=None):\n return m.InputContainer({}) # No inputs, no way to control\n\n # Step 3: Simulate to impact\n event = 'impact'\n simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1, print = True)\n \n # Print flight time\n print('The object hit the ground in {} seconds'.format(round(simulated_results.times[-1],2)))\n\n # OK, now lets compare performance on different heavenly bodies. 
\n # This requires that we update the cofiguration\n grav_moon = -1.62\n\n # The first way to change the configuration is to pass in your desired config into construction of the model\n m = ThrownObject(g = grav_moon)\n simulated_moon_results = m.simulate_to_threshold(future_load, threshold_keys=[event], options={'dt':0.005, 'save_freq':1})\n\n grav_mars = -3.711\n # You can also update the parameters after it's constructed\n m.parameters['g'] = grav_mars\n simulated_mars_results = m.simulate_to_threshold(future_load, threshold_keys=[event], options={'dt':0.005, 'save_freq':1})\n\n grav_venus = -8.87\n m.parameters['g'] = grav_venus\n simulated_venus_results = m.simulate_to_threshold(future_load, threshold_keys=[event], options={'dt':0.005, 'save_freq':1})\n\n print('Time to hit the ground: ')\n print('\\tvenus: {}s'.format(round(simulated_venus_results.times[-1],2)))\n print('\\tearth: {}s'.format(round(simulated_results.times[-1],2)))\n print('\\tmars: {}s'.format(round(simulated_mars_results.times[-1],2)))\n print('\\tmoon: {}s'.format(round(simulated_moon_results.times[-1],2)))\n\n # We can also simulate until any event is met by neglecting the threshold_keys argument\n simulated_results = m.simulate_to_threshold(future_load, options={'dt':0.005, 'save_freq':1})\n threshs_met = m.threshold_met(simulated_results.states[-1])\n for (key, met) in threshs_met.items():\n if met:\n event_occured = key\n print('\\nThis event that occured first: ', event_occured)\n # It falls before it hits the gorund, obviously\n\n # Metrics can be analyzed from the simulation results. For example: monotonicity\n print('\\nMonotonicity: ', simulated_results.event_states.monotonicity())\n\n# This allows the module to be executed directly \nif __name__=='__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample defining and testing a new model. \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from prog_models import PrognosticsModel\n", + "\n", + "\n", + "class ThrownObject(PrognosticsModel):\n", + " \"\"\"\n", + " Model that similates an object thrown into the air without air resistance\n", + " \"\"\"\n", + "\n", + " inputs = [] # no inputs, no way to control\n", + " states = [\n", + " \"x\", # Position (m)\n", + " \"v\", # Velocity (m/s)\n", + " ]\n", + " outputs = [ # Anything we can measure\n", + " \"x\" # Position (m)\n", + " ]\n", + " events = [\n", + " \"falling\", # Event- object is falling\n", + " \"impact\", # Event- object has impacted ground\n", + " ]\n", + "\n", + " # The Default parameters. 
Overwritten by passing parameters dictionary into constructor\n", + " default_parameters = {\n", + " \"thrower_height\": 1.83, # m\n", + " \"throwing_speed\": 40, # m/s\n", + " \"g\": -9.81, # Acceleration due to gravity in m/s^2\n", + " \"process_noise\": 0.0, # amount of noise in each step\n", + " }\n", + "\n", + " def initialize(self, u, z):\n", + " self.max_x = 0.0\n", + " return self.StateContainer(\n", + " {\n", + " \"x\": self.parameters[\n", + " \"thrower_height\"\n", + " ], # Thrown, so initial altitude is height of thrower\n", + " \"v\": self.parameters[\n", + " \"throwing_speed\"\n", + " ], # Velocity at which the ball is thrown - this guy is a professional baseball pitcher\n", + " }\n", + " )\n", + "\n", + " def dx(self, x, u):\n", + " return self.StateContainer(\n", + " {\"x\": x[\"v\"], \"v\": self.parameters[\"g\"]}\n", + " ) # Acceleration of gravity\n", + "\n", + " def output(self, x):\n", + " return self.OutputContainer({\"x\": x[\"x\"]})\n", + "\n", + " # This is actually optional. Leaving thresholds_met empty will use the event state to define thresholds.\n", + " # Threshold = Event State == 0. However, this implementation is more efficient, so we included it\n", + " def threshold_met(self, x):\n", + " return {\"falling\": x[\"v\"] < 0, \"impact\": x[\"x\"] <= 0}\n", + "\n", + " def event_state(self, x):\n", + " self.max_x = max(self.max_x, x[\"x\"]) # Maximum altitude\n", + " return {\n", + " \"falling\": max(\n", + " x[\"v\"] / self.parameters[\"throwing_speed\"], 0\n", + " ), # Throwing speed is max speed\n", + " \"impact\": max(\n", + " x[\"x\"] / self.max_x, 0\n", + " ), # 1 until falling begins, then it's fraction of height\n", + " }\n", + "\n", + "\n", + "def run_example():\n", + " # Demo model\n", + " # Step 1: Create instance of model\n", + " m = ThrownObject()\n", + "\n", + " # Step 2: Setup for simulation\n", + " def future_load(t, x=None):\n", + " return m.InputContainer({}) # No inputs, no way to control\n", + "\n", + " # Step 3: Simulate to impact\n", + " event = \"impact\"\n", + " simulated_results = m.simulate_to_threshold(\n", + " future_load, threshold_keys=[event], dt=0.005, save_freq=1, print=True\n", + " )\n", + "\n", + " # Print flight time\n", + " print(\n", + " \"The object hit the ground in {} seconds\".format(\n", + " round(simulated_results.times[-1], 2)\n", + " )\n", + " )\n", + "\n", + " # OK, now lets compare performance on different heavenly bodies.\n", + " # This requires that we update the cofiguration\n", + " grav_moon = -1.62\n", + "\n", + " # The first way to change the configuration is to pass in your desired config into construction of the model\n", + " m = ThrownObject(g=grav_moon)\n", + " simulated_moon_results = m.simulate_to_threshold(\n", + " future_load, threshold_keys=[event], options={\"dt\": 0.005, \"save_freq\": 1}\n", + " )\n", + "\n", + " grav_mars = -3.711\n", + " # You can also update the parameters after it's constructed\n", + " m.parameters[\"g\"] = grav_mars\n", + " simulated_mars_results = m.simulate_to_threshold(\n", + " future_load, threshold_keys=[event], options={\"dt\": 0.005, \"save_freq\": 1}\n", + " )\n", + "\n", + " grav_venus = -8.87\n", + " m.parameters[\"g\"] = grav_venus\n", + " simulated_venus_results = m.simulate_to_threshold(\n", + " future_load, threshold_keys=[event], options={\"dt\": 0.005, \"save_freq\": 1}\n", + " )\n", + "\n", + " print(\"Time to hit the ground: \")\n", + " print(\"\\tvenus: {}s\".format(round(simulated_venus_results.times[-1], 2)))\n", + " print(\"\\tearth: 
{}s\".format(round(simulated_results.times[-1], 2)))\n", + " print(\"\\tmars: {}s\".format(round(simulated_mars_results.times[-1], 2)))\n", + " print(\"\\tmoon: {}s\".format(round(simulated_moon_results.times[-1], 2)))\n", + "\n", + " # We can also simulate until any event is met by neglecting the threshold_keys argument\n", + " simulated_results = m.simulate_to_threshold(\n", + " future_load, options={\"dt\": 0.005, \"save_freq\": 1}\n", + " )\n", + " threshs_met = m.threshold_met(simulated_results.states[-1])\n", + " for key, met in threshs_met.items():\n", + " if met:\n", + " event_occured = key\n", + " print(\"\\nThis event that occured first: \", event_occured)\n", + " # It falls before it hits the gorund, obviously\n", + "\n", + " # Metrics can be analyzed from the simulation results. For example: monotonicity\n", + " print(\"\\nMonotonicity: \", simulated_results.event_states.monotonicity())\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/912ba6f560fc796ecfebae899799fbca/uav_dynamics_model.py b/docs/_downloads/912ba6f560fc796ecfebae899799fbca/uav_dynamics_model.py index 81092422..8c21aa33 100644 --- a/docs/_downloads/912ba6f560fc796ecfebae899799fbca/uav_dynamics_model.py +++ b/docs/_downloads/912ba6f560fc796ecfebae899799fbca/uav_dynamics_model.py @@ -16,10 +16,7 @@ def run_example(): # Initialize vehicle vehicle = SmallRotorcraft( - dt=0.05, - vehicle_model='tarot18', - process_noise=0, - measurement_noise=0 + dt=0.05, vehicle_model="tarot18", process_noise=0, measurement_noise=0 ) # EXAMPLE 1: @@ -33,20 +30,92 @@ def run_example(): # Here, we specify waypoints in a dictionary and then pass # lat/lon/alt/ETAs into the trajectory class - lat_deg = np.array([37.09776, 37.09776, 37.09776, 37.09798, 37.09748, 37.09665, 37.09703, 37.09719, 37.09719, 37.09719, 37.09719, 37.09748, 37.09798, 37.09776, 37.09776]) - lon_deg = np.array([-76.38631, -76.38629, -76.38629, -76.38589, -76.3848, -76.38569, -76.38658, -76.38628, -76.38628, -76.38628, -76.38628, -76.3848, -76.38589, -76.38629, -76.38629]) - alt_ft = np.array([-1.9682394, 164.01995, 164.01995, 164.01995, 164.01995, 164.01995, 164.01995, 164.01995, 0.0, 0.0, 164.01995, 164.01995, 164.01995, 164.01995, 0.0]) - time_unix = [1544188336, 1544188358, 1544188360, 1544188377, 1544188394, 1544188411, 1544188428, 1544188496, 1544188539, 1544188584, 1544188601, 1544188635, 1544188652, 1544188672, 1544188692] + lat_deg = np.array( + [ + 37.09776, + 37.09776, + 37.09776, + 37.09798, + 37.09748, + 37.09665, + 37.09703, + 37.09719, + 37.09719, + 37.09719, + 37.09719, + 37.09748, + 37.09798, + 37.09776, + 37.09776, + ] + ) + lon_deg = np.array( + [ + -76.38631, + -76.38629, + -76.38629, + -76.38589, + -76.3848, + -76.38569, + -76.38658, + -76.38628, + -76.38628, + -76.38628, + -76.38628, + -76.3848, + -76.38589, + -76.38629, + -76.38629, + ] + ) + alt_ft = np.array( + [ + -1.9682394, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 0.0, + 0.0, + 
164.01995, + 164.01995, + 164.01995, + 164.01995, + 0.0, + ] + ) + time_unix = [ + 1544188336, + 1544188358, + 1544188360, + 1544188377, + 1544188394, + 1544188411, + 1544188428, + 1544188496, + 1544188539, + 1544188584, + 1544188601, + 1544188635, + 1544188652, + 1544188672, + 1544188692, + ] # Generate trajectory # ===================== # Generate trajectory object and pass the route (waypoints, ETA) to it - traj = Trajectory(lat=lat_deg * np.pi/180.0, - lon=lon_deg * np.pi/180.0, - alt=alt_ft * 0.3048, - etas=time_unix) + traj = Trajectory( + lat=lat_deg * np.pi / 180.0, + lon=lon_deg * np.pi / 180.0, + alt=alt_ft * 0.3048, + etas=time_unix, + ) - ref_traj = traj.generate(dt=vehicle.parameters['dt']) + ref_traj = traj.generate(dt=vehicle.parameters["dt"]) # Define controller and build scheduled control. The controller acts as a # future_loading function when simulating @@ -61,9 +130,8 @@ def run_example(): # Simulate vehicle to fly trajectory traj_results = vehicle.simulate_to_threshold( - ctrl, - dt=vehicle.parameters['dt'], - save_freq=vehicle.parameters['dt']) + ctrl, dt=vehicle.parameters["dt"], save_freq=vehicle.parameters["dt"] + ) # Visualize Results vehicle.visualize_traj(pred=traj_results, ref=ref_traj) @@ -71,17 +139,19 @@ def run_example(): # EXAMPLE 2: # In this example, we define another trajectory through the same # waypoints but with speeds defined instead of ETAs - + # Generate trajectory object and pass the route (lat/lon/alt, no ETAs) # and speed information to it - traj_speed = Trajectory(lat=lat_deg * np.pi/180.0, - lon=lon_deg * np.pi/180.0, - alt=alt_ft * 0.3048, - cruise_speed=8.0, - ascent_speed=2.0, - descent_speed=3.0, - landing_speed=2.0) - ref_traj_speeds = traj_speed.generate(dt=vehicle.parameters['dt']) + traj_speed = Trajectory( + lat=lat_deg * np.pi / 180.0, + lon=lon_deg * np.pi / 180.0, + alt=alt_ft * 0.3048, + cruise_speed=8.0, + ascent_speed=2.0, + descent_speed=3.0, + landing_speed=2.0, + ) + ref_traj_speeds = traj_speed.generate(dt=vehicle.parameters["dt"]) # Define controller and build scheduled control. This time we'll use LQR_I, # which is a linear quadratic regulator with integral action. @@ -90,12 +160,9 @@ def run_example(): # This version of LQR_I compensates for integral errors in the position of # the vehicle, i.e., x, y, z variables of the state vector. 
ctrl_speeds = LQR_I(ref_traj_speeds, vehicle) - + # Set simulation options - options = { - 'dt': vehicle.parameters['dt'], - 'save_freq': vehicle.parameters['dt'] - } + options = {"dt": vehicle.parameters["dt"], "save_freq": vehicle.parameters["dt"]} # Simulate vehicle to fly trajectory traj_results_speeds = vehicle.simulate_to_threshold(ctrl_speeds, **options) @@ -115,7 +182,25 @@ def run_example(): # First, we'll re-define the ETAs in the waypoints dictionary # (since we deleted them from the waypoints in Example 2) - time_unix = np.array([1544188336, 1544188358, 1544188360, 1544188377, 1544188394, 1544188411, 1544188428, 1544188496, 1544188539, 1544188584, 1544188601, 1544188635, 1544188652, 1544188672, 1544188692]) + time_unix = np.array( + [ + 1544188336, + 1544188358, + 1544188360, + 1544188377, + 1544188394, + 1544188411, + 1544188428, + 1544188496, + 1544188539, + 1544188584, + 1544188601, + 1544188635, + 1544188652, + 1544188672, + 1544188692, + ] + ) # Extract time information for desired interval, starting at waypoint 10 # and ending at waypoint 13 @@ -124,32 +209,33 @@ def run_example(): sim_time = end_time - start_time # Define initial state, x0, based on reference trajectory at start_time - ind = np.where(ref_traj['t'] == start_time) + ind = np.where(ref_traj["t"] == start_time) x0 = {key: ref_traj[key][ind][0] for key in ref_traj.keys()} - vehicle.parameters['x0'] = x0 + vehicle.parameters["x0"] = x0 # Define simulation parameters - note that we must define t0 as start_time # since we are not starting at the default of t0 = 0 options = { - 'dt': vehicle.parameters['dt'], - 'save_freq': vehicle.parameters['dt'], - 't0': start_time + "dt": vehicle.parameters["dt"], + "save_freq": vehicle.parameters["dt"], + "t0": start_time, } # Simulate starting from this initial state from start_time to end_time traj_results_interval = vehicle.simulate_to(sim_time, ctrl, **options) # Plot results with Example 1 results to show equivalence on this interval - z_1 = [output['z'] for output in traj_results.outputs] - z_4 = [output['z'] for output in traj_results_interval.outputs] + z_1 = [output["z"] for output in traj_results.outputs] + z_4 = [output["z"] for output in traj_results_interval.outputs] fig, ax = plt.subplots() - ax.plot(traj_results.times, z_1, '-b', label='Example 1') - ax.plot(traj_results_interval.times, z_4, '--r', label='Example 3') - ax.set_xlabel('time, s', fontsize=14) - ax.set_ylabel('altitude, m', fontsize=14) + ax.plot(traj_results.times, z_1, "-b", label="Example 1") + ax.plot(traj_results_interval.times, z_4, "--r", label="Example 3") + ax.set_xlabel("time, s", fontsize=14) + ax.set_ylabel("altitude, m", fontsize=14) ax.legend() + # This allows the module to be executed directly -if __name__ == '__main__': +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/9200218999c89b36419e98bafeffa86b/linear_model.ipynb b/docs/_downloads/9200218999c89b36419e98bafeffa86b/linear_model.ipynb index e41586d6..44447b6f 100644 --- a/docs/_downloads/9200218999c89b36419e98bafeffa86b/linear_model.ipynb +++ b/docs/_downloads/9200218999c89b36419e98bafeffa86b/linear_model.ipynb @@ -1,54 +1,170 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nThis example shows the use of the LinearModel class, a subclass of PrognosticsModel for models that can be described as a linear time series. 
\n\nThe model is used in a simulation, and the state is printed every second\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from prog_models import LinearModel\nimport numpy as np\n\nclass ThrownObject(LinearModel):\n \"\"\"\n Model that similates an object thrown into the air without air resistance\n\n Events (2)\n | falling: The object is falling\n | impact: The object has hit the ground\n\n Inputs/Loading: (0)\n\n States: (2)\n | x: Position in space (m)\n | v: Velocity in space (m/s)\n\n Outputs/Measurements: (1)\n | x: Position in space (m)\n\n Keyword Args\n ------------\n process_noise : Optional, float or Dict[Srt, float]\n Process noise (applied at dx/next_state). \n Can be number (e.g., .2) applied to every state, a dictionary of values for each \n state (e.g., {'x1': 0.2, 'x2': 0.3}), or a function (x) -> x\n process_noise_dist : Optional, String\n distribution for process noise (e.g., normal, uniform, triangular)\n measurement_noise : Optional, float or Dict[Srt, float]\n Measurement noise (applied in output eqn).\n Can be number (e.g., .2) applied to every output, a dictionary of values for each\n output (e.g., {'z1': 0.2, 'z2': 0.3}), or a function (z) -> z\n measurement_noise_dist : Optional, String\n distribution for measurement noise (e.g., normal, uniform, triangular)\n g : Optional, float\n Acceleration due to gravity (m/s^2). Default is 9.81 m/s^2 (standard gravity)\n thrower_height : Optional, float\n Height of the thrower (m). Default is 1.83 m\n throwing_speed : Optional, float\n Speed at which the ball is thrown (m/s). Default is 40 m/s\n \"\"\"\n\n inputs = [] # no inputs, no way to control\n states = [\n 'x', # Position (m) \n 'v' # Velocity (m/s)\n ]\n outputs = [\n 'x' # Position (m)\n ]\n events = [\n 'impact' # Event- object has impacted ground\n ]\n\n # These are the core of the linear model. \n # Linear models defined by the following equations:\n # * dx/dt = Ax + Bu + E\n # * z = Cx + D\n # * event states = Fx + G\n A = np.array([[0, 1], [0, 0]]) # dx/dt = Ax + Bu + E\n E = np.array([[0], [-9.81]]) # Acceleration due to gravity (m/s^2)\n C = np.array([[1, 0]]) # z = Cx + D\n F = None # Will override method\n\n # The Default parameters. Overwritten by passing parameters dictionary into constructor\n default_parameters = {\n 'thrower_height': 1.83, # m\n 'throwing_speed': 40, # m/s\n 'g': -9.81 # Acceleration due to gravity in m/s^2\n }\n\n def initialize(self, u=None, z=None):\n return self.StateContainer({\n 'x': self.parameters['thrower_height'], # Thrown, so initial altitude is height of thrower\n 'v': self.parameters['throwing_speed'] # Velocity at which the ball is thrown - this guy is a professional baseball pitcher\n })\n \n # This is actually optional. Leaving thresholds_met empty will use the event state to define thresholds.\n # Threshold = Event State == 0. 
However, this implementation is more efficient, so we included it\n def threshold_met(self, x):\n return {\n 'falling': x['v'] < 0,\n 'impact': x['x'] <= 0\n }\n\n def event_state(self, x): \n x_max = x['x'] + np.square(x['v'])/(-self.parameters['g']*2) # Use speed and position to estimate maximum height\n return {\n 'falling': np.maximum(x['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed\n 'impact': np.maximum(x['x']/x_max,0) if x['v'] < 0 else 1 # 1 until falling begins, then it's fraction of height\n }\n\ndef run_example():\n m = ThrownObject()\n def future_loading(t, x=None):\n return m.InputContainer({}) # No loading \n m.simulate_to_threshold(future_loading, print = True, save_freq=1, threshold_keys='impact', dt=0.1)\n\n# This allows the module to be executed directly \nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nThis example shows the use of the LinearModel class, a subclass of PrognosticsModel for models that can be described as a linear time series. \n\nThe model is used in a simulation, and the state is printed every second\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from prog_models import LinearModel\n", + "import numpy as np\n", + "\n", + "\n", + "class ThrownObject(LinearModel):\n", + " \"\"\"\n", + " Model that similates an object thrown into the air without air resistance\n", + "\n", + " Events (2)\n", + " | falling: The object is falling\n", + " | impact: The object has hit the ground\n", + "\n", + " Inputs/Loading: (0)\n", + "\n", + " States: (2)\n", + " | x: Position in space (m)\n", + " | v: Velocity in space (m/s)\n", + "\n", + " Outputs/Measurements: (1)\n", + " | x: Position in space (m)\n", + "\n", + " Keyword Args\n", + " ------------\n", + " process_noise : Optional, float or Dict[Srt, float]\n", + " Process noise (applied at dx/next_state).\n", + " Can be number (e.g., .2) applied to every state, a dictionary of values for each\n", + " state (e.g., {'x1': 0.2, 'x2': 0.3}), or a function (x) -> x\n", + " process_noise_dist : Optional, String\n", + " distribution for process noise (e.g., normal, uniform, triangular)\n", + " measurement_noise : Optional, float or Dict[Srt, float]\n", + " Measurement noise (applied in output eqn).\n", + " Can be number (e.g., .2) applied to every output, a dictionary of values for each\n", + " output (e.g., {'z1': 0.2, 'z2': 0.3}), or a function (z) -> z\n", + " measurement_noise_dist : Optional, String\n", + " distribution for measurement noise (e.g., normal, uniform, triangular)\n", + " g : Optional, float\n", + " Acceleration due to gravity (m/s^2). Default is 9.81 m/s^2 (standard gravity)\n", + " thrower_height : Optional, float\n", + " Height of the thrower (m). 
Default is 1.83 m\n", + " throwing_speed : Optional, float\n", + " Speed at which the ball is thrown (m/s). Default is 40 m/s\n", + " \"\"\"\n", + "\n", + " inputs = [] # no inputs, no way to control\n", + " states = [\n", + " \"x\", # Position (m)\n", + " \"v\", # Velocity (m/s)\n", + " ]\n", + " outputs = [\n", + " \"x\" # Position (m)\n", + " ]\n", + " events = [\n", + " \"impact\" # Event- object has impacted ground\n", + " ]\n", + "\n", + " # These are the core of the linear model.\n", + " # Linear models defined by the following equations:\n", + " # * dx/dt = Ax + Bu + E\n", + " # * z = Cx + D\n", + " # * event states = Fx + G\n", + " A = np.array([[0, 1], [0, 0]]) # dx/dt = Ax + Bu + E\n", + " E = np.array([[0], [-9.81]]) # Acceleration due to gravity (m/s^2)\n", + " C = np.array([[1, 0]]) # z = Cx + D\n", + " F = None # Will override method\n", + "\n", + " # The Default parameters. Overwritten by passing parameters dictionary into constructor\n", + " default_parameters = {\n", + " \"thrower_height\": 1.83, # m\n", + " \"throwing_speed\": 40, # m/s\n", + " \"g\": -9.81, # Acceleration due to gravity in m/s^2\n", + " }\n", + "\n", + " def initialize(self, u=None, z=None):\n", + " return self.StateContainer(\n", + " {\n", + " \"x\": self.parameters[\n", + " \"thrower_height\"\n", + " ], # Thrown, so initial altitude is height of thrower\n", + " \"v\": self.parameters[\n", + " \"throwing_speed\"\n", + " ], # Velocity at which the ball is thrown - this guy is a professional baseball pitcher\n", + " }\n", + " )\n", + "\n", + " # This is actually optional. Leaving thresholds_met empty will use the event state to define thresholds.\n", + " # Threshold = Event State == 0. However, this implementation is more efficient, so we included it\n", + " def threshold_met(self, x):\n", + " return {\"falling\": x[\"v\"] < 0, \"impact\": x[\"x\"] <= 0}\n", + "\n", + " def event_state(self, x):\n", + " x_max = x[\"x\"] + np.square(x[\"v\"]) / (\n", + " -self.parameters[\"g\"] * 2\n", + " ) # Use speed and position to estimate maximum height\n", + " return {\n", + " \"falling\": np.maximum(\n", + " x[\"v\"] / self.parameters[\"throwing_speed\"], 0\n", + " ), # Throwing speed is max speed\n", + " \"impact\": np.maximum(x[\"x\"] / x_max, 0)\n", + " if x[\"v\"] < 0\n", + " else 1, # 1 until falling begins, then it's fraction of height\n", + " }\n", + "\n", + "\n", + "def run_example():\n", + " m = ThrownObject()\n", + "\n", + " def future_loading(t, x=None):\n", + " return m.InputContainer({}) # No loading\n", + "\n", + " m.simulate_to_threshold(\n", + " future_loading, print=True, save_freq=1, threshold_keys=\"impact\", dt=0.1\n", + " )\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/95185cf727a21ad753717b8332bb063f/kalman_filter.py b/docs/_downloads/95185cf727a21ad753717b8332bb063f/kalman_filter.py index a914f8c2..2cfe1ed6 100644 --- a/docs/_downloads/95185cf727a21ad753717b8332bb063f/kalman_filter.py +++ 
b/docs/_downloads/95185cf727a21ad753717b8332bb063f/kalman_filter.py @@ -32,8 +32,8 @@ class ThrownObject(LinearModel): Keyword Args ------------ process_noise : Optional, float or Dict[Srt, float] - Process noise (applied at dx/next_state). - Can be number (e.g., .2) applied to every state, a dictionary of values for each + Process noise (applied at dx/next_state). + Can be number (e.g., .2) applied to every state, a dictionary of values for each state (e.g., {'x1': 0.2, 'x2': 0.3}), or a function (x) -> x process_noise_dist : Optional, String distribution for process noise (e.g., normal, uniform, triangular) @@ -53,87 +53,94 @@ class ThrownObject(LinearModel): inputs = [] # no inputs, no way to control states = [ - 'x', # Position (m) - 'v' # Velocity (m/s) - ] + "x", # Position (m) + "v", # Velocity (m/s) + ] outputs = [ - 'x' # Position (m) + "x" # Position (m) ] events = [ - 'impact' # Event- object has impacted ground + "impact" # Event- object has impacted ground ] A = np.array([[0, 1], [0, 0]]) E = np.array([[0], [-9.81]]) C = np.array([[1, 0]]) - F = None # Will override method + F = None # Will override method - # The Default parameters. + # The Default parameters. # Overwritten by passing parameters dictionary into constructor default_parameters = { - 'thrower_height': 1.83, # m - 'throwing_speed': 40, # m/s - 'g': -9.81 # Acceleration due to gravity in m/s^2 + "thrower_height": 1.83, # m + "throwing_speed": 40, # m/s + "g": -9.81, # Acceleration due to gravity in m/s^2 } def initialize(self, u=None, z=None): - return self.StateContainer({ - 'x': self.parameters['thrower_height'], - # Thrown, so initial altitude is height of thrower - 'v': self.parameters['throwing_speed'] - # Velocity at which the ball is thrown - this guy is a professional baseball pitcher - }) - + return self.StateContainer( + { + "x": self.parameters["thrower_height"], + # Thrown, so initial altitude is height of thrower + "v": self.parameters["throwing_speed"], + # Velocity at which the ball is thrown - this guy is a professional baseball pitcher + } + ) + # This is actually optional. Leaving thresholds_met empty will use the event state to define thresholds. - # Threshold is met when Event State == 0. + # Threshold is met when Event State == 0. 
# However, this implementation is more efficient, so we included it def threshold_met(self, x): - return { - 'falling': x['v'] < 0, - 'impact': x['x'] <= 0 - } + return {"falling": x["v"] < 0, "impact": x["x"] <= 0} - def event_state(self, x): - x_max = x['x'] + np.square(x['v'])/(-self.parameters['g']*2) # Use speed and position to estimate maximum height + def event_state(self, x): + x_max = x["x"] + np.square(x["v"]) / ( + -self.parameters["g"] * 2 + ) # Use speed and position to estimate maximum height return { - 'falling': np.maximum(x['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed - 'impact': np.maximum(x['x']/x_max,0) if x['v'] < 0 else 1 # 1 until falling begins, then it's fraction of height + "falling": np.maximum( + x["v"] / self.parameters["throwing_speed"], 0 + ), # Throwing speed is max speed + "impact": np.maximum(x["x"] / x_max, 0) + if x["v"] < 0 + else 1, # 1 until falling begins, then it's fraction of height } + def run_example(): # Step 1: Instantiate the model - m = ThrownObject(process_noise = 0, measurement_noise = 0) + m = ThrownObject(process_noise=0, measurement_noise=0) # Step 2: Instantiate the Kalman Filter State Estimator # Define the initial state to be slightly off of actual - x_guess = m.StateContainer({'x': 1.75, 'v': 35}) # Guess of initial state + x_guess = m.StateContainer({"x": 1.75, "v": 35}) # Guess of initial state # Note: actual is {'x': 1.83, 'v': 40} kf = KalmanFilter(m, x_guess) # Step 3: Run the Kalman Filter State Estimator - # Here we're using simulated data from the thrown_object. + # Here we're using simulated data from the thrown_object. # In a real application you would be using sensor data from the system dt = 0.01 # Time step (s) print_freq = 50 # Print every print_freq'th iteration x = m.initialize() u = m.InputContainer({}) # No input for this model - + for i in range(500): # Get simulated output (would be measured in a real application) z = m.output(x) # Estimate New State - kf.estimate(i*dt, u, z) + kf.estimate(i * dt, u, z) x_est = kf.x.mean # Print Results - if i%print_freq == 0: # Print every print_freq'th iteration - print(f"t: {i*dt:.2f}\n\tEstimate: {x_est}\n\tTruth: {x}") + if i % print_freq == 0: # Print every print_freq'th iteration + print(f"t: {i * dt:.2f}\n\tEstimate: {x_est}\n\tTruth: {x}") diff = {key: x_est[key] - x[key] for key in x.keys()} print(f"\t Diff: {diff}") # Update Real state for next step x = m.next_state(x, u, dt) -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/98e318f1c85976fc33e5b193dd2922a1/sensitivity.py b/docs/_downloads/98e318f1c85976fc33e5b193dd2922a1/sensitivity.py index 0e39ae4b..02a7fae7 100644 --- a/docs/_downloads/98e318f1c85976fc33e5b193dd2922a1/sensitivity.py +++ b/docs/_downloads/98e318f1c85976fc33e5b193dd2922a1/sensitivity.py @@ -2,13 +2,15 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example performing a sensitivity analysis on a new model. +Example performing a sensitivity analysis on a new model. 
""" import numpy as np + # Deriv prog model was selected because the model can be described as x' = x + dx*dt from progpy.models.thrown_object import ThrownObject + def run_example(): # Demo model # Step 1: Create instance of model @@ -17,33 +19,56 @@ def run_example(): # Step 2: Setup range on parameters considered thrower_height_range = np.arange(1.2, 2.1, 0.1) - # Step 3: Sim for each - event = 'impact' + # Step 3: Sim for each + event = "impact" eods = np.empty(len(thrower_height_range)) - for (i, thrower_height) in zip(range(len(thrower_height_range)), thrower_height_range): - m.parameters['thrower_height'] = thrower_height - simulated_results = m.simulate_to_threshold(events=event, dt =1e-3, save_freq =10) + for i, thrower_height in zip( + range(len(thrower_height_range)), thrower_height_range + ): + m.parameters["thrower_height"] = thrower_height + simulated_results = m.simulate_to_threshold(events=event, dt=1e-3, save_freq=10) eods[i] = simulated_results.times[-1] # Step 4: Analysis - print('For a reasonable range of heights, impact time is between {} and {}'.format(round(eods[0],3), round(eods[-1],3))) - sensitivity = (eods[-1]-eods[0])/(thrower_height_range[-1] - thrower_height_range[0]) - print(' - Average sensitivity: {} s per cm height'.format(round(sensitivity/100, 6))) + print( + "For a reasonable range of heights, impact time is between {} and {}".format( + round(eods[0], 3), round(eods[-1], 3) + ) + ) + sensitivity = (eods[-1] - eods[0]) / ( + thrower_height_range[-1] - thrower_height_range[0] + ) + print( + " - Average sensitivity: {} s per cm height".format( + round(sensitivity / 100, 6) + ) + ) print(" - It seems impact time is not very sensitive to thrower's height") # Now lets repeat for throw speed throw_speed_range = np.arange(20, 40, 1) eods = np.empty(len(throw_speed_range)) - for (i, throw_speed) in zip(range(len(throw_speed_range)), throw_speed_range): - m.parameters['throwing_speed'] = throw_speed - simulated_results = m.simulate_to_threshold(events=event, options={'dt':1e-3, 'save_freq':10}) + for i, throw_speed in zip(range(len(throw_speed_range)), throw_speed_range): + m.parameters["throwing_speed"] = throw_speed + simulated_results = m.simulate_to_threshold( + events=event, options={"dt": 1e-3, "save_freq": 10} + ) eods[i] = simulated_results.times[-1] - print('\nFor a reasonable range of throwing speeds, impact time is between {} and {}'.format(round(eods[0],3), round(eods[-1],3))) - sensitivity = (eods[-1]-eods[0])/(throw_speed_range[-1] - throw_speed_range[0]) - print(' - Average sensitivity: {} s per m/s speed'.format(round(sensitivity/100, 6))) + print( + "\nFor a reasonable range of throwing speeds, impact time is between {} and {}".format( + round(eods[0], 3), round(eods[-1], 3) + ) + ) + sensitivity = (eods[-1] - eods[0]) / (throw_speed_range[-1] - throw_speed_range[0]) + print( + " - Average sensitivity: {} s per m/s speed".format( + round(sensitivity / 100, 6) + ) + ) print(" - It seems impact time is much more dependent on throwing speed") -# This allows the module to be executed directly -if __name__=='__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/9a76f1f5e2a41a112b5a7db59f9c240b/sim_powertrain.py b/docs/_downloads/9a76f1f5e2a41a112b5a7db59f9c240b/sim_powertrain.py index 9a298539..bf4dd5b2 100644 --- a/docs/_downloads/9a76f1f5e2a41a112b5a7db59f9c240b/sim_powertrain.py +++ b/docs/_downloads/9a76f1f5e2a41a112b5a7db59f9c240b/sim_powertrain.py @@ -2,11 +2,12 @@ # 
National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a powertrain being simulated for a set amount of time. +Example of a powertrain being simulated for a set amount of time. """ from progpy.models import Powertrain, ESC, DCMotor + def run_example(): # Create a model object esc = ESC() @@ -15,16 +16,16 @@ def run_example(): # Define future loading function - 100% duty all the time def future_loading(t, x=None): - return powertrain.InputContainer({ - 'duty': 1, - 'v': 23 - }) - + return powertrain.InputContainer({"duty": 1, "v": 23}) + # Simulate to threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') - simulated_results = powertrain.simulate_to(2, future_loading, dt=2e-5, save_freq=0.1, print=True) + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") + simulated_results = powertrain.simulate_to( + 2, future_loading, dt=2e-5, save_freq=0.1, print=True + ) + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/9af9bdc48da233bdafa07c9ecb46568e/basic_example_battery.py b/docs/_downloads/9af9bdc48da233bdafa07c9ecb46568e/basic_example_battery.py index 221e5fed..21147dd8 100644 --- a/docs/_downloads/9af9bdc48da233bdafa07c9ecb46568e/basic_example_battery.py +++ b/docs/_downloads/9af9bdc48da233bdafa07c9ecb46568e/basic_example_battery.py @@ -2,13 +2,13 @@ """ This example extends the "basic example" to perform a state estimation and prediction with uncertainty given a more complicated model. Models, state estimators, and predictors can be switched out. See documentation nasa.github.io/progpy for description of options - + Method: An instance of the BatteryCircuit model in progpy is created, and the prediction process is achieved in three steps: 1) State estimation of the current state is performed using a chosen state_estimator, and samples are drawn from this estimate 2) Prediction of future states (with uncertainty) and the times at which the event threshold will be reached 3) Metrics tools are used to further investigate the results of prediction -Results: +Results: i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction ii) Time event is predicted to occur (with uncertainty) iii) Various prediction metrics @@ -29,31 +29,30 @@ from progpy.metrics import prob_success + def run_example(): # Step 1: Setup model & future loading # Measurement noise - R_vars = { - 't': 2, - 'v': 0.02 - } - batt = Battery(process_noise = 0.25, measurement_noise = R_vars) + R_vars = {"t": 2, "v": 0.02} + batt = Battery(process_noise=0.25, measurement_noise=R_vars) # Creating the input containers outside of the function accelerates prediction loads = [ - batt.InputContainer({'i': 2}), - batt.InputContainer({'i': 1}), - batt.InputContainer({'i': 4}), - batt.InputContainer({'i': 2}), - batt.InputContainer({'i': 3}) + batt.InputContainer({"i": 2}), + batt.InputContainer({"i": 1}), + batt.InputContainer({"i": 4}), + batt.InputContainer({"i": 2}), + batt.InputContainer({"i": 3}), ] - def future_loading(t, x = None): - # Variable (piece-wise) future loading scheme - if (t < 600): + + def future_loading(t, x=None): + # Variable (piece-wise) future loading scheme + if t < 600: return loads[0] - elif (t < 900): + elif t < 900: return loads[1] - elif (t < 1800): + elif t < 1800: return loads[2] - elif (t < 
3000): + elif t < 3000: return loads[3] return loads[-1] @@ -64,22 +63,24 @@ def future_loading(t, x = None): # Step 2a: Setup filt = StateEstimator(batt, initial_state) - + # Step 2b: Print & Plot Prior State print("Prior State:", filt.x.mean) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) - fig = filt.x.plot_scatter(label='prior') + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) + fig = filt.x.plot_scatter(label="prior") # Step 2c: Perform state estimation step - example_measurements = batt.OutputContainer({'t': 32.2, 'v': 3.915}) + example_measurements = batt.OutputContainer({"t": 32.2, "v": 3.915}) t = 0.1 u = future_loading(t) filt.estimate(t, u, example_measurements) # Step 2d: Print & Plot Resulting Posterior State print("\nPosterior State:", filt.x.mean) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) - filt.x.plot_scatter(fig=fig, label='posterior') # Add posterior state to figure from prior state + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) + filt.x.plot_scatter( + fig=fig, label="posterior" + ) # Add posterior state to figure from prior state # Note: in a prognostic application the above state estimation step would be repeated each time # there is new data. Here we're doing one step to demonstrate how the state estimator is used @@ -94,13 +95,16 @@ def future_loading(t, x = None): NUM_SAMPLES = 25 STEP_SIZE = 0.1 SAVE_FREQ = 100 # How often to save results - mc_results = mc.predict(filt.x, future_loading, n_samples = NUM_SAMPLES, dt=STEP_SIZE, save_freq = SAVE_FREQ) - print('ToE', mc_results.time_of_event.mean) + mc_results = mc.predict( + filt.x, future_loading, n_samples=NUM_SAMPLES, dt=STEP_SIZE, save_freq=SAVE_FREQ + ) + print("ToE", mc_results.time_of_event.mean) # Step 3c: Analyze the results # Note: The results of a sample-based prediction can be accessed by sample, e.g., from progpy.predictors import UnweightedSamplesPrediction + if isinstance(mc_results, UnweightedSamplesPrediction): states_sample_1 = mc_results.states[1] # now states_sample_1[n] corresponds to times[n] for the first sample @@ -110,39 +114,60 @@ def future_loading(t, x = None): # now you have all the samples corresponding to times[1] # Print Results - print('Results: ') + print("Results: ") for i, time in enumerate(mc_results.times): - print('\nt = {}'.format(time)) - print('\tu = {}'.format(mc_results.inputs.snapshot(i).mean)) - print('\tx = {}'.format(mc_results.states.snapshot(i).mean)) - print('\tz = {}'.format(mc_results.outputs.snapshot(i).mean)) - print('\tevent state = {}'.format(mc_results.event_states.snapshot(i).mean)) + print("\nt = {}".format(time)) + print("\tu = {}".format(mc_results.inputs.snapshot(i).mean)) + print("\tx = {}".format(mc_results.states.snapshot(i).mean)) + print("\tz = {}".format(mc_results.outputs.snapshot(i).mean)) + print("\tevent state = {}".format(mc_results.event_states.snapshot(i).mean)) # You can also access the final state (of type UncertainData), like so: final_state = mc_results.time_of_event.final_state - print('Final state @EOD: ', final_state['EOD'].mean) - + print("Final state @EOD: ", final_state["EOD"].mean) + # You can also use the metrics package to generate some useful metrics on the result of a prediction print("\nEOD Prediction Metrics") - print('\tPortion between 3005.2 and 3005.6: ', mc_results.time_of_event.percentage_in_bounds([3005.2, 3005.6])) - print('\tAssuming ground truth 3002.25: ', mc_results.time_of_event.metrics(ground_truth=3005.25)) - print('\tP(Success) if mission ends at 3002.25: ', 
prob_success(mc_results.time_of_event, 3005.25)) - - # Plot state transition + print( + "\tPortion between 3005.2 and 3005.6: ", + mc_results.time_of_event.percentage_in_bounds([3005.2, 3005.6]), + ) + print( + "\tAssuming ground truth 3002.25: ", + mc_results.time_of_event.metrics(ground_truth=3005.25), + ) + print( + "\tP(Success) if mission ends at 3002.25: ", + prob_success(mc_results.time_of_event, 3005.25), + ) + + # Plot state transition # Here we will plot the states at t0, 25% to ToE, 50% to ToE, 75% to ToE, and ToE - fig = mc_results.states.snapshot(0).plot_scatter(label = "t={} s".format(int(mc_results.times[0]))) # 0 - quarter_index = int(len(mc_results.times)/4) - mc_results.states.snapshot(quarter_index).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[quarter_index]))) # 25% - mc_results.states.snapshot(quarter_index*2).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[quarter_index*2]))) # 50% - mc_results.states.snapshot(quarter_index*3).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[quarter_index*3]))) # 75% - mc_results.states.snapshot(-1).plot_scatter(fig = fig, label = "t={} s".format(int(mc_results.times[-1]))) # 100% + fig = mc_results.states.snapshot(0).plot_scatter( + label="t={} s".format(int(mc_results.times[0])) + ) # 0 + quarter_index = int(len(mc_results.times) / 4) + mc_results.states.snapshot(quarter_index).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index])) + ) # 25% + mc_results.states.snapshot(quarter_index * 2).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index * 2])) + ) # 50% + mc_results.states.snapshot(quarter_index * 3).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index * 3])) + ) # 75% + mc_results.states.snapshot(-1).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[-1])) + ) # 100% mc_results.time_of_event.plot_hist() - + # Step 4: Show all plots import matplotlib.pyplot as plt # For plotting + plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/9c8e9f4d6db817c0bef52c29c3dbca21/01_Simulation.ipynb b/docs/_downloads/9c8e9f4d6db817c0bef52c29c3dbca21/01_Simulation.ipynb index c0ba03db..fd5a4e27 100644 --- a/docs/_downloads/9c8e9f4d6db817c0bef52c29c3dbca21/01_Simulation.ipynb +++ b/docs/_downloads/9c8e9f4d6db817c0bef52c29c3dbca21/01_Simulation.ipynb @@ -6,22 +6,52 @@ "source": [ "# 1. Simulating with Prognostics Models\n", "\n", - "One of the most basic of functions for models is simulation. Simulation is the process of predicting the evolution of [system's state](https://nasa.github.io/progpy/glossary.html#term-state) with time. Simulation is the foundation of prediction (see 9. Prediction). Unlike full prediction, simulation does not include uncertainty in the state and other product (e.g., [output](https://nasa.github.io/progpy/glossary.html#term-output)) representation.\n", + "One of the most basic of functions for models is simulation. Simulation is the process of predicting the evolution of a [system's state](https://nasa.github.io/progpy/glossary.html#term-state) with time. Simulation is the foundation of prediction (see __[08 Prediction](08_Prediction.ipynb)__). 
Unlike full prediction, simulation does not include uncertainty in the state and other product (e.g., [output](https://nasa.github.io/progpy/glossary.html#term-output)) representation.\n", "\n", - "The first section introduces simulating to a specific time (e.g., 3 seconds), using the `simulate_to` method. The second section introduces the concept of simulating until a threshold is met rather than a defined time, using `simulate_to_threshold`. The third section makes simulation more concrete with the introduction of [future loading](https://nasa.github.io/progpy/glossary.html#term-future-load). The sections following these introduce various advanced features that can be used in simulation.\n", + "In this notebook, we will introduce simulating to a specific time (e.g., 3 seconds) using the `simulate_to` method and simulating until a threshold is met (rather than a defined time) using `simulate_to_threshold`. We will also explore how to make simulations more concrete with [future loading](https://nasa.github.io/progpy/glossary.html#term-future-load) and other advanced features.\n", "\n", - "Note: Before running this example make sure you have ProgPy installed and up to date." + "***Note**: Before running this example make sure you have [ProgPy installed](https://nasa.github.io/progpy/#installing-progpy) and up to date.*" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "* [Basic Simulation to a Time](#Basic-Simulation-to-a-Time)\n", + "* [Simulating to Threshold](#Simulating-to-Threshold)\n", + "* [Future Loading](#Future-Loading)\n", + " * [Piecewise Load](#Piecewise-Load)\n", + " * [Moving Average](#Moving-Average)\n", + " * [Gaussian Noise in Loading](#Gaussian-Noise-in-Loading)\n", + " * [Custom Load Profiles](#Custom-Load-Profiles)\n", + "* [Step Size](#Step-Size)\n", + " * [Basic Step Size](#Basic-Step-Size)\n", + " * [Dynamic Step Size](#Dynamic-Step-Size)\n", + " * [Custom Step Size](#Custom-Step-Size)\n", + "* [Parameters](#Parameters)\n", + "* [Noise](#Noise)\n", + "* [Vectorized Simulation](#Vectorized-Simulation)\n", + "* [Configuring Simulation](#Configuring-Simulation)\n", + " * [Simulating from a Known Time](#Simulating-From-a-Known-Time)\n", + " * [Integration Method](#Integration-Method)\n", + "* [Conclusion](#Conclusion)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Basic Simulation to a Time" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Basic Simulation to a Time\n", - "\n", "Let's go through a basic example simulating a model to a specific point in time. In this case we are using the ThrownObject model. ThrownObject is a basic model of an object being thrown up into the air (with resistance) and returning to the ground.\n", "\n", - "First we import the model from ProgPy's models subpackage (see 3. Included Models) and create a model instance." + "First, we import the model from ProgPy's models subpackage (see __[03 Existing Models](03_Existing%20Models.ipynb)__) and create a model instance." ] }, { @@ -31,6 +61,7 @@ "outputs": [], "source": [ "from progpy.models import ThrownObject\n", + "\n", "m = ThrownObject()" ] }, @@ -57,8 +88,8 @@ "It's that simple! We've simulated the model forward three seconds. Let's look in a little more detail at the returned results. 
\n", "\n", "Simulation results consists of 5 different types of information, described below:\n", - "* **times**: the time corresponding to each value.\n", - "* **[inputs](https://nasa.github.io/progpy/glossary.html#term-input)**: Control or loading applied to the system being modeled (e.g., current drawn from a battery). Input is frequently denoted by u.\n", + "* **times**: Time corresponding to each value.\n", + "* **[inputs](https://nasa.github.io/progpy/glossary.html#term-input)**: Control or loading applied to the system being modeled (e.g., current drawn from a battery). Input is frequently denoted by `u`.\n", "* **[states](https://nasa.github.io/progpy/glossary.html#term-state)**: Internal variables (typically hidden states) used to represent the state of the system. Can be same as inputs or outputs but do not have to be. State is frequently denoted as `x`.\n", "* **[outputs](https://nasa.github.io/progpy/glossary.html#term-output)**: Measured sensor values from a system (e.g., voltage and temperature of a battery). Can be estimated from the system state. Output is frequently denoted by `z`.\n", "* **[event_states](https://nasa.github.io/progpy/glossary.html#term-event-state)**: Progress towards [event](https://nasa.github.io/progpy/glossary.html#term-event) occurring. Defined as a number where an event state of 0 indicates the event has occurred and 1 indicates no progress towards the event (i.e., fully healthy operation for a failure event). For a gradually occurring event (e.g., discharge) the number will progress from 1 to 0 as the event nears. In prognostics, event state is frequently called “State of Health”.\n", @@ -79,7 +110,9 @@ "metadata": {}, "outputs": [], "source": [ - "fig = results.outputs.plot()" + "import matplotlib.pyplot as plt\n", + "\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")" ] }, { @@ -100,7 +133,7 @@ "outputs": [], "source": [ "results = m.simulate_to(3, save_freq=0.5)\n", - "fig = results.outputs.plot(ylabel='Position (m)')" + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")" ] }, { @@ -134,7 +167,7 @@ "metadata": {}, "outputs": [], "source": [ - "fig = results.event_states.plot()" + "fig = results.event_states.plot(xlabel=\"time (s)\")" ] }, { @@ -157,7 +190,14 @@ "metadata": {}, "outputs": [], "source": [ - "fig = results.states.plot()" + "x = [state[\"x\"] for state in results.states]\n", + "v = [state[\"v\"] for state in results.states]\n", + "plt.plot(results.times, x, label=\"Position (x) [m]\", color=\"tab:blue\")\n", + "plt.plot(results.times, v, label=\"Velocity (v) [m/s]\", color=\"tab:orange\")\n", + "plt.xlabel(\"time (s)\")\n", + "plt.ylabel(\"state\")\n", + "plt.legend()\n", + "plt.show()" ] }, { @@ -201,6 +241,7 @@ "outputs": [], "source": [ "from progpy.models import ThrownObject\n", + "\n", "m = ThrownObject()" ] }, @@ -220,8 +261,8 @@ "outputs": [], "source": [ "results = m.simulate_to_threshold(save_freq=0.5)\n", - "fig = results.outputs.plot(ylabel='Position (m)')\n", - "fig = results.event_states.plot()" + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")\n", + "fig = results.event_states.plot(xlabel=\"time (s)\")" ] }, { @@ -237,7 +278,7 @@ "source": [ "By default, `simulate_to_threshold` simulates until the first event occurs. In this case, that's `falling` (i.e., when the object begins falling). For this model `falling` will always occur before `impact`, but for many models you won't have such a strict ordering of events. 
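Because event ordering is model-specific, it can help to inspect which events a model defines and which thresholds were actually met when simulation stopped. The following is a minimal sketch that mirrors calls already used elsewhere in these examples (the printed values in the comments are indicative, not guaranteed):

from progpy.models import ThrownObject

m = ThrownObject()
print(m.events)  # events defined by the model, e.g. ['falling', 'impact']

# Simulate until the first event occurs, then check which threshold(s) were met
results = m.simulate_to_threshold(save_freq=0.5, dt=0.1)
print(m.threshold_met(results.states[-1]))  # e.g. {'falling': True, 'impact': False}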
\n", "\n", - "For users interested in when a specific event is reached, you can indicate which event(s) you'd like to simulate to using the `events` argument. For example," + "For users interested in when a specific event is reached, you can indicate which event(s) you'd like to simulate to using the `events` argument." ] }, { @@ -246,9 +287,9 @@ "metadata": {}, "outputs": [], "source": [ - "results = m.simulate_to_threshold(save_freq=0.5, events='impact')\n", - "fig = results.outputs.plot(ylabel='Position (m)')\n", - "fig = results.event_states.plot()" + "results = m.simulate_to_threshold(save_freq=0.5, events=\"impact\")\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")\n", + "fig = results.event_states.plot(xlabel=\"time (s)\")" ] }, { @@ -264,7 +305,7 @@ "source": [ "Frequently users are interested in simulating to a threshold, only if it occurs within some horizon of interest, like a mission time or planning horizon. This is accomplished with the `horizon` keyword argument. \n", "\n", - "For example, if we were only interested in events occuring in the next 7 seconds we could set `horizon` to 7, like below:" + "For example, if we were only interested in events occuring in the next 7 seconds we could set `horizon` to 7." ] }, { @@ -273,9 +314,9 @@ "metadata": {}, "outputs": [], "source": [ - "results = m.simulate_to_threshold(save_freq=0.5, events='impact', horizon=7)\n", - "fig = results.outputs.plot(ylabel='Position (m)')\n", - "fig = results.event_states.plot()" + "results = m.simulate_to_threshold(save_freq=0.5, events=\"impact\", horizon=7)\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")\n", + "fig = results.event_states.plot(xlabel=\"time (s)\")" ] }, { @@ -291,9 +332,9 @@ "metadata": {}, "outputs": [], "source": [ - "results = m.simulate_to_threshold(save_freq=0.5, events='impact', horizon=10)\n", - "fig = results.outputs.plot(ylabel='Position (m)')\n", - "fig = results.event_states.plot()" + "results = m.simulate_to_threshold(save_freq=0.5, events=\"impact\", horizon=10)\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")\n", + "fig = results.event_states.plot(xlabel=\"time (s)\")" ] }, { @@ -302,13 +343,8 @@ "source": [ "The 7 and 10 second horizon is used as an example. In most cases, the simulation horizon will be much longer. For example, you can imagine a user who's interested in prognostics for a one hour drone flight might set the horizon to a little over an hour. A user who has a month-long maintenance scheduling window might chose a horizon of one month. \n", "\n", - "It is good practice to include a horizon with most simulations to prevent simulations continuing indefinitely for the case where the event never happens." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ + "It is good practice to include a horizon with most simulations to prevent simulations continuing indefinitely for the case where the event never happens.\n", + "\n", "One final note: you can also use the print and progress options to track progress during long simulations, like below:" ] }, @@ -318,20 +354,17 @@ "metadata": {}, "outputs": [], "source": [ - "results = m.simulate_to_threshold(save_freq=0.5, events='impact', print=True, progress=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For most users running this in Jupyter notebook, the output will be truncated, but it gives an idea of what would be shown when selecting these options." 
+ "results = m.simulate_to_threshold(\n", + " save_freq=0.5, events=\"impact\", print=True, progress=True\n", + ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ + "For most users running this in Jupyter notebook, the output will be truncated, but it gives an idea of what would be shown when selecting these options.\n", + "\n", "In this example we specified events='impact' to indicate that simulation should stop when the specified event 'impact' is met. By default, the simulation will stop when the first of the specified events occur. If you dont specify any events, all model events will be included (in this case ['falling', 'impact']). This means that without specifying events, execution would have ended early, when the object starts falling, like below:" ] }, @@ -342,16 +375,16 @@ "outputs": [], "source": [ "results = m.simulate_to_threshold(save_freq=0.5, dt=0.1)\n", - "print('Last timestep: ', results.times[-1])" + "print(\"Last timestep: \", results.times[-1])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Note that simulation stopped at around 3.8seconds, about when the object starts falling. \n", + "Note that simulation stopped at around 3.8 seconds, about when the object starts falling.\n", "\n", - "Alternately, if we would like to execute until all events have occurred we can use the `event_strategy` argument, like below:" + "Alternatively, if we would like to execute until all events have occurred, we can use the `event_strategy` argument, which specifies the strategy for stopping evaluation. The default value is `first`, but we can change it to `all`." ] }, { @@ -360,22 +393,46 @@ "metadata": {}, "outputs": [], "source": [ - "results = m.simulate_to_threshold(save_freq=0.5, dt=0.1, event_strategy='all')\n", - "print('Last timestep: ', results.times[-1])" + "results = m.simulate_to_threshold(save_freq=0.5, dt=0.1, event_strategy=\"all\")\n", + "print(\"Last timestep: \", results.times[-1])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Not the simulation stopped at around 7.9 seconds, when the last of the events occured ('impact')" + "Note the simulation stopped at around 7.9 seconds, when the last of the events occurred ('impact').\n", + "\n", + "We can also specify `event_strategy` to be a custom function." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from numpy import all\n", + "\n", + "\n", + "# Custom function that stops when all objects impact ground\n", + "def thresholds_met_eqn(thresholds_met):\n", + " return all(thresholds_met[\"impact\"])\n", + "\n", + "\n", + "results = m.simulate_to_threshold(\n", + " save_freq=0.5, dt=0.1, event_strategy=thresholds_met_eqn\n", + ")\n", + "print(\"Last timestep: \", results.times[-1])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "This is a basic example of simulating to an event. However, this is still just an example. Most models will have some form of input or loading. Simulating these models is described in the following section. The remainder of the sections go through various features for customizing simulation further." + "Again, we can see that the simulation stopped at around 7.9 seconds, when the last of the events occurred ('impact').\n", + "\n", + "This is a basic example of simulating to an event. However, this is still just an example. Most models will have some form of input or loading. Simulating these models is described in the following section. 
The remainder of the sections go through various features for further customizing simulations." ] }, { @@ -391,7 +448,7 @@ "source": [ "The previous examples feature a simple ThrownObject model, which does not have any inputs. Unlike ThrownObject, most prognostics models have some sort of [input](https://nasa.github.io/progpy/glossary.html#term-input). The input is some sort of control or loading applied to the system being modeled. In this section we will describe how to simulate a model which features an input.\n", "\n", - "In this example we will be using the BatteryCircuit model from the models subpackage (see 3. Included Models). This is a simple battery discharge model where the battery is represented by an equivalent circuit.\n", + "In this example we will be using the BatteryCircuit model from the models subpackage (see __[03 Existing Models](03_Existing%20Models.ipynb)__). This is a simple battery discharge model where the battery is represented by an equivalent circuit.\n", "\n", "Like the past examples, we start by importing and creating the model." ] @@ -403,6 +460,7 @@ "outputs": [], "source": [ "from progpy.models import BatteryCircuit\n", + "\n", "m = BatteryCircuit()" ] }, @@ -419,8 +477,8 @@ "metadata": {}, "outputs": [], "source": [ - "print('outputs:', m.outputs)\n", - "print('inputs:', m.inputs)" + "print(\"outputs:\", m.outputs)\n", + "print(\"inputs:\", m.inputs)" ] }, { @@ -436,9 +494,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Piecewise load\n", - "\n", - "For the first example, we define a piecewise loading profile using the `progpy.loading.Piecewise` class. This is one of the most common loading profiles. First we import the class from the loading subpackage" + "### Piecewise Load" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For the first example, we define a piecewise loading profile using the `progpy.loading.Piecewise` class. This is one of the most common loading profiles. First we import the class from the loading subpackage and matplotlib for graphing." ] }, { @@ -456,7 +519,7 @@ "source": [ "Next, we define a loading profile. Piecewise loader takes 3 arguments: 1. the model InputContainer, 2. times and 3. loads. Each of these are explained in more detail below.\n", "\n", - "The model input container is a class for representing the input for a model. It's a class attribute for every model, and is specific to that model. It can be found at m.InputContainer. For example," + "The model input container is a class for representing the input for a model. It's a class attribute for every model, and is specific to that model. It can be found at m.InputContainer." ] }, { @@ -472,7 +535,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "InputContainers are initialized with either a dictionary or a column vector, for example:" + "InputContainers are initialized with either a dictionary or a column vector." 
] }, { @@ -481,9 +544,10 @@ "metadata": {}, "outputs": [], "source": [ - "print(m.InputContainer({'i': 3}))\n", + "print(m.InputContainer({\"i\": 3}))\n", "import numpy as np\n", - "print(m.InputContainer(np.vstack((2.3, ))))" + "\n", + "print(m.InputContainer(np.vstack((2.3,))))" ] }, { @@ -504,9 +568,10 @@ "outputs": [], "source": [ "loading = Piecewise(\n", - " InputContainer=m.InputContainer,\n", - " times=[600, 900, 1800, 3000],\n", - " values={'i': [2, 1, 4, 2, 3]})" + " InputContainer=m.InputContainer,\n", + " times=[600, 900, 1800, 3000],\n", + " values={\"i\": [2, 1, 4, 2, 3]},\n", + ")" ] }, { @@ -515,7 +580,7 @@ "source": [ "In this case, the current drawn (`i`) is 2 amps until t is 600 seconds, then it is 1 for the next 300 seconds (until 900 seconds), etc. The \"default load\" is 3, meaning that after the last time has passed (3000 seconds) a current of 3 will be drawn. \n", "\n", - "Now that we have this load profile, let's run a simulation with our model" + "Now that we have this load profile, let's run a simulation with our model." ] }, { @@ -531,7 +596,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Let's take a look at the inputs to the model" + "Let's take a look at the inputs to the model." ] }, { @@ -540,7 +605,7 @@ "metadata": {}, "outputs": [], "source": [ - "fig = results.inputs.plot(ylabel=\"Current Draw (amps)\")" + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" ] }, { @@ -558,7 +623,16 @@ "metadata": {}, "outputs": [], "source": [ - "fig = results.outputs.plot(compact=False)" + "def print_battery_output_plots(results):\n", + " fig = results.outputs.plot(\n", + " keys=[\"t\"], xlabel=\"time (s)\", ylabel=\"temperature (K)\", figsize=(10, 4)\n", + " )\n", + " fig2 = results.outputs.plot(\n", + " keys=[\"v\"], xlabel=\"time (s)\", ylabel=\"voltage (V)\", figsize=(10, 4)\n", + " )\n", + "\n", + "\n", + "print_battery_output_plots(results)" ] }, { @@ -581,7 +655,7 @@ "source": [ "Another common loading scheme is the moving-average load. This loading scheme assumes that the load will continue like it's seen in the past. This is useful when you don't know the exact load, but you expect it to be consistent.\n", "\n", - "Like with Piecewise loading, the first step it to import the loading class. In this case, `progpy.loading.MovingAverage`" + "Like with Piecewise loading, the first step it to import the loading class. In this case, `progpy.loading.MovingAverage`." ] }, { @@ -597,7 +671,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next we create the moving average loading object, passing in the InputContainer" + "Next we create the moving average loading object, passing in the InputContainer." ] }, { @@ -623,16 +697,16 @@ "outputs": [], "source": [ "measured_loads = [4, 4.5, 4.0, 4, 2.1, 1.8, 1.99, 2.0, 2.01, 1.89, 1.92, 2.01, 2.1, 2.2]\n", - " \n", + "\n", "for load in measured_loads:\n", - " loading.add_load({'i': load})" + " loading.add_load({\"i\": load})" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In practice the add_load method should be called whenever there's new input (i.e., load) information. The MovingAverage load estimator averages over a window of elements, configurable at construction using the window argument (e.g., MovingAverage(m.InputContainer, window=12))\n", + "In practice the add_load method should be called whenever there's new input (i.e., load) information. 
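For instance, here is a minimal sketch of that pattern. It assumes the load estimator can be called directly with a time, as the simulator does, so we can watch the estimate update as each new measurement is added; the load values are made up for illustration.

```python
from progpy.models import BatteryCircuit
from progpy.loading import MovingAverage

m = BatteryCircuit()
loading = MovingAverage(m.InputContainer)

# Each new measurement updates the moving-average load estimate
for measured_load in [4.0, 4.0, 2.0, 2.0]:
    loading.add_load({"i": measured_load})
    print("estimate after add_load({}): {}".format(measured_load, loading(0)))
```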
The MovingAverage load estimator averages over a window of elements, configurable at construction using the window argument (e.g., MovingAverage(m.InputContainer, window=12)).\n", "\n", "Now the configured load estimator can be used in simulation. " ] @@ -650,7 +724,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now let's take a look at the resulting input current." + "Let's take a look at the resulting input current." ] }, { @@ -659,14 +733,14 @@ "metadata": {}, "outputs": [], "source": [ - "fig = results.inputs.plot()" + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Note that the loading is a constant around 2, this is because the larger loads (~4 amps) are outside of the averaging window. Here are the resulting outputs" + "Note that the loading is a constant around 2, this is because the larger loads (~4 amps) are outside of the averaging window. Here are the resulting outputs:" ] }, { @@ -675,7 +749,7 @@ "metadata": {}, "outputs": [], "source": [ - "fig = results.outputs.plot(compact=False)" + "print_battery_output_plots(results)" ] }, { @@ -698,7 +772,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Typically, users have an idea of what loading will look like, but there is some uncertainty. Future load estimates are hardly ever known exactly. This is where load wrappers like the `progpy.loading.GaussianNoiseLoadWrapper` come into play. The GaussianNoiseLoadWrapper wraps around another load profile, adding a random amount of noise, sampled from a Gaussian distribution, at each step. This will show some variability in simulation, but this becomes more important in prediction (see 9. Prediction).\n", + "Typically, users have an idea of what loading will look like, but there is some uncertainty. Future load estimates are hardly ever known exactly. This is where load wrappers like the `progpy.loading.GaussianNoiseLoadWrapper` come into play. The GaussianNoiseLoadWrapper wraps around another load profile, adding a random amount of noise, sampled from a Gaussian distribution, at each step. This will show some variability in simulation, but this becomes more important in prediction (see __[08 Prediction](08_Prediction.ipynb)__).\n", "\n", "In this example we will repeat the Piecewise load example, this time using the GaussianNoiseLoadWrapper to represent our uncertainty in our future load estimate. 
\n", "\n", @@ -712,10 +786,12 @@ "outputs": [], "source": [ "from progpy.loading import Piecewise, GaussianNoiseLoadWrapper\n", + "\n", "loading = Piecewise(\n", - " InputContainer=m.InputContainer,\n", - " times=[600, 900, 1800, 3000],\n", - " values={'i': [2, 1, 4, 2, 3]})" + " InputContainer=m.InputContainer,\n", + " times=[600, 900, 1800, 3000],\n", + " values={\"i\": [2, 1, 4, 2, 3]},\n", + ")" ] }, { @@ -750,7 +826,7 @@ "outputs": [], "source": [ "results = m.simulate_to_threshold(loading_with_noise, save_freq=100)\n", - "fig = results.inputs.plot()" + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" ] }, { @@ -767,7 +843,7 @@ "outputs": [], "source": [ "results = m.simulate_to_threshold(loading_with_noise, save_freq=100)\n", - "fig = results.inputs.plot()" + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" ] }, { @@ -783,7 +859,7 @@ "metadata": {}, "outputs": [], "source": [ - "fig = results.outputs.plot(compact=False)" + "print_battery_output_plots(results)" ] }, { @@ -803,11 +879,11 @@ "source": [ "loading_with_noise = GaussianNoiseLoadWrapper(loading, 0.2, seed=2000)\n", "results = m.simulate_to_threshold(loading_with_noise, save_freq=100)\n", - "fig = results.inputs.plot()\n", + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")\n", "\n", "loading_with_noise = GaussianNoiseLoadWrapper(loading, 0.2, seed=2000)\n", "results = m.simulate_to_threshold(loading_with_noise, save_freq=100)\n", - "fig = results.inputs.plot()" + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" ] }, { @@ -816,14 +892,14 @@ "source": [ "The load profiles in the two examples above are identical because they share the same random seed.\n", "\n", - "In this section we introduced the concept of NoiseWrappers and how they are used to represent uncertainty in future loading. This concept is especially important when used with prediction (see 9. Prediction). A GaussianNoiseLoadWrapper was used with a Piecewise loading profile to demonstrate it, but NoiseWrappers can be applied to any loading object or function, including the advanced profiles introduced in the next section." + "In this section we introduced the concept of NoiseWrappers and how they are used to represent uncertainty in future loading. This concept is especially important when used with prediction (see __[08 Prediction](08_Prediction.ipynb)__). A GaussianNoiseLoadWrapper was used with a Piecewise loading profile to demonstrate it, but NoiseWrappers can be applied to any loading object or function, including the advanced profiles introduced in the next section." 
] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Custom load profiles" + "### Custom Load Profiles" ] }, { @@ -846,11 +922,14 @@ "outputs": [], "source": [ "from numpy.random import normal\n", + "\n", "base_load = 2 # Base load (amps)\n", "std_slope = 1e-4 # Derivative of standard deviation with time\n", + "\n", + "\n", "def loading(t, x=None):\n", " std = std_slope * t\n", - " return m.InputContainer({'i': normal(base_load, std)})" + " return m.InputContainer({\"i\": normal(base_load, std)})" ] }, { @@ -869,7 +948,7 @@ "outputs": [], "source": [ "results = m.simulate_to_threshold(loading, save_freq=100)\n", - "fig = results.inputs.plot()" + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" ] }, { @@ -887,7 +966,7 @@ "metadata": {}, "outputs": [], "source": [ - "fig = results.outputs.plot(compact=False)" + "print_battery_output_plots(results)" ] }, { @@ -905,13 +984,14 @@ "metadata": {}, "outputs": [], "source": [ - "normal_load = m.InputContainer({'i': 2.7})\n", - "low_power_load = m.InputContainer({'i': 1.9})\n", + "normal_load = m.InputContainer({\"i\": 2.7})\n", + "low_power_load = m.InputContainer({\"i\": 1.9})\n", + "\n", "\n", "def loading(t, x=None):\n", " if x is not None:\n", " # State is provided\n", - " soc = m.event_state(x)['EOD']\n", + " soc = m.event_state(x)[\"EOD\"]\n", " return normal_load if soc > 0.25 else low_power_load\n", " return normal_load" ] @@ -932,7 +1012,7 @@ "outputs": [], "source": [ "results = m.simulate_to_threshold(loading, save_freq=100)\n", - "fig = results.inputs.plot()" + "fig = results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")" ] }, { @@ -950,7 +1030,7 @@ "metadata": {}, "outputs": [], "source": [ - "fig = results.outputs.plot(compact=False)" + "print_battery_output_plots(results)" ] }, { @@ -977,7 +1057,7 @@ "\n", "In this section we will introduce the concept of setting simulation step size (`dt`) and discuss some considerations when selecting step sizes.\n", "\n", - "For this section we will use the `progpy.models.ThrownObject model` (see 3. Included models), imported and created below." + "For this section we will use the `progpy.models.ThrownObject model` (see __[03 Existing Models](03_Existing%20Models.ipynb)__), imported and created below." ] }, { @@ -987,6 +1067,7 @@ "outputs": [], "source": [ "from progpy.models import ThrownObject\n", + "\n", "m = ThrownObject()" ] }, @@ -1012,11 +1093,8 @@ "metadata": {}, "outputs": [], "source": [ - "results = m.simulate_to_threshold(\n", - " events='impact',\n", - " dt=2.5,\n", - " save_freq=2.5)\n", - "fig = results.outputs.plot(ylabel='Position (m)')" + "results = m.simulate_to_threshold(events=\"impact\", dt=2.5, save_freq=2.5)\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")" ] }, { @@ -1034,11 +1112,8 @@ "metadata": {}, "outputs": [], "source": [ - "results = m.simulate_to_threshold(\n", - " events='impact',\n", - " dt=0.25,\n", - " save_freq=0.25)\n", - "fig = results.outputs.plot(ylabel='Position (m)')" + "results = m.simulate_to_threshold(events=\"impact\", dt=0.25, save_freq=0.25)\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")" ] }, { @@ -1049,7 +1124,7 @@ "\n", "All simulations are approximations. The example with the larger step size accumulates more error in integration. 
The second example (with a smaller step size) is more accurate to the actual model behavior.\n", "\n", - "Now let's decrease the step size even more" + "Now let's decrease the step size even more." ] }, { @@ -1058,11 +1133,8 @@ "metadata": {}, "outputs": [], "source": [ - "results = m.simulate_to_threshold(\n", - " events='impact',\n", - " dt=0.05,\n", - " save_freq=0.05)\n", - "fig = results.outputs.plot(ylabel='Position (m)')" + "results = m.simulate_to_threshold(events=\"impact\", dt=0.05, save_freq=0.05)\n", + "fig = results.outputs.plot(xlabel=\"time (s)\", ylabel=\"position (m)\")" ] }, { @@ -1071,7 +1143,7 @@ "source": [ "The resulting output is different than the 0.25 second step size run, but not by much. What you see here is the diminishing returns in decreasing step size.\n", "\n", - "The smaller the step size, the more computational resources required to simulate it. This doesn't matter as much for simulating this simple model over a short horizon, but becomes very important when performing prediction (see 9. Prediction), using a complex model with a long horizon, or when operating in a computationally constrained environment (e.g., embedded)." + "The smaller the step size, the more computational resources required to simulate it. This doesn't matter as much for simulating this simple model over a short horizon, but becomes very important when performing prediction (see __[08 Prediction](08_Prediction.ipynb)__), using a complex model with a long horizon, or when operating in a computationally constrained environment (e.g., embedded)." ] }, { @@ -1094,11 +1166,8 @@ "metadata": {}, "outputs": [], "source": [ - "results = m.simulate_to_threshold(\n", - " events='impact',\n", - " dt=1,\n", - " save_freq=1.5)\n", - "print('Times saved: ', results.times)" + "results = m.simulate_to_threshold(events=\"impact\", dt=1, save_freq=1.5)\n", + "print(\"Times saved: \", results.times)" ] }, { @@ -1116,11 +1185,8 @@ "metadata": {}, "outputs": [], "source": [ - "results = m.simulate_to_threshold(\n", - " events='impact',\n", - " dt=('auto', 1),\n", - " save_freq=1.5)\n", - "print('Times saved: ', results.times)" + "results = m.simulate_to_threshold(events=\"impact\", dt=(\"auto\", 1), save_freq=1.5)\n", + "print(\"Times saved: \", results.times)" ] }, { @@ -1157,11 +1223,12 @@ "def next_time(t, x):\n", " # In this example dt is a function of state. Uses a dt of 1 until impact event state 0.25, then 0.25\n", " event_state = m.event_state(x)\n", - " if event_state['impact'] < 0.25:\n", + " if event_state[\"impact\"] < 0.25:\n", " return 0.25\n", " return 1\n", "\n", - "results=m.simulate_to_threshold(dt=next_time, save_freq= 0.25, events='impact')\n", + "\n", + "results = m.simulate_to_threshold(dt=next_time, save_freq=0.25, events=\"impact\")\n", "\n", "print(results.times)" ] @@ -1186,7 +1253,7 @@ "source": [ "All the previous sections used a model with default settings. This is hardly ever the case. A model will have to be configured to represent the actual system. For example, the BatteryCircuit default parameters are for a 18650 battery tested in NASA's SHARP lab. If you're using the model for a system other than that one battery, you will need to update the parameters.\n", "\n", - "The parameters available are specific to the system in question. See 3. Included Models for a more detailed description of these. 
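Here is a minimal sketch of how parameters are read and written, using the accessors that appear later in this section (`m.parameters` and item assignment); the Mars-like gravity value is only an illustration:

```python
from progpy.models import ThrownObject

m = ThrownObject()
print("throwing_speed:", m.parameters["throwing_speed"])
print("g:", m.parameters["g"])

m["throwing_speed"] = 45       # update a single parameter in place
m_mars = ThrownObject(g=-3.7)  # or configure at construction (illustrative Mars-like gravity)
```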
For example, for the BatteryCircuit model, parameters include battery capacity, internal resistance, and other electrical characteristics.\n", + "The parameters available are specific to the system in question. See __[03 Existing Models](03_Existing%20Models.ipynb)__ for a more detailed description of these. For example, for the BatteryCircuit model, parameters include battery capacity, internal resistance, and other electrical characteristics.\n", "\n", "In this section we will adjust the parameters for the ThrownObject Model, observing how that changes system behavior." ] @@ -1198,6 +1265,8 @@ "outputs": [], "source": [ "from progpy.models import ThrownObject\n", + "import matplotlib.pyplot as plt\n", + "\n", "m = ThrownObject()" ] }, @@ -1240,16 +1309,24 @@ "metadata": {}, "outputs": [], "source": [ - "results1 = m.simulate_to_threshold(events='impact', dt=0.1, save_freq=0.1)\n", - "fig = results1.outputs.plot(title='default')\n", + "default_throw = m.simulate_to_threshold(events=\"impact\", dt=0.1, save_freq=0.1)\n", + "\n", + "m[\"throwing_speed\"] = 10\n", + "slow_throw = m.simulate_to_threshold(events=\"impact\", dt=0.1, save_freq=0.1)\n", + "\n", + "m[\"throwing_speed\"] = 80\n", + "fast_throw = m.simulate_to_threshold(events=\"impact\", dt=0.1, save_freq=0.1)\n", "\n", - "m['throwing_speed'] = 10\n", - "results2 = m.simulate_to_threshold(events='impact', dt=0.1, save_freq=0.1)\n", - "fig = results2.outputs.plot(title='slow')\n", + "plt.figure(figsize=(10, 8))\n", "\n", - "m['throwing_speed'] = 80\n", - "results3 = m.simulate_to_threshold(events='impact', dt=0.1, save_freq=0.1)\n", - "fig = results3.outputs.plot(title='fast')" + "plt.plot(default_throw.times, default_throw.outputs, label=\"Default\", color=\"tab:blue\")\n", + "plt.plot(slow_throw.times, slow_throw.outputs, label=\"Slow\", color=\"tab:orange\")\n", + "plt.plot(fast_throw.times, fast_throw.outputs, label=\"Fast\", color=\"tab:green\")\n", + "\n", + "plt.legend()\n", + "plt.title(\"Simulation with throws at different speeds\", pad=10)\n", + "plt.xlabel(\"time (s)\")\n", + "plt.ylabel(\"position (m)\")" ] }, { @@ -1266,12 +1343,22 @@ "outputs": [], "source": [ "m_e = ThrownObject(g=-9.81) # Earth gravity\n", - "results_earth = m_e.simulate_to_threshold(events='impact', dt=0.1, save_freq=0.1)\n", - "fig = results_earth.outputs.plot(title='Earth')\n", + "results_earth = m_e.simulate_to_threshold(events=\"impact\", dt=0.1, save_freq=0.1)\n", "\n", "m_j = ThrownObject(g=-24.79) # Jupiter gravity\n", - "results_jupiter = m_j.simulate_to_threshold(events='impact', dt=0.1, save_freq=0.1)\n", - "fig = results_jupiter.outputs.plot(title='Jupiter')" + "results_jupiter = m_j.simulate_to_threshold(events=\"impact\", dt=0.1, save_freq=0.1)\n", + "\n", + "plt.figure(figsize=(10, 8))\n", + "\n", + "plt.plot(results_earth.times, results_earth.outputs, label=\"Earth\", color=\"tab:blue\")\n", + "plt.plot(\n", + " results_jupiter.times, results_jupiter.outputs, label=\"Jupiter\", color=\"tab:orange\"\n", + ")\n", + "\n", + "plt.legend()\n", + "plt.title(\"Simulation with throws under Earth's and Jupiter's gravity\", pad=10)\n", + "plt.xlabel(\"time (s)\")\n", + "plt.ylabel(\"position (m)\")" ] }, { @@ -1280,39 +1367,784 @@ "source": [ "Model parameters are used to configure a model to accurately describe the system of interest.\n", "\n", - "For a simple system like the ThrownObject, model parameters are simple and measurable. For most systems, there are many parameters that are difficult to estimate. 
For these, parameter estimation comes into play. See 2. Parameter Estimation for more details" + "For a simple system like the ThrownObject, model parameters are simple and measurable. For most systems, there are many parameters that are difficult to estimate. For these, parameter estimation comes into play. See __[02 Parameter_Estimation](02_Parameter%20Estimation.ipynb)__ for more details" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Noise\n", - "**A version of this section will be added in release v1.8**" + "## Noise" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Vectorized Simulation\n", - "**A version of this section will be added in release v1.8**" + "It is impossible to have absolute knowledge of future states due to uncertainties in the system. To account for this, we can incorporate uncertainty into a model through the following forms:\n", + "\n", + "* __Process Noise__: Noise representing uncertainty in the model transition (e.g., model or model configuration uncertainty, uncertainty from simplifying assumptions). Applied during state transition.\n", + "* __Measurement Noise__: Noise representing uncertainty in the measurement process (e.g., sensor sensitivity, sensor misalignments, environmental effects). Applied during estimation of outputs from states.\n", + "* __Future Loading Noise__: Noise representing uncertainty in the future loading estimates (e.g., uncertainty from incomplete knowledge of future loading). It is the responsibility of the user to apply Future Loading Noise as appropriate in the supplied future loading method.\n", + "\n", + "Other types of uncertainty will be introduced in __[08 Prediction](08_Prediction.ipynb)__.\n", + "\n", + "In this section, we will be examining multiple approaches for adding process and measurement noise. For an example of future loading noise, please refer to the `GaussianNoiseLoadWrapper` in the [Gaussian Noise in Loading](#gaussian-noise-in-loading) section.\n", + "\n", + "We will start by importing the ThrownObject model for simulation and matplotlib for graphing." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import ThrownObject\n", + "import matplotlib.pyplot as plt\n", + "from matplotlib.lines import Line2D" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Configuring Simulation\n", - "**A version of this notebook will be added in release v1.8, including:**\n", - "* t0, x\n", - "* integration_method" + "We will now define the configuration of the simulation and some helper functions to print the results. For this example, we will not be passing in a future load since the ThrownObject model has no inputs and we cannot load the system (i.e., we cannot affect it once it's in the air)." 
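As a compact preview of the configuration forms demonstrated in the examples that follow, process and measurement noise can be specified as a single value, as per-key values, or together with a distribution name:

```python
from progpy.models import ThrownObject

m1 = ThrownObject(process_noise=False)              # no process noise
m2 = ThrownObject(process_noise=25)                 # same std applied to every state
m3 = ThrownObject(process_noise={"x": 30, "v": 1})  # per-state std
m4 = ThrownObject(process_noise={"x": 30, "v": 1}, process_noise_dist="uniform")
m5 = ThrownObject(measurement_noise={"x": 10}, measurement_noise_dist="triangular")
```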
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"events\": \"impact\",\n", + " \"dt\": 0.005,\n", + " \"save_freq\": 0.5,\n", + "}\n", + "\n", + "\n", + "def print_results(simulated_results):\n", + " print(\"states:\")\n", + " for t, x in zip(simulated_results.times, simulated_results.states):\n", + " print(\"\\t{:.2f}s: {}\".format(t, x))\n", + "\n", + " print(\"outputs:\")\n", + " for t, x in zip(simulated_results.times, simulated_results.outputs):\n", + " print(\"\\t{:.2f}s: {}\".format(t, x))\n", + "\n", + " # The simulation stopped at impact, so the last element of times is the impact time\n", + " print(\"\\nimpact time: {:.2f}s\".format(simulated_results.times[-1]))\n", + "\n", + "\n", + "def plot_comparison(no_noise_simulation, simulated_results):\n", + " plt.figure(figsize=(10, 8))\n", + "\n", + " print_noise_plot(simulated_results)\n", + " print_no_noise_plot(no_noise_simulation)\n", + "\n", + " color_legend = [\n", + " Line2D(\n", + " [0],\n", + " [0],\n", + " marker=\"o\",\n", + " markersize=10,\n", + " color=\"tab:blue\",\n", + " linestyle=\"None\",\n", + " label=\"Position (x) [m]\",\n", + " ),\n", + " Line2D(\n", + " [0],\n", + " [0],\n", + " marker=\"o\",\n", + " markersize=10,\n", + " color=\"tab:orange\",\n", + " linestyle=\"None\",\n", + " label=\"Velocity (v) [m/s]\",\n", + " ),\n", + " ]\n", + "\n", + " linestyle_legend = [\n", + " Line2D([0], [0], color=\"black\", lw=2, linestyle=\"-\", label=\"Noise\"),\n", + " Line2D([0], [0], color=\"black\", lw=2, linestyle=\"--\", label=\"No noise\"),\n", + " ]\n", + "\n", + " plt.legend(handles=color_legend + linestyle_legend, bbox_to_anchor=(1, 1))\n", + " plt.xlabel(\"time (s)\")\n", + " plt.ylabel(\"state\")\n", + "\n", + "\n", + "def print_no_noise_plot(no_noise_simulation):\n", + " no_noise_x = [state[\"x\"] for state in no_noise_simulation.states]\n", + " no_noise_v = [state[\"v\"] for state in no_noise_simulation.states]\n", + "\n", + " plt.plot(\n", + " no_noise_simulation.times,\n", + " no_noise_x,\n", + " label=\"Position (x) [m]\",\n", + " color=\"#155d8d\",\n", + " linestyle=\"dashed\",\n", + " )\n", + " plt.plot(\n", + " no_noise_simulation.times,\n", + " no_noise_v,\n", + " label=\"Velocity (v) [m/s]\",\n", + " color=\"#d65a08\",\n", + " linestyle=\"dashed\",\n", + " )\n", + " plt.legend()\n", + "\n", + "\n", + "def print_noise_plot(simulation):\n", + " noise_x = [state[\"x\"] for state in simulation.states]\n", + " noise_v = [state[\"v\"] for state in simulation.states]\n", + " plt.plot(simulation.times, noise_x, label=\"Position (x) [m]\", color=\"tab:blue\")\n", + " plt.plot(simulation.times, noise_v, label=\"Velocity (v) [m/s]\", color=\"tab:orange\")\n", + " plt.legend()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's start with an example with no noise." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m = ThrownObject(process_noise=False)\n", + "print(\"Simulation without noise\")\n", + "simulated_results = m.simulate_to_threshold(**config)\n", + "\n", + "print_results(simulated_results)\n", + "print_no_noise_plot(simulated_results)\n", + "plt.xlabel(\"time (s)\")\n", + "plt.ylabel(\"state\")\n", + "\n", + "plt.title(\"Simulation with no noise\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, we see a clean parabola for position and linear decrease in speed. 
Exactly what we would expect for this model without noise.\n", + "\n", + "Let's save the simulated results from this example into the variable `no_noise_simulation` to use as a comparison reference to the next few examples showing simulations with noise." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "no_noise_simulation = simulated_results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here is an example of a simulation with normal (i.e., Gaussian) process noise with a standard deviation of 25 applied to every state. Even though this standard deviation is quite high, we'll notice the curves aren't dramatically different due to the small step size. At every step, noise is resampled, so the noise added on a single step may be large but cancelled out over many steps." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "process_noise = 25\n", + "\n", + "m = ThrownObject(process_noise=process_noise)\n", + "print(\"Simulation with process noise\")\n", + "simulated_results = m.simulate_to_threshold(**config)\n", + "\n", + "print_results(simulated_results)\n", + "plot_comparison(no_noise_simulation, simulated_results)\n", + "plt.title(\"Simulation with no noise vs. process noise\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note the deviation from the 'no noise' run in both states. Noise is sampled randomly, so if you were to rerun the code above again, you would see a slightly different curve.\n", + "We can also specify different amounts of noise on different states. This is an example of a simulation with more process noise on position than velocity. Here you should see a smooth curve for the velocity and a noisy curve for position." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "process_noise = {\"x\": 30, \"v\": 1}\n", + "\n", + "m = ThrownObject(process_noise=process_noise)\n", + "print(\"Simulation with more process noise on position than velocity\")\n", + "simulated_results = m.simulate_to_threshold(**config)\n", + "\n", + "print_results(simulated_results)\n", + "plot_comparison(no_noise_simulation, simulated_results)\n", + "plt.title(\n", + " \"Simulation with no noise vs. more process noise on position than velocity\", pad=10\n", + ")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also define the shape of the noise to be uniform or triangular instead of normal. The image below shows the shapes of these distributions. Users might select a different noise shape to better capture the nature and shape of the state transition uncertainty.\n", + "\n", + "![Graphs of normal, triangular, and uniform distributions](distributions.png \"Distributions\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + " This example demonstrates a uniform process noise distribution." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "process_noise = {\"x\": 30, \"v\": 1}\n", + "process_noise_dist = \"uniform\"\n", + "model_config = {\n", + " \"process_noise_dist\": process_noise_dist,\n", + " \"process_noise\": process_noise,\n", + "}\n", + "\n", + "m = ThrownObject(**model_config)\n", + "print(\"Simulation with uniform process noise distribution\")\n", + "simulated_results = m.simulate_to_threshold(**config)\n", + "\n", + "print_results(simulated_results)\n", + "plot_comparison(no_noise_simulation, simulated_results)\n", + "plt.title(\"Simulation with no noise vs. uniform process noise distribution\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The above examples all focused on process noise. We can apply measurement noise in the same way. \n", + "\n", + "Since measurement noise is applied during the estimation of the outputs from the states, in this example, we will see that the `x` outputs differ from the `x` states. Unlike the examples with process noise, the `x` states in this simulation are equal to the `x` states and outputs of a simulation without noise, as measurement noise is not applied until later.\n", + "\n", + "In the graph below, we can observe the measurement noise reflected in the `x` outputs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "measurement_noise = {\"x\": 10}\n", + "measurement_noise_dist = \"triangular\"\n", + "model_config = {\n", + " \"measurement_noise_dist\": measurement_noise_dist,\n", + " \"measurement_noise\": measurement_noise,\n", + "}\n", + "\n", + "m = ThrownObject(**model_config)\n", + "print(\"Simulation with triangular measurement noise distribution\")\n", + "simulated_results = m.simulate_to_threshold(**config)\n", + "\n", + "print_results(simulated_results)\n", + "\n", + "plt.figure(figsize=(10, 8))\n", + "\n", + "noise_x = [state[\"x\"] for state in simulated_results.states]\n", + "noise_output_x = [state[\"x\"] for state in simulated_results.outputs]\n", + "no_noise_output_x = [state[\"x\"] for state in no_noise_simulation.outputs]\n", + "\n", + "plt.plot(simulated_results.times, noise_x, label=\"With noise state\", color=\"tab:blue\")\n", + "plt.plot(\n", + " simulated_results.times,\n", + " noise_output_x,\n", + " label=\"With noise output\",\n", + " color=\"tab:purple\",\n", + ")\n", + "plt.plot(\n", + " simulated_results.times,\n", + " no_noise_output_x,\n", + " label=\"No noise output\",\n", + " color=\"black\",\n", + " linestyle=(0, (5, 4)),\n", + ")\n", + "\n", + "plt.legend()\n", + "plt.xlabel(\"time\")\n", + "plt.ylabel(\"position (m)\")\n", + "plt.title(\n", + " \"Simulation with no noise vs. triangular measurement noise distribution\", pad=10\n", + ")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In some cases, users might want to define some custom noise profile. This is especially important for complex cases where the amount of noise changes as a function of state.\n", + "To demonstrate this, we'll demonstrate a scenario where process noise on velocity is proportional to state. This could represent a case where the model is unstable for high velocities. 
In this example, we will define a helper function `apply_proportional_process_noise`, which will add noise to v that increases as the object is going faster.\n", + "\n", + "If we wanted to apply noise in a replicable manner, we could set the numpy random seed to a fixed value before a run, e.g., `numpy.random.seed(42)`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def apply_proportional_process_noise(self, x, dt=1):\n", + " x[\"v\"] -= dt * 0.5 * x[\"v\"]\n", + " return x\n", + "\n", + "\n", + "model_config = {\"process_noise\": apply_proportional_process_noise}\n", + "\n", + "m = ThrownObject(**model_config)\n", + "print(\"Simulation with proportional noise on velocity\")\n", + "simulated_results = m.simulate_to_threshold(**config)\n", + "\n", + "print_results(simulated_results)\n", + "plot_comparison(no_noise_simulation, simulated_results)\n", + "plt.title(\"Simulation with no noise vs. proportional noise on velocity\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Vectorized Simulation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Some models support vectorization, where multiple states are simulated in parallel. This is a more efficient simulation technique than simulating from multiple states in parallel. We will import the ThrownObject model and confirm that the model supports vectorization by checking the `is_vectorized` property." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models.thrown_object import ThrownObject\n", + "\n", + "m = ThrownObject()\n", + "m.is_vectorized" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, we will be using `simulate_to_threshold` with vectorized states, which are a representation of a system's current conditions. The ThrownObject model will be used to simulate multiple thrown objects. Let's start by getting the necessary imports." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from numpy import array\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will also define a helper function to visualize the four throws." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def print_vectorized_sim_plots(simulated_results):\n", + " plt.figure(figsize=(10, 8))\n", + "\n", + " first_throw_x = [state[\"x\"][0] for state in simulated_results.states]\n", + " first_throw_v = [state[\"v\"][0] for state in simulated_results.states]\n", + "\n", + " second_throw_x = [state[\"x\"][1] for state in simulated_results.states]\n", + " second_throw_v = [state[\"v\"][1] for state in simulated_results.states]\n", + "\n", + " third_throw_x = [state[\"x\"][2] for state in simulated_results.states]\n", + " third_throw_v = [state[\"v\"][2] for state in simulated_results.states]\n", + "\n", + " fourth_throw_x = [state[\"x\"][3] for state in simulated_results.states]\n", + " fourth_throw_v = [state[\"v\"][3] for state in simulated_results.states]\n", + "\n", + " plt.plot(\n", + " simulated_results.times, first_throw_x, color=\"tab:blue\", linestyle=\"dashed\"\n", + " )\n", + " plt.plot(simulated_results.times, first_throw_v, color=\"tab:blue\")\n", + "\n", + " plt.plot(\n", + " simulated_results.times, second_throw_x, color=\"tab:orange\", linestyle=\"dashed\"\n", + " )\n", + " plt.plot(simulated_results.times, second_throw_v, color=\"tab:orange\")\n", + "\n", + " plt.plot(\n", + " simulated_results.times, third_throw_x, color=\"tab:cyan\", linestyle=\"dashed\"\n", + " )\n", + " plt.plot(simulated_results.times, third_throw_v, color=\"tab:cyan\")\n", + "\n", + " plt.plot(\n", + " simulated_results.times, fourth_throw_x, color=\"tab:purple\", linestyle=\"dashed\"\n", + " )\n", + " plt.plot(simulated_results.times, fourth_throw_v, color=\"tab:purple\")\n", + "\n", + " plt.xlabel(\"time (s)\")\n", + " plt.ylabel(\"state\")\n", + "\n", + " color_legend = [\n", + " Line2D(\n", + " [0],\n", + " [0],\n", + " marker=\"o\",\n", + " markersize=10,\n", + " color=\"tab:blue\",\n", + " linestyle=\"None\",\n", + " label=\"Throw 1\",\n", + " ),\n", + " Line2D(\n", + " [0],\n", + " [0],\n", + " marker=\"o\",\n", + " markersize=10,\n", + " color=\"tab:orange\",\n", + " linestyle=\"None\",\n", + " label=\"Throw 2\",\n", + " ),\n", + " Line2D(\n", + " [0],\n", + " [0],\n", + " marker=\"o\",\n", + " markersize=10,\n", + " color=\"tab:cyan\",\n", + " linestyle=\"None\",\n", + " label=\"Throw 3\",\n", + " ),\n", + " Line2D(\n", + " [0],\n", + " [0],\n", + " marker=\"o\",\n", + " markersize=10,\n", + " color=\"tab:purple\",\n", + " linestyle=\"None\",\n", + " label=\"Throw 4\",\n", + " ),\n", + " ]\n", + "\n", + " linestyle_legend = [\n", + " Line2D([0], [0], color=\"black\", lw=2, linestyle=\"-\", label=\"Position (x) [m]\"),\n", + " Line2D(\n", + " [0], [0], color=\"black\", lw=2, linestyle=\"--\", label=\"Velocity (v) [m/s]\"\n", + " ),\n", + " ]\n", + "\n", + " plt.legend(handles=color_legend + linestyle_legend, bbox_to_anchor=(1, 1))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now set up the vectorized initial state. In this example, we will define 4 throws of varying positions (x) and strengths (v) in `first_state`. We will then simulate to the threshold using this state. We should see the simulation stop once all objects hits the ground since the `event_strategy` is 'all'." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "first_state = {\"x\": array([1.75, 1.8, 1.85, 1.9]), \"v\": array([35, 39, 22, 47])}\n", + "\n", + "m = ThrownObject()\n", + "simulated_results = m.simulate_to_threshold(\n", + " x=first_state,\n", + " events=\"impact\",\n", + " event_strategy=\"all\",\n", + " print=True,\n", + " dt=0.1,\n", + " save_freq=1,\n", + ")\n", + "\n", + "print_vectorized_sim_plots(simulated_results)\n", + "plt.title(\"Vectorized simulation until any object hits the ground\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Using a vectorized simulation is more efficient than separately simulating multiple cases. This can be useful when we might need to compare multiple options or if there are a discrete set of possible starting states." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configuring Simulation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here we will explore other ways to configure simulations. These approaches can be used to configure a simulation even further to match a use case. We will use the Battery model for these examples." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import BatteryCircuit as Battery\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's create a model object and define a piecewise future loading function." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "batt = Battery()\n", + "\n", + "\n", + "def future_loading(t, x=None):\n", + " if t < 600:\n", + " i = 2\n", + " elif t < 900:\n", + " i = 1\n", + " elif t < 1800:\n", + " i = 4\n", + " elif t < 3000:\n", + " i = 2\n", + " else:\n", + " i = 3\n", + " return batt.InputContainer({\"i\": i})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Simulating From a Known Time" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There may be cases where we want to simulate from a time other than 0. For example, we may have a future loading profile that is a function of time and need to pause our simulation midway (e.g., the results inform a decision) before continuing from where we left off.\n", + "\n", + "To do this, we can adjust `t0`. The following example shows a battery simulation that starts at 700 seconds." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"save_freq\": 100,\n", + " \"dt\": 2,\n", + " \"t0\": 700,\n", + "}\n", + "\n", + "simulated_results = batt.simulate_to_threshold(future_loading, **config)\n", + "\n", + "print(\"First timestamp in simulation :\", simulated_results.times[0])\n", + "\n", + "simulated_results.inputs.plot(xlabel=\"time (s)\", ylabel=\"current draw (amps)\")\n", + "plt.scatter(\n", + " simulated_results.times[0], simulated_results.inputs[0][\"i\"], color=\"red\", zorder=5\n", + ")\n", + "plt.annotate(\n", + " f\"({simulated_results.times[0]}, {simulated_results.inputs[0]['i']})\",\n", + " xy=(\n", + " simulated_results.times[0],\n", + " simulated_results.inputs[0][\"i\"],\n", + " ), # Point to annotate\n", + " xytext=(simulated_results.times[0], simulated_results.inputs[0][\"i\"] + 0.05),\n", + " fontsize=10,\n", + " horizontalalignment=\"center\",\n", + " verticalalignment=\"bottom\",\n", + ")\n", + "plt.show()\n", + "\n", + "simulated_results.outputs.plot(keys=[\"v\"], xlabel=\"time (s)\", ylabel=\"voltage (V)\")\n", + "plt.scatter(\n", + " simulated_results.times[0], simulated_results.outputs[0][\"v\"], color=\"red\", zorder=5\n", + ")\n", + "plt.annotate(\n", + " f\"({simulated_results.times[0]}, {simulated_results.outputs[0]['v']})\",\n", + " xy=(\n", + " simulated_results.times[0],\n", + " simulated_results.outputs[0][\"v\"],\n", + " ), # Point to annotate\n", + " xytext=(simulated_results.times[0], simulated_results.outputs[0][\"v\"] + 0.04),\n", + " fontsize=10,\n", + " horizontalalignment=\"left\",\n", + " verticalalignment=\"top\",\n", + ")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can observe how different input current draws affect the voltage output curve. Generally, the graphs indicate that drawing a higher current leads to a lower voltage." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Integration Method" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Simulation is essentially the process of integrating a model forward with time. By default, a simple Euler integration is used to propagate the model forward. Advanced users can change the numerical integration method to affect the simulation accuracy and runtime. This is done using the `integration_method` argument in `simulate_to()`, `simulate_to_threshold()`, or the model parameters like `m.parameters['integration_method'] = 'rk4'`. Note that the integration method can only be changed for continuous models.\n", + "\n", + "Let's look at an example of simulating with both the default Euler integration method and with the Runge-Kutta fourth-order (RK4) integration method. Since RK4 is a higher-order integration method, it is more accurate than a simple Euler integration. However, it is also more complex and therefore more computationally expensive. Let's compare the results of these two techniques.\n", + "\n", + "First, we'll integrate with a step size of 1. Here, we can see that the two integration techniques are nearly identical. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"save_freq\": 10,\n", + " \"dt\": 1,\n", + "}\n", + "\n", + "rk4_config = {\"save_freq\": 10, \"dt\": 1, \"integration_method\": \"rk4\"}\n", + "\n", + "simulated_results = batt.simulate_to_threshold(future_loading, **config)\n", + "rk4_simulated_results = batt.simulate_to_threshold(future_loading, **rk4_config)\n", + "\n", + "\n", + "def plot_integration_method_comparison(simulated_results, rk4_simulated_results):\n", + " euler_v = [o[\"v\"] for o in simulated_results.outputs]\n", + " rk4_v = [o[\"v\"] for o in rk4_simulated_results.outputs]\n", + "\n", + " plt.plot(simulated_results.times, euler_v)\n", + " plt.plot(simulated_results.times, rk4_v, linestyle=\"dashed\")\n", + " plt.xlabel(\"time (s)\")\n", + " plt.ylabel(\"voltage (V)\")\n", + " plt.legend([\"Euler\", \"RK4\"])\n", + "\n", + "\n", + "plot_integration_method_comparison(simulated_results, rk4_simulated_results)\n", + "plt.title(\"Simulation with step size 1 and Euler vs. RK4 integration method\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's increase the step size to 2. Note that simulating with a larger step size results in a less accurate simulation result. In this case, the lower-accuracy Euler method is becoming unstable, but the higher-order RK4 method is still resulting in an accurate solution." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"save_freq\": 10,\n", + " \"dt\": 2,\n", + "}\n", + "\n", + "rk4_config = {\"save_freq\": 10, \"dt\": 2, \"integration_method\": \"rk4\"}\n", + "\n", + "simulated_results = batt.simulate_to_threshold(future_loading, **config)\n", + "rk4_simulated_results = batt.simulate_to_threshold(future_loading, **rk4_config)\n", + "\n", + "plot_integration_method_comparison(simulated_results, rk4_simulated_results)\n", + "plt.title(\"Simulation with step size 2 and Euler vs. RK4 integration method\", pad=10)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Based on the graph, we can see differences in voltage outputs between the two integration methods. We can see that the simulation using the `RK4` integration method produces a smoother and more accurate curve compared to the simulation using the `Euler` integration method. This is expected, as `RK4` is a higher-order integration method than `Euler` and is generally more accurate, albeit slower to simulate." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this notebook, we've demonstrated how to conduct simulations with prognostics models. The next notebook __[02 Parameter_Estimation](02_Parameter%20Estimation.ipynb)__ will examine how we can estimate and tune model parameters so that simulations can best match the behavior observed in some available data." 
] } ], "metadata": { "kernelspec": { - "display_name": "Python 3.12.0 64-bit", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -1327,14 +2159,8 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.0" - }, - "orig_nbformat": 4, - "vscode": { - "interpreter": { - "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49" - } } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/docs/_downloads/9d2de0be58a696dca10fad7198dab5ff/full_lstm_model.ipynb b/docs/_downloads/9d2de0be58a696dca10fad7198dab5ff/full_lstm_model.ipynb index 8563b703..77808a37 100644 --- a/docs/_downloads/9d2de0be58a696dca10fad7198dab5ff/full_lstm_model.ipynb +++ b/docs/_downloads/9d2de0be58a696dca10fad7198dab5ff/full_lstm_model.ipynb @@ -1,54 +1,196 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample building a full model with events and thresholds using LSTMStateTransitionModel. \n\nIn this example, we generate fake data using the ThrownObject model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. \n\nWe then create a subclass of the LSTMStateTransitionModel, defining the event_state and threshold equations as a function of output. We use the generated model and compare to the original model.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom progpy.data_models import LSTMStateTransitionModel\nfrom progpy.models import ThrownObject\n\ndef run_example():\n # -----------------------------------------------------\n # Method 1 - manual definition\n # In this example we complete the models by manually defining event_state \n # and thresholds_met as function of output.\n # -----------------------------------------------------\n TIMESTEP = 0.01\n m = ThrownObject()\n def future_loading(t, x=None):\n return m.InputContainer({}) # No input for thrown object \n\n # Step 1: Generate additional data\n # We will use data generated above, but we also want data at additional timesteps \n print('Generating data...')\n data = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP, dt=TIMESTEP)\n data_half = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2)\n data_quarter = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4)\n data_twice = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2)\n data_four = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*4, dt=TIMESTEP*4)\n\n # Step 2: Data Prep\n # We need to add the timestep as a input\n u = np.array([[TIMESTEP] for _ in data.inputs])\n u_half = np.array([[TIMESTEP/2] for _ in data_half.inputs])\n u_quarter = np.array([[TIMESTEP/4] for _ in data_quarter.inputs])\n u_twice = np.array([[TIMESTEP*2] for _ in data_twice.inputs])\n u_four = np.array([[TIMESTEP*4] for _ in data_four.inputs])\n\n # In this case we are saying that 
velocity is directly measurable, \n # unlike the original model. This is necessary to calculate the events.\n # Since the outputs will then match the states, we pass in the states below\n\n u_data = [u, u_half, u_quarter, u_twice, u_four]\n z_data = [data.states, data_half.states, data_quarter.states, data_twice.states, data_four.states]\n\n # Step 3: Create model\n print('Creating model...')\n\n # Create a subclass of LSTMStateTransitionModel, \n # overridding event-related methods and members\n class LSTMThrownObject(LSTMStateTransitionModel):\n events = [\n 'falling', # Event- object is falling\n 'impact' # Event- object has impacted ground\n ]\n\n def initialize(self, u=None, z=None):\n # Add logic required for thrown object\n self.max_x = 0.0\n return super().initialize(u, z)\n\n def event_state(self, x):\n # Using class name instead of self allows the class to be subclassed\n z = LSTMThrownObject.output(self, x)\n # Logic from ThrownObject.event_state, using output instead of state\n self.max_x = max(self.max_x, z['x']) # Maximum altitude\n return {\n 'falling': max(z['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed\n 'impact': max(z['x']/self.max_x,0) # 1 until falling begins, then it's fraction of height\n }\n\n def threshold_met(self, x):\n z = LSTMThrownObject.output(self, x)\n # Logic from ThrownObject.threshold_met, using output instead of state\n return {\n 'falling': z['v'] < 0,\n 'impact': z['x'] <= 0\n }\n \n # Step 4: Generate Model\n print('Building model...')\n m2 = LSTMThrownObject.from_data(\n inputs=u_data, \n outputs=z_data,\n window=4, \n epochs=30, \n input_keys = ['dt'],\n output_keys = m.states)\n\n # Step 5: Simulate with model\n t_counter = 0\n x_counter = m.initialize()\n def future_loading3(t, x = None):\n nonlocal t_counter, x_counter\n z = m2.InputContainer({'x_t-1': x_counter['x'], 'v_t-1': x_counter['v'], 'dt': t - t_counter})\n x_counter = m.next_state(x_counter, future_loading(t), t - t_counter)\n t_counter = t\n return z\n\n # Use new dt, not used in training\n # Using a dt not used in training will demonstrate the model's \n # ability to handle different timesteps not part of training set\n data = m.simulate_to_threshold(future_loading, threshold_keys='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3)\n results3 = m2.simulate_to_threshold(future_loading3, threshold_keys='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3)\n\n # Step 6: Compare Results\n print('Comparing results...')\n print('Predicted impact time:')\n print('\\tOriginal: ', data.times[-1])\n print('\\tLSTM: ', results3.times[-1])\n data.outputs.plot(title='original model')\n results3.outputs.plot(title='generated model')\n plt.show()\n\nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample building a full model with events and thresholds using LSTMStateTransitionModel. 
\n\nIn this example, we generate fake data using the ThrownObject model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. \n\nWe then create a subclass of the LSTMStateTransitionModel, defining the event_state and threshold equations as a function of output. We use the generated model and compare to the original model.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "from progpy.data_models import LSTMStateTransitionModel\n", + "from progpy.models import ThrownObject\n", + "\n", + "\n", + "def run_example():\n", + " # -----------------------------------------------------\n", + " # Method 1 - manual definition\n", + " # In this example we complete the models by manually defining event_state\n", + " # and thresholds_met as function of output.\n", + " # -----------------------------------------------------\n", + " TIMESTEP = 0.01\n", + " m = ThrownObject()\n", + "\n", + " def future_loading(t, x=None):\n", + " return m.InputContainer({}) # No input for thrown object\n", + "\n", + " # Step 1: Generate additional data\n", + " # We will use data generated above, but we also want data at additional timesteps\n", + " print(\"Generating data...\")\n", + " data = m.simulate_to_threshold(\n", + " future_loading, threshold_keys=\"impact\", save_freq=TIMESTEP, dt=TIMESTEP\n", + " )\n", + " data_half = m.simulate_to_threshold(\n", + " future_loading, threshold_keys=\"impact\", save_freq=TIMESTEP / 2, dt=TIMESTEP / 2\n", + " )\n", + " data_quarter = m.simulate_to_threshold(\n", + " future_loading, threshold_keys=\"impact\", save_freq=TIMESTEP / 4, dt=TIMESTEP / 4\n", + " )\n", + " data_twice = m.simulate_to_threshold(\n", + " future_loading, threshold_keys=\"impact\", save_freq=TIMESTEP * 2, dt=TIMESTEP * 2\n", + " )\n", + " data_four = m.simulate_to_threshold(\n", + " future_loading, threshold_keys=\"impact\", save_freq=TIMESTEP * 4, dt=TIMESTEP * 4\n", + " )\n", + "\n", + " # Step 2: Data Prep\n", + " # We need to add the timestep as a input\n", + " u = np.array([[TIMESTEP] for _ in data.inputs])\n", + " u_half = np.array([[TIMESTEP / 2] for _ in data_half.inputs])\n", + " u_quarter = np.array([[TIMESTEP / 4] for _ in data_quarter.inputs])\n", + " u_twice = np.array([[TIMESTEP * 2] for _ in data_twice.inputs])\n", + " u_four = np.array([[TIMESTEP * 4] for _ in data_four.inputs])\n", + "\n", + " # In this case we are saying that velocity is directly measurable,\n", + " # unlike the original model. 
This is necessary to calculate the events.\n", + " # Since the outputs will then match the states, we pass in the states below\n", + "\n", + " u_data = [u, u_half, u_quarter, u_twice, u_four]\n", + " z_data = [\n", + " data.states,\n", + " data_half.states,\n", + " data_quarter.states,\n", + " data_twice.states,\n", + " data_four.states,\n", + " ]\n", + "\n", + " # Step 3: Create model\n", + " print(\"Creating model...\")\n", + "\n", + " # Create a subclass of LSTMStateTransitionModel,\n", + " # overridding event-related methods and members\n", + " class LSTMThrownObject(LSTMStateTransitionModel):\n", + " events = [\n", + " \"falling\", # Event- object is falling\n", + " \"impact\", # Event- object has impacted ground\n", + " ]\n", + "\n", + " def initialize(self, u=None, z=None):\n", + " # Add logic required for thrown object\n", + " self.max_x = 0.0\n", + " return super().initialize(u, z)\n", + "\n", + " def event_state(self, x):\n", + " # Using class name instead of self allows the class to be subclassed\n", + " z = LSTMThrownObject.output(self, x)\n", + " # Logic from ThrownObject.event_state, using output instead of state\n", + " self.max_x = max(self.max_x, z[\"x\"]) # Maximum altitude\n", + " return {\n", + " \"falling\": max(\n", + " z[\"v\"] / self.parameters[\"throwing_speed\"], 0\n", + " ), # Throwing speed is max speed\n", + " \"impact\": max(\n", + " z[\"x\"] / self.max_x, 0\n", + " ), # 1 until falling begins, then it's fraction of height\n", + " }\n", + "\n", + " def threshold_met(self, x):\n", + " z = LSTMThrownObject.output(self, x)\n", + " # Logic from ThrownObject.threshold_met, using output instead of state\n", + " return {\"falling\": z[\"v\"] < 0, \"impact\": z[\"x\"] <= 0}\n", + "\n", + " # Step 4: Generate Model\n", + " print(\"Building model...\")\n", + " m2 = LSTMThrownObject.from_data(\n", + " inputs=u_data,\n", + " outputs=z_data,\n", + " window=4,\n", + " epochs=30,\n", + " input_keys=[\"dt\"],\n", + " output_keys=m.states,\n", + " )\n", + "\n", + " # Step 5: Simulate with model\n", + " t_counter = 0\n", + " x_counter = m.initialize()\n", + "\n", + " def future_loading3(t, x=None):\n", + " nonlocal t_counter, x_counter\n", + " z = m2.InputContainer(\n", + " {\"x_t-1\": x_counter[\"x\"], \"v_t-1\": x_counter[\"v\"], \"dt\": t - t_counter}\n", + " )\n", + " x_counter = m.next_state(x_counter, future_loading(t), t - t_counter)\n", + " t_counter = t\n", + " return z\n", + "\n", + " # Use new dt, not used in training\n", + " # Using a dt not used in training will demonstrate the model's\n", + " # ability to handle different timesteps not part of training set\n", + " data = m.simulate_to_threshold(\n", + " future_loading, threshold_keys=\"impact\", dt=TIMESTEP * 3, save_freq=TIMESTEP * 3\n", + " )\n", + " results3 = m2.simulate_to_threshold(\n", + " future_loading3,\n", + " threshold_keys=\"impact\",\n", + " dt=TIMESTEP * 3,\n", + " save_freq=TIMESTEP * 3,\n", + " )\n", + "\n", + " # Step 6: Compare Results\n", + " print(\"Comparing results...\")\n", + " print(\"Predicted impact time:\")\n", + " print(\"\\tOriginal: \", data.times[-1])\n", + " print(\"\\tLSTM: \", results3.times[-1])\n", + " data.outputs.plot(title=\"original model\")\n", + " results3.outputs.plot(title=\"generated model\")\n", + " plt.show()\n", + "\n", + "\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": 
"ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/a2f67c14d1e4f8cf993ce8b884f782d6/derived_params.py b/docs/_downloads/a2f67c14d1e4f8cf993ce8b884f782d6/derived_params.py index fd479556..7d35e46a 100644 --- a/docs/_downloads/a2f67c14d1e4f8cf993ce8b884f782d6/derived_params.py +++ b/docs/_downloads/a2f67c14d1e4f8cf993ce8b884f782d6/derived_params.py @@ -2,33 +2,35 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating ways to use the derived parameters feature for model building. +Example demonstrating ways to use the derived parameters feature for model building. .. dropdown:: More details - + In this example, a derived parameter (i.e., a parameter that is a function of another parameter) are defined for the simple ThrownObject model. These parameters are then calculated whenever their dependency parameters are updated, eliminating the need to calculate each timestep in simulation. The functionality of this feature is then demonstrated. """ from progpy.models.thrown_object import ThrownObject + def run_example(): # For this example we will use the ThrownObject model from the new_model example. # We will extend that model to include a derived parameter - # Let's assume that the throwing_speed was actually a function of thrower_height + # Let's assume that the throwing_speed was actually a function of thrower_height # (i.e., a taller thrower would throw the ball faster). # Here's how we would implement that # Step 1: Define a function for the relationship between thrower_height and throwing_speed. def update_thrown_speed(params): return { - 'throwing_speed': params['thrower_height'] * 21.85 + "throwing_speed": params["thrower_height"] * 21.85 } # Assumes thrown_speed is linear function of height + # Note: one or more parameters can be changed in these functions, whatever parameters are changed are returned in the dictionary # Step 2: Define the param callbacks - ThrownObject.param_callbacks.update({ - 'thrower_height': [update_thrown_speed] - }) # Tell the derived callbacks feature to call this function when thrower_height changes. + ThrownObject.param_callbacks.update( + {"thrower_height": [update_thrown_speed]} + ) # Tell the derived callbacks feature to call this function when thrower_height changes. # Note: Usually we would define this method within the class # for this example, we're doing it separately to improve readability # Note2: You can also have more than one function be called when a single parameter is changed. @@ -36,15 +38,23 @@ def update_thrown_speed(params): # Step 3: Use! 
obj = ThrownObject() - print("Default Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format(obj.parameters['thrower_height'], obj.parameters['throwing_speed'])) - + print( + "Default Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format( + obj.parameters["thrower_height"], obj.parameters["throwing_speed"] + ) + ) + # Now let's change the thrower_height print("changing height...") - obj.parameters['thrower_height'] = 1.75 # Our thrower is 1.75 m tall - print("\nUpdated Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format(obj.parameters['thrower_height'], obj.parameters['throwing_speed'])) + obj.parameters["thrower_height"] = 1.75 # Our thrower is 1.75 m tall + print( + "\nUpdated Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format( + obj.parameters["thrower_height"], obj.parameters["throwing_speed"] + ) + ) print("Notice how speed changed automatically with height") -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/a53af2a664a02d85e1a95ed94e2629b1/noise.py b/docs/_downloads/a53af2a664a02d85e1a95ed94e2629b1/noise.py index 2b06be05..ebcd5beb 100644 --- a/docs/_downloads/a53af2a664a02d85e1a95ed94e2629b1/noise.py +++ b/docs/_downloads/a53af2a664a02d85e1a95ed94e2629b1/noise.py @@ -8,106 +8,124 @@ import matplotlib.pyplot as plt from progpy.models.thrown_object import ThrownObject + def run_example(): # Define future loading - def future_load(t=None, x=None): + def future_load(t=None, x=None): # The thrown object model has no inputs- you cannot load the system (i.e., affect it once it's in the air) # So we return an empty input container return m.InputContainer({}) # Define configuration for simulation config = { - 'events': 'impact', # Simulate until the thrown object has impacted the ground - 'dt': 0.005, # Time step (s) - 'save_freq': 0.5, # Frequency at which results are saved (s) + "events": "impact", # Simulate until the thrown object has impacted the ground + "dt": 0.005, # Time step (s) + "save_freq": 0.5, # Frequency at which results are saved (s) } # Define a function to print the results - will be used later def print_results(simulated_results): # Print results - print('states:') - for (t,x) in zip(simulated_results.times, simulated_results.states): - print('\t{:.2f}s: {}'.format(t, x)) + print("states:") + for t, x in zip(simulated_results.times, simulated_results.states): + print("\t{:.2f}s: {}".format(t, x)) - print('outputs:') - for (t,x) in zip(simulated_results.times, simulated_results.outputs): - print('\t{:.2f}s: {}'.format(t, x)) + print("outputs:") + for t, x in zip(simulated_results.times, simulated_results.outputs): + print("\t{:.2f}s: {}".format(t, x)) - print('\nimpact time: {:.2f}s'.format(simulated_results.times[-1])) + print("\nimpact time: {:.2f}s".format(simulated_results.times[-1])) # The simulation stopped at impact, so the last element of times is the impact time # Plot results simulated_results.states.plot() # Ex1: No noise - m = ThrownObject(process_noise = False) + m = ThrownObject(process_noise=False) simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex1: No noise') + plt.title("Ex1: No noise") # Ex2: with noise - same noise applied to every state process_noise = 15 - m = ThrownObject(process_noise = process_noise) # Noise with a std of 0.5 to every state - print('\nExample without same noise for every state') + m = 
ThrownObject( + process_noise=process_noise + ) # Noise with a std of 0.5 to every state + print("\nExample without same noise for every state") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex2: Basic Noise') + plt.title("Ex2: Basic Noise") # Ex3: noise- more noise on position than velocity - process_noise = {'x': 30, 'v': 1} - m = ThrownObject(process_noise = process_noise) - print('\nExample with more noise on position than velocity') + process_noise = {"x": 30, "v": 1} + m = ThrownObject(process_noise=process_noise) + print("\nExample with more noise on position than velocity") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex3: More noise on position') + plt.title("Ex3: More noise on position") # Ex4: noise- Ex3 but uniform - process_noise_dist = 'uniform' - model_config = {'process_noise_dist': process_noise_dist, 'process_noise': process_noise} - m = ThrownObject(**model_config) - print('\nExample with more uniform noise') + process_noise_dist = "uniform" + model_config = { + "process_noise_dist": process_noise_dist, + "process_noise": process_noise, + } + m = ThrownObject(**model_config) + print("\nExample with more uniform noise") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex4: Ex3 with uniform dist') + plt.title("Ex4: Ex3 with uniform dist") # Ex5: noise- Ex3 but triangle - process_noise_dist = 'triangular' - model_config = {'process_noise_dist': process_noise_dist, 'process_noise': process_noise} - m = ThrownObject(**model_config) - print('\nExample with triangular process noise') + process_noise_dist = "triangular" + model_config = { + "process_noise_dist": process_noise_dist, + "process_noise": process_noise, + } + m = ThrownObject(**model_config) + print("\nExample with triangular process noise") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex5: Ex3 with triangular dist') + plt.title("Ex5: Ex3 with triangular dist") # Ex6: Measurement noise # Everything we've done with process noise, we can also do with measurement noise. - # Just use 'measurement_noise' and 'measurement_noise_dist' - measurement_noise = {'x': 20} # For each output - measurement_noise_dist = 'uniform' - model_config = {'measurement_noise_dist': measurement_noise_dist, 'measurement_noise': measurement_noise} - m = ThrownObject(**model_config) - print('\nExample with measurement noise') - print('- Note: outputs are different than state- this is the application of measurement noise') + # Just use 'measurement_noise' and 'measurement_noise_dist' + measurement_noise = {"x": 20} # For each output + measurement_noise_dist = "uniform" + model_config = { + "measurement_noise_dist": measurement_noise_dist, + "measurement_noise": measurement_noise, + } + m = ThrownObject(**model_config) + print("\nExample with measurement noise") + print( + "- Note: outputs are different than state- this is the application of measurement noise" + ) simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex6: Measurement noise') + plt.title("Ex6: Measurement noise") # Ex7: OK, now for something a little more complicated. 
Let's try proportional noise on v only (more variation when it's going faster) # This can be used to do custom or more complex noise distributions - def apply_proportional_process_noise(self, x, dt = 1): - x['v'] -= dt*0.5*x['v'] + def apply_proportional_process_noise(self, x, dt=1): + x["v"] -= dt * 0.5 * x["v"] return x - model_config = {'process_noise': apply_proportional_process_noise} + + model_config = {"process_noise": apply_proportional_process_noise} m = ThrownObject(**model_config) - print('\nExample with proportional noise on velocity') + print("\nExample with proportional noise on velocity") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex7: Proportional noise on velocity') + plt.title("Ex7: Proportional noise on velocity") - print('\nNote: If you would like noise to be applied in a repeatable manner, set the numpy random seed to a fixed value') - print('e.g., numpy.random.seed(42)') + print( + "\nNote: If you would like noise to be applied in a repeatable manner, set the numpy random seed to a fixed value" + ) + print("e.g., numpy.random.seed(42)") plt.show() -# This allows the module to be executed directly -if __name__=='__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/a61393d56728af1a85621dfbed0a6ee1/particle_filter_battery_example.py b/docs/_downloads/a61393d56728af1a85621dfbed0a6ee1/particle_filter_battery_example.py new file mode 100644 index 00000000..3029211f --- /dev/null +++ b/docs/_downloads/a61393d56728af1a85621dfbed0a6ee1/particle_filter_battery_example.py @@ -0,0 +1,109 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. 
+ +""" +In this example the BatteryElectroChemEOD model is used with a particle filter to estimate the state of the battery +""" + +import matplotlib.pyplot as plt +import numpy as np +from progpy import * +from progpy.models import BatteryElectroChemEOD + + +def run_example(): + ## Setup + # Save battery model + # Time increment + dt = 1 + # Process noise + Q_vars = { + "tb": 1, + "Vo": 0.01, + "Vsn": 0.01, + "Vsp": 0.01, + "qnB": 1, + "qnS": 1, + "qpB": 1, + "qpS": 1, + } + # Measurement noise + R_vars = {"t": 2, "v": 0.02} + battery = BatteryElectroChemEOD( + process_noise=Q_vars, measurement_noise=R_vars, dt=dt + ) + load = battery.InputContainer({"i": 1}) # Optimization + + def future_loading(t, x=None): + return load + + # Simulate data until EOD + start_u = future_loading(0) + start_x = battery.initialize(start_u) + start_y = battery.output(start_x) + sim_results = battery.simulate_to_threshold(future_loading, start_y, save_freq=1) + + # Run particle filter + all_particles = [] + n_times = int( + np.round( + np.random.uniform( + len(sim_results.times) * 0.25, len(sim_results.times) * 0.45, 1 + ) + ) + ) # Random current time + + for i in range(n_times): + if i == 0: + batt_pf = state_estimators.ParticleFilter( + model=battery, x0=sim_results.states[i], num_particles=250 + ) + else: + batt_pf.estimate( + t=sim_results.times[i], + u=sim_results.inputs[i], + z=sim_results.outputs[i], + ) + all_particles.append(batt_pf.particles) + + # Mean of the particles + alpha = 0.05 + states_vsn = [s["tb"] for s in sim_results.states] + pf_mean = [ + {key: np.mean(ps[key]) for key in battery.states} for ps in all_particles + ] + pf_low = [ + {key: np.quantile(ps[key], alpha / 2.0) for key in battery.states} + for ps in all_particles + ] + pf_upp = [ + {key: np.quantile(ps[key], 1.0 - alpha / 2.0) for key in battery.states} + for ps in all_particles + ] + print("First State:", pf_mean[0]) + print("Current State:", pf_mean[-1]) + plt.plot( + sim_results.times[:n_times], + [p["tb"] for p in pf_mean], + linewidth=0.7, + color="blue", + ) + plt.plot( + sim_results.times[:n_times], + states_vsn[:n_times], + "--", + linewidth=0.7, + color="red", + ) + plt.fill_between( + sim_results.times[:n_times], + [p["tb"] for p in pf_low], + [p["tb"] for p in pf_upp], + alpha=0.5, + color="blue", + ) + plt.show() + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/a7a0c06abbfa7b99a3472db7fe0cc1a3/02_Parameter Estimation.ipynb b/docs/_downloads/a7a0c06abbfa7b99a3472db7fe0cc1a3/02_Parameter Estimation.ipynb new file mode 100644 index 00000000..d203256b --- /dev/null +++ b/docs/_downloads/a7a0c06abbfa7b99a3472db7fe0cc1a3/02_Parameter Estimation.ipynb @@ -0,0 +1,1137 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 2. Parameter Estimation" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Parameter estimation is used to tune the parameters of a general model so that its behavior matches that of a specific system. For example, the parameters of a battery model can be tuned to configure the model to more accurately describe the behavior of a specific battery.\n", + "\n", + "Generally, parameter estimation is done by tuning the parameters of the model so that the simulation (see __[01 Simulation](01_Simulation.ipynb)__) best matches the behavior observed in some available data. 
This is done using a mixture of data, knowledge (e.g., from system specs), and intuition. For large, complex models, it can be VERY difficult and computationally expensive.\n", + "\n", + "In ProgPy, parameter estimation is done using the `progpy.PrognosticsModel.estimate_params()` method. This method takes input and output data from one or more runs, and uses `scipy.optimize.minimize` function to estimate the parameters of the model. For more information, refer to the documentation [here](https://nasa.github.io/progpy/prog_models_guide.html#parameter-estimation).\n", + "\n", + "A few definitions:\n", + "\n", + "* __`keys`__ `(list[str])`: Parameter keys to optimize\n", + "* __`times`__ `(list[float])`: Array of times for each run\n", + "* __`inputs`__ `(list[InputContainer])`: Array of input containers where inputs[x] corresponds to times[x]\n", + "* __`outputs`__ `(list[OutputContainer])`: Array of output containers where outputs[x] corresponds to times[x]\n", + "* __`method`__ `(str, optional)`: Optimization method. See `scipy.optimize.minimize`\n", + "* __`tol`__ `(int, optional)`: Tolerance for termination. Depending on the provided minimization method, specifying tolerance sets solver-specific options to tol\n", + "* __`error_method`__ `(str, optional)`: Method to use in calculating error. See [`calc_error`](https://nasa.github.io/progpy/api_ref/progpy/PrognosticModel.html?highlight=calc_error#progpy.PrognosticsModel.calc_error) for options\n", + "* __`bounds`__ `(tuple or dict, optional)`: Bounds for optimization in format ((lower1, upper1), (lower2, upper2), ...) or {key1: (lower1, upper1), key2: (lower2, upper2), ...}\n", + "* __`options`__ `(dict, optional)`: Options passed to optimizer. See `scipy.optimize.minimize` for options" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "* [Simple Example](#Simple-Example)\n", + "* [Using Tol](#Using-Tol)\n", + "* [Handling Noise with Multiple Runs](#Handling-Noise-with-Multiple-Runs)\n", + "* [Simplified Battery](#Simplified-Battery)\n", + " * [Data Prep](#Data-Prep)\n", + " * [Set Up Model](#Set-Up-Model)\n", + " * [Parameter Estimation](#Parameter-Estimation)\n", + "* [Conclusion](#Conclusion)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simple Example" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, we estimate the model parameters from data. In general, the data will usually be collected from the physical system or from a different model (model surrogacy). In this case, we will use example data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "times = [0, 1, 2, 3, 4, 5, 6, 7]\n", + "inputs = [{}] * 8\n", + "outputs = [\n", + " {\"x\": 1.83},\n", + " {\"x\": 36.5091999066245},\n", + " {\"x\": 60.05364349596605},\n", + " {\"x\": 73.23733081022635},\n", + " {\"x\": 76.47528104941956},\n", + " {\"x\": 69.9146810161441},\n", + " {\"x\": 53.74272753819968},\n", + " {\"x\": 28.39355725512131},\n", + "]" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First, we will import a model from the ProgPy Package. For this example, we will be using the simple ThrownObject model." 
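For orientation, here is a minimal, self-contained sketch (not part of this notebook) showing how the keyword arguments listed at the top of this notebook fit together in a single `estimate_params()` call. The data, keys, bounds, and tolerance below are illustrative placeholders only.

# Illustrative sketch only: combining the estimate_params() keyword arguments
# described above. The data values, keys, bounds, and tol are placeholders.
from progpy.models import ThrownObject

m = ThrownObject()
times = [0, 1, 2, 3, 4]
inputs = [{}] * 5  # ThrownObject takes no inputs
outputs = [{"x": 1.83}, {"x": 36.51}, {"x": 60.05}, {"x": 73.24}, {"x": 76.48}]

m.estimate_params(
    times=times,
    inputs=inputs,
    outputs=outputs,
    keys=["thrower_height", "throwing_speed"],  # parameters to tune
    bounds={"thrower_height": (0, 10), "throwing_speed": (0, 100)},
    error_method="MSE",  # default; see calc_error() for other options
    tol=1e-4,
    dt=0.1,
)
print(m["thrower_height"], m["throwing_speed"])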
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import ThrownObject" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now build a model with a best guess for the parameters. We will guess that our thrower is 20 meters tall, has a throwing speed of 3.1 $m/s$, and that acceleration due to gravity is 15 $m/s^2$. However, given our times, inputs, and outputs, we can clearly tell this is not true! Let's see if parameter estimation can fix this." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m = ThrownObject(thrower_height=20, throwing_speed=3.1, g=15)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next we will define specific parameters that we want to estimate. We can pass the desired parameters to our __keys__ keyword argument." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "keys = [\"thrower_height\", \"throwing_speed\", \"g\"]" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To really see what `estimate_params()` is doing, we will print out the state before executing the estimation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Printing state before\n", + "print(\"Model configuration before\")\n", + "for key in keys:\n", + " print(\"-\", key, m[key])\n", + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=0.1))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that the error is quite high. This indicates that the parameters are not accurate.\n", + "\n", + "Now, we will run `estimate_params()` with the data to correct these parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m.estimate_params(times=times, inputs=inputs, outputs=outputs, keys=keys, dt=0.1)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's see what the new parameters are after estimation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"\\nOptimized configuration\")\n", + "for key in keys:\n", + " print(\"-\", key, m[key])\n", + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=0.1))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Sure enough, parameter estimation determined that the thrower's height wasn't 20m. Instead, it was closer to 1.8m, a much more reasonable height. Parameter estimation also correctly estimated g as ~-9.81 $m/s^2$ and throwing speed at around 40 $m/s$, the values used to generate our example data." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Tol" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "An additional feature of the `estimate_params()` function is the tolerance feature, or `tol`. The exact function that the `tol` argument\n", + "uses is specific to the method used. For example, the `tol` argument for the `Nelder-Mead` method is the change in the lowest error and its corresponding parameter values between iterations. 
The difference between iterations for both of these must be below `tol` for parameter estimation to converge.\n", + "\n", + "For example, if in the nth iteration of the optimizer above the best error was __2e-5__ and the corresponding values were thrower_height=1.8, throwing_speed=40, and g=-9.8 and at the n+1th iteration the best error was __1e-5__ and the corresponding values were thrower_height=1.85, throwing_speed=39.5, and g=-9.81, then the difference in error would be __1e-5__ and the difference in parameter values would be \n", + "\n", + "$$\\sqrt{(1.85 - 1.8)^2 + (40 - 39.5)^2 + (9.8 - 9.81)^2} = 0.5025932749$$\n", + "\n", + "In this case, error would meet a tol of __1e-4__, but the parameters would not, so optimization would continue. For more information, see the [scipy.optimize.minimize](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html) documentation.\n", + "\n", + "In our previous example, note that our total error was roughly __6e-10__ after the `estimate_params()` call, using the default `tol` of __1e-4__. Now, let us see what happens to the parameters when we pass a tolerance of __1e-6__." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m = ThrownObject(thrower_height=20, throwing_speed=3.1, g=15)\n", + "m.estimate_params(\n", + " times=times, inputs=inputs, outputs=outputs, keys=keys, dt=0.1, tol=1e-6\n", + ")\n", + "print(\"\\nOptimized configuration\")\n", + "for key in keys:\n", + " print(\"-\", key, m[key])\n", + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=0.1))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As expected, reducing the tolerance leads to a decrease in the overall error, resulting in more accurate parameters.\n", + "\n", + "Note that if we were to set a high tolerance, such as 10, our error would consequently be very high! Also note that the tol value is for scipy minimize. It is different but strongly correlated to the result of calc_error. For more information on how the `tol` feature works, please refer to scipy's `minimize()` [documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html).\n", + "\n", + "You can also adjust the metric that is used to estimate parameters by setting the error_method to a different `calc_error()` method (see example below). Default is Mean Squared Error (`MSE`). See `calc_error()` method for list of options." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m[\"thrower_height\"] = 3.1\n", + "m[\"throwing_speed\"] = 29\n", + "\n", + "# Using MAX_E, or Maximum Error, instead of the default Mean Squared Error.\n", + "m.estimate_params(\n", + " times=times,\n", + " inputs=inputs,\n", + " outputs=outputs,\n", + " keys=keys,\n", + " dt=0.1,\n", + " tol=1e-9,\n", + " error_method=\"MAX_E\",\n", + ")\n", + "print(\"\\nOptimized configuration\")\n", + "for key in keys:\n", + " print(\"-\", key, m[key])\n", + "print(\" Error: \", m.calc_error(times, inputs, outputs, dt=0.1, method=\"MAX_E\"))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that `MAX_E` is frequently better at capturing tail behavior in many prognostic models."
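To make the convergence check described in the `tol` discussion above concrete, here is a small standalone sketch of the two quantities involved (change in best error, and Euclidean distance between parameter vectors). This is illustrative only; the actual check happens inside `scipy.optimize.minimize`, not in user code.

import math

tol = 1e-4

# Best error and parameter values at iteration n and n+1 (numbers from the text above)
error_n, error_n1 = 2e-5, 1e-5
params_n = {"thrower_height": 1.8, "throwing_speed": 40.0, "g": -9.8}
params_n1 = {"thrower_height": 1.85, "throwing_speed": 39.5, "g": -9.81}

error_diff = abs(error_n1 - error_n)  # 1e-5: meets tol
param_diff = math.sqrt(sum((params_n1[k] - params_n[k]) ** 2 for k in params_n))  # ~0.5026: does not

# Both must fall below tol for the optimizer to stop, so optimization continues here
print(error_diff <= tol, param_diff <= tol)  # True False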
+ ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Handling Noise with Multiple Runs" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the previous two examples, we demonstrated how to use `estimate_params()` using a clearly defined ThrownObject model. However we assumed that there would be no noise in the data used to estimate parameters. This is almost never the case in real life.\n", + "\n", + "In this example, we'll show how to use `estimate_params()` with noisy data. First, let's repeat the previous example, this time generating data from a noisy model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m = ThrownObject(process_noise=1)\n", + "results = m.simulate_to_threshold(save_freq=0.5, dt=(\"auto\", 0.1))\n", + "\n", + "# Resetting parameters to their incorrectly set values.\n", + "m[\"thrower_height\"] = 20\n", + "m[\"throwing_speed\"] = 3.1\n", + "m[\"g\"] = 15\n", + "keys = [\"thrower_height\", \"throwing_speed\", \"g\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m.estimate_params(\n", + " times=results.times, inputs=results.inputs, outputs=results.outputs, keys=keys\n", + ")\n", + "print(\"\\nOptimized configuration\")\n", + "for key in keys:\n", + " print(\"-\", key, m[key])\n", + "print(\" Error: \", m.calc_error(results.times, results.inputs, results.outputs))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, the error from calc_error is low. To have an accurate estimation of the error, we should actually be manually measuring the Absolute Mean Error rather than using `calc_error()`.\n", + "\n", + "The reason being is simple. `calc_error()` is calculating the error between the simulated and observed data. However, the observed and simulated data in this case are being generated from a model that has noise. In other words, we are comparing the difference of noise to noise, which can lead to inconsistent results.\n", + "\n", + "Let's create a helper function to calculate the Absolute Mean Error between our original and estimated parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Creating a new model with the original parameters to compare to the model with noise.\n", + "true_Values = ThrownObject()\n", + "\n", + "\n", + "# Function to determine the Absolute Mean Error (AME) of the model parameters.\n", + "def AME(m, keys):\n", + " error = 0\n", + " for key in keys:\n", + " error += abs(m[key] - true_Values[key])\n", + " return error" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Using our new AME function, we see that the error isn't as great as we thought." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "AME(m, keys)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that the error changes every time due to the randomness of noise." 
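As an aside, if repeatable results are needed (e.g., for a regression test), the noise example elsewhere in these docs suggests fixing the numpy random seed before generating data. A minimal sketch, assuming the model draws its process noise from numpy's global random state:

import numpy as np

from progpy.models import ThrownObject

np.random.seed(42)  # fix the seed so the injected process noise is repeatable
m = ThrownObject(process_noise=1)
results = m.simulate_to_threshold(save_freq=0.5, dt=("auto", 0.1))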
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for count in range(10):\n", + " m = ThrownObject(process_noise=1)\n", + " results = m.simulate_to_threshold(save_freq=0.5, dt=(\"auto\", 0.1))\n", + "\n", + " # Resetting parameters to their originally incorrectly set values.\n", + " m[\"thrower_height\"] = 20\n", + " m[\"throwing_speed\"] = 3.1\n", + " m[\"g\"] = 15\n", + "\n", + " m.estimate_params(\n", + " times=results.times,\n", + " inputs=results.inputs,\n", + " outputs=results.outputs,\n", + " keys=keys,\n", + " dt=0.1,\n", + " )\n", + " error = AME(m, [\"thrower_height\", \"throwing_speed\", \"g\"])\n", + " print(f\"Estimate Call Number {count} - AME Error {error}\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This issue with noise can be overcome with more data. Let's repeat the example above, this time using data from multiple runs. First, let's generate the data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "times, inputs, outputs = [], [], []\n", + "m = ThrownObject(process_noise=1)\n", + "for count in range(20):\n", + " results = m.simulate_to_threshold(save_freq=0.5, dt=(\"auto\", 0.1))\n", + " times.append(results.times)\n", + " inputs.append(results.inputs)\n", + " outputs.append(results.outputs)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's reset the parameters to our incorrect values." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m[\"thrower_height\"] = 20\n", + "m[\"throwing_speed\"] = 3.1\n", + "m[\"g\"] = 15" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we will call `estimate_params()` with all the collected data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m.estimate_params(times=times, inputs=inputs, outputs=outputs, keys=keys, dt=0.1)\n", + "print(\"\\nOptimized configuration\")\n", + "for key in keys:\n", + " print(\"-\", key, m[key])\n", + "error = AME(m, [\"thrower_height\", \"throwing_speed\", \"g\"])\n", + "print(\"AME Error: \", error)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that by using data from multiple runs, we are able to produce a lower AME Error than before. This is because we are able to simulate the noise multiple times, which in turn, allows our `estimate_params()` to produce a more accurate result since it is given more data to work with." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simplified Battery" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The previous examples all used a simple model, the ThrownObject. For large, complex models, it can be VERY difficult and computationally expensive.\n", + "\n", + "In this example, we will estimate the parameters for the simplified battery model. This model is more complex than the ThrownObject model but is still a relatively simple model. This example demonstrates some approaches useful for estimating parameters in complex models, like estimating parameter subsets on data selected to highlight specific features.\n", + "\n", + "Let's prepare some data for parameter estimation. We will be using the datasets subpackage in progpy for this." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Data Prep" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.datasets import nasa_battery\n", + "\n", + "(desc, data) = nasa_battery.load_data(1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The dataset includes 4 different kinds of runs: trickle, step, reference, random walk. We're going to split the dataset into one example for each of the different types for use later.\n", + "\n", + "Let's take a look at the trickle discharge run first." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "trickle_dataset = data[0]\n", + "print(trickle_dataset)\n", + "trickle_dataset.plot(\n", + " y=[\"current\", \"voltage\", \"temperature\"], subplots=True, xlabel=\"Time (sec)\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's do the same for a reference discharge run (5)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "reference_dataset = data[5]\n", + "reference_dataset.plot(\n", + " y=[\"current\", \"voltage\", \"temperature\"], subplots=True, xlabel=\"Time (sec)\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we will do it for the step runs. Note that this is actually multiple runs that we need to combine. `relativeTime` resets for each \"run\". So if we're going to use multiple runs together, we need to stitch these times together." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data[7][\"absoluteTime\"] = data[7][\"relativeTime\"]\n", + "for i in range(8, 32):\n", + " data[i][\"absoluteTime\"] = (\n", + " data[i][\"relativeTime\"] + data[i - 1][\"absoluteTime\"].iloc[-1]\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we should combine the data into a single dataset and investigate the results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "step_dataset = pd.concat(data[7:32], ignore_index=True)\n", + "print(step_dataset)\n", + "step_dataset.plot(\n", + " y=[\"current\", \"voltage\", \"temperature\"], subplots=True, xlabel=\"Time (sec)\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, let's investigate the random walk discharge. Like the step discharge, we need to stitch together the times and concatenate the data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data[35][\"absoluteTime\"] = data[35][\"relativeTime\"]\n", + "for i in range(36, 50):\n", + " data[i][\"absoluteTime\"] = (\n", + " data[i][\"relativeTime\"] + data[i - 1][\"absoluteTime\"].iloc[-1]\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "random_walk_dataset = pd.concat(data[35:50], ignore_index=True)\n", + "print(random_walk_dataset)\n", + "random_walk_dataset.plot(\n", + " y=[\"current\", \"voltage\", \"temperature\"], subplots=True, xlabel=\"Time (sec)\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now the data is ready for this tutorial, let's dive into it." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Set Up Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import SimplifiedBattery\n", + "\n", + "m = SimplifiedBattery()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Parameter Estimation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's take a look at the parameter space." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m.parameters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now test how well it fits the random walk dataset. First, let's prepare the data and future load equation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "times_rw = random_walk_dataset[\"absoluteTime\"]\n", + "inputs_rw = [\n", + " elem[1][\"voltage\"] * elem[1][\"current\"] for elem in random_walk_dataset.iterrows()\n", + "]\n", + "outputs_rw = [{\"v\": elem[1][\"voltage\"]} for elem in random_walk_dataset.iterrows()]\n", + "\n", + "import numpy as np\n", + "\n", + "\n", + "def future_load_rw(t, x=None):\n", + " power = np.interp(t, times_rw, inputs_rw)\n", + " return {\"P\": power}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now evaluate how well the battery matches the data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=100\n", + ")\n", + "from matplotlib import pyplot as plt\n", + "\n", + "plt.figure()\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]])\n", + "plt.plot(result.times, [z[\"v\"] for z in result.outputs])\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")\n", + "fig = result.event_states.plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is a terrible fit. Clearly, the battery model isn't properly configured for this specific battery. Reading through the paper, we see that the default parameters are for a larger battery pouch present in a UAV, much larger than the 18650 battery that produced our dataset.\n", + "\n", + "To correct this, we need to estimate the model parameters.\n", + "\n", + "There are 7 parameters to set (assuming initial SOC is always 1). We can start with setting a few parameters we know. We know that $v_L$ is about 4.2 (from the battery specs). We also expect that the battery internal resistance is the same as that in the electrochemistry model (which also uses an 18650). Finally, we know that the capacity of this battery is significantly smaller than the default values for the larger pouch battery." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m[\"v_L\"] = 4.2 # We know this\n", + "from progpy.models import BatteryElectroChemEOD\n", + "\n", + "m[\"R_int\"] = BatteryElectroChemEOD.default_parameters[\"Ro\"]\n", + "m[\"E_crit\"] /= 4 # Battery capacity is much smaller" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's take a look at the model fit again and see where that got us." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result_guess = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=5\n", + ")\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]])\n", + "plt.plot(result_guess.times, [z[\"v\"] for z in result_guess.outputs])\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Much better, but not there yet. Next, we need to use the parameter estimation feature to estimate the parameters further. Let's prepare some data. We'll use the trickle, reference, and step datasets for this. These are close enough temporally that we can expect aging effects to be minimal.\n", + "\n", + "**NOTE: It is important to use a different dataset to estimate parameters as to test**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "times_trickle = trickle_dataset[\"relativeTime\"]\n", + "inputs_trickle = [\n", + " {\"P\": elem[1][\"voltage\"] * elem[1][\"current\"]}\n", + " for elem in trickle_dataset.iterrows()\n", + "]\n", + "outputs_trickle = [{\"v\": elem[1][\"voltage\"]} for elem in trickle_dataset.iterrows()]\n", + "\n", + "times_ref = reference_dataset[\"relativeTime\"]\n", + "inputs_ref = [\n", + " {\"P\": elem[1][\"voltage\"] * elem[1][\"current\"]}\n", + " for elem in reference_dataset.iterrows()\n", + "]\n", + "outputs_ref = [{\"v\": elem[1][\"voltage\"]} for elem in reference_dataset.iterrows()]\n", + "\n", + "times_step = step_dataset[\"relativeTime\"]\n", + "inputs_step = [\n", + " {\"P\": elem[1][\"voltage\"] * elem[1][\"current\"]} for elem in step_dataset.iterrows()\n", + "]\n", + "outputs_step = [{\"v\": elem[1][\"voltage\"]} for elem in step_dataset.iterrows()]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can print the keys and the error beforehand for reference. The error here is what is used to estimate parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "inputs_reformatted_rw = [\n", + " {\"P\": elem[1][\"voltage\"] * elem[1][\"current\"]}\n", + " for elem in random_walk_dataset.iterrows()\n", + "]\n", + "all_keys = [\"v_L\", \"R_int\", \"lambda\", \"gamma\", \"mu\", \"beta\", \"E_crit\"]\n", + "print(\"Model configuration\")\n", + "for key in all_keys:\n", + " print(\"-\", key, m[key])\n", + "error_guess = m.calc_error(\n", + " times=times_rw.to_list(), inputs=inputs_reformatted_rw, outputs=outputs_rw\n", + ")\n", + "print(\"Error: \", error_guess)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's set the bounds on each of the parameters.\n", + "\n", + "For $v_L$ and $R_{int}$, we're defining some small bounds because we have an idea of what they might be. For the others we are saying it's between 0.1 and 10x the default battery. We also are adding a constraint that E_crit must be smaller than the default, since we know it's a smaller battery." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "bounds = {\n", + " \"v_L\": (3.75, 4.5),\n", + " \"R_int\": (\n", + " BatteryElectroChemEOD.default_parameters[\"Ro\"] * 0.5,\n", + " BatteryElectroChemEOD.default_parameters[\"Ro\"] * 2.5,\n", + " ),\n", + " \"lambda\": (0.046 / 10, 0.046 * 10),\n", + " \"gamma\": (3.355 / 10, 3.355 * 10),\n", + " \"mu\": (2.759 / 10, 2.759 * 10),\n", + " \"beta\": (8.482 / 10, 8.482 * 10),\n", + " \"E_crit\": (202426.858 / 10, 202426.858), # (smaller than default)\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we'll estimate the parameters. See the [Parameter Estimation](https://nasa.github.io/progpy/prog_models_guide.html#parameter-estimation) section in the ProgPy documentation for more details.\n", + "\n", + "We can throw all of the data into estimate parameters, but that will take a long time to run and is prone to errors (e.g., getting stuck in local minima). For this example, we will split characterization into parts.\n", + "\n", + "First, we try to capture the base voltage ($v_L$). If we look at the equation above, $v_L$ is the only term that is not a function of either SOC or power. So, for this estimation we use the trickle dataset, where power draw is the lowest, and we only use the first section where SOC can be assumed to be about 1." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "keys = [\"v_L\"]\n", + "m.estimate_params(\n", + " times=trickle_dataset[\"relativeTime\"].iloc[:10].to_list(),\n", + " inputs=inputs_trickle[:10],\n", + " outputs=outputs_trickle[:10],\n", + " keys=keys,\n", + " dt=1,\n", + " bounds=bounds,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now run the simulation and plot the result." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Model configuration\")\n", + "for key in all_keys:\n", + " print(\"-\", key, m[key])\n", + "error_fit1 = m.calc_error(\n", + " times=times_rw.to_list(), inputs=inputs_reformatted_rw, outputs=outputs_rw\n", + ")\n", + "print(f\"Error: {error_guess}->{error_fit1} ({error_fit1 - error_guess})\")\n", + "\n", + "result_fit1 = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=5\n", + ")\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]], label=\"ground truth\")\n", + "plt.plot(result_guess.times, [z[\"v\"] for z in result_guess.outputs], label=\"guess\")\n", + "plt.plot(result_fit1.times, [z[\"v\"] for z in result_fit1.outputs], label=\"fit1\")\n", + "plt.legend()\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")\n", + "\n", + "plt.figure()\n", + "plt.plot([0, 1], [error_guess, error_fit1])\n", + "plt.xlabel(\"Parameter Estimation Run\")\n", + "plt.ylabel(\"Error\")\n", + "plt.ylim((0, 0.25))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A tiny bit closer, but not significant. Our initial guess (from the packaging) must have been pretty good.\n", + "\n", + "The next step is to estimate the effect of current on the battery. The parameter $R_{int}$ (internal resistance) affects this. To estimate $R_{int}$ we will use 2 runs where power is not minimal (ref and step runs). Again, we will use only the first couple steps, so SOC can be assumed to be about 1."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "keys = [\"R_int\"]\n", + "m.estimate_params(\n", + " times=[times_ref.iloc[:5].to_list(), times_step.iloc[:5].to_list()],\n", + " inputs=[inputs_ref[:5], inputs_step[:5]],\n", + " outputs=[outputs_ref[:5], outputs_step[:5]],\n", + " keys=keys,\n", + " dt=1,\n", + " bounds=bounds,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's look at what that got us." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Model configuration\")\n", + "for key in all_keys:\n", + " print(\"-\", key, m[key])\n", + "error_fit2 = m.calc_error(\n", + " times=times_rw.to_list(), inputs=inputs_reformatted_rw, outputs=outputs_rw\n", + ")\n", + "print(f\"Error: {error_fit1}->{error_fit2} ({error_fit2 - error_fit1})\")\n", + "\n", + "result_fit2 = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=5\n", + ")\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]], label=\"ground truth\")\n", + "plt.plot(result_guess.times, [z[\"v\"] for z in result_guess.outputs], label=\"guess\")\n", + "plt.plot(result_fit1.times, [z[\"v\"] for z in result_fit1.outputs], label=\"fit1\")\n", + "plt.plot(result_fit2.times, [z[\"v\"] for z in result_fit2.outputs], label=\"fit2\")\n", + "plt.legend()\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")\n", + "\n", + "plt.figure()\n", + "plt.plot([0, 1, 2], [error_guess, error_fit1, error_fit2])\n", + "plt.xlabel(\"Parameter Estimation Run\")\n", + "plt.ylabel(\"Error\")\n", + "plt.ylim((0, 0.25))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Much better, but not there yet! Finally, we need to estimate the effects of SOC on battery performance. This involves all of the remaining parameters. For this, we will use the entire reference curve to capture a full discharge.\n", + "\n", + "Note that we're using the error_method `MAX_E`, instead of the default `MSE`. This results in parameters that better estimate the end of the discharge curve and is recommended when estimating parameters that are combined with the event state." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "keys = [\"lambda\", \"gamma\", \"mu\", \"beta\", \"E_crit\"]\n", + "m.estimate_params(\n", + " times=times_ref.to_list(),\n", + " inputs=inputs_ref,\n", + " outputs=outputs_ref,\n", + " keys=keys,\n", + " dt=1,\n", + " bounds=bounds,\n", + " error_method=\"MAX_E\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now run the simulation and plot the result."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Model configuration\")\n", + "for key in all_keys:\n", + " print(\"-\", key, m[key])\n", + "error_fit3 = m.calc_error(\n", + " times=times_rw.to_list(), inputs=inputs_reformatted_rw, outputs=outputs_rw\n", + ")\n", + "print(f\"Error: {error_fit2}->{error_fit3} ({error_fit3 - error_fit2})\")\n", + "\n", + "result_fit3 = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=5\n", + ")\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]], label=\"ground truth\")\n", + "plt.plot(result_guess.times, [z[\"v\"] for z in result_guess.outputs], label=\"guess\")\n", + "plt.plot(result_fit1.times, [z[\"v\"] for z in result_fit1.outputs], label=\"fit1\")\n", + "plt.plot(result_fit2.times, [z[\"v\"] for z in result_fit2.outputs], label=\"fit2\")\n", + "plt.plot(result_fit3.times, [z[\"v\"] for z in result_fit3.outputs], label=\"fit3\")\n", + "plt.legend()\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")\n", + "\n", + "plt.figure()\n", + "plt.plot([0, 1, 2, 3], [error_guess, error_fit1, error_fit2, error_fit3])\n", + "plt.xlabel(\"Parameter Estimation Run\")\n", + "plt.ylabel(\"Error\")\n", + "plt.ylim((0, 0.25))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is even better. Now we have an \"ok\" estimate, ~150 mV (for the sake of a demo). The estimate could be refined further by setting a lower tolerance (tol parameter), or repeating the 4 parameter estimation steps, as shown above.\n", + "\n", + "Parameter estimation is also limited by the model itself. This is a simplified battery model, meaning there were some simplifying assumptions made. It will likely not be able to capture the behavior of the battery as well as a higher fidelity model (e.g., BatteryElectroChemEOD)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This chapter introduced the concept of parameter estimation, through which the parameters of a physics-based model are estimated. This is done using a mixture of data, knowledge (e.g., from system specs), and intuition. For large, complex models, it can be VERY difficult and computationally expensive. Fortunately, in this case we have a relatively simple model.\n", + "\n", + "In ProgPy, a model's `estimate_params` method is used to estimate the parameters. See [Parameter Estimation Docs](https://nasa.github.io/progpy/prog_models_guide.html#parameter-estimation) for more details.\n", + "\n", + "In the next notebook, we will be exploring the models that are included with ProgPy (see __[03 Existing Models](03_Existing%20Models.ipynb)__)."
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.12.0 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.0" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "f1062708a37074d70712b695aadee582e0b0b9f95f45576b5521424137d05fec" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/_downloads/a81fef7fd34adf587e2f0a58e5e2d5a7/sensitivity.ipynb b/docs/_downloads/a81fef7fd34adf587e2f0a58e5e2d5a7/sensitivity.ipynb index 992d2737..93e57c8b 100644 --- a/docs/_downloads/a81fef7fd34adf587e2f0a58e5e2d5a7/sensitivity.ipynb +++ b/docs/_downloads/a81fef7fd34adf587e2f0a58e5e2d5a7/sensitivity.ipynb @@ -1,54 +1,125 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample performing a sensitivity analysis on a new model. \n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Deriv prog model was selected because the model can be described as x' = x + dx*dt\nfrom progpy.models.thrown_object import ThrownObject\nimport numpy as np\n\ndef run_example():\n # Demo model\n # Step 1: Create instance of model\n m = ThrownObject()\n\n # Step 2: Setup for simulation \n def future_load(t, x=None):\n return m.InputContainer({})\n\n # Step 3: Setup range on parameters considered\n thrower_height_range = np.arange(1.2, 2.1, 0.1)\n\n # Step 4: Sim for each \n event = 'impact'\n eods = np.empty(len(thrower_height_range))\n for (i, thrower_height) in zip(range(len(thrower_height_range)), thrower_height_range):\n m.parameters['thrower_height'] = thrower_height\n simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt =1e-3, save_freq =10)\n eods[i] = simulated_results.times[-1]\n\n # Step 5: Analysis\n print('For a reasonable range of heights, impact time is between {} and {}'.format(round(eods[0],3), round(eods[-1],3)))\n sensitivity = (eods[-1]-eods[0])/(thrower_height_range[-1] - thrower_height_range[0])\n print(' - Average sensitivity: {} s per cm height'.format(round(sensitivity/100, 6)))\n print(\" - It seems impact time is not very sensitive to thrower's height\")\n\n # Now lets repeat for throw speed\n throw_speed_range = np.arange(20, 40, 1)\n eods = np.empty(len(throw_speed_range))\n for (i, throw_speed) in zip(range(len(throw_speed_range)), throw_speed_range):\n m.parameters['throwing_speed'] = throw_speed\n simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], options={'dt':1e-3, 'save_freq':10})\n eods[i] = simulated_results.times[-1]\n\n print('\\nFor a reasonable range of throwing speeds, impact time is between {} and {}'.format(round(eods[0],3), round(eods[-1],3)))\n sensitivity = (eods[-1]-eods[0])/(throw_speed_range[-1] - throw_speed_range[0])\n print(' - Average sensitivity: {} s per m/s speed'.format(round(sensitivity/100, 6)))\n print(\" - It seems impact time is much more dependent on throwing speed\")\n\n# This allows the module to be executed directly \nif __name__=='__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - 
"display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample performing a sensitivity analysis on a new model. \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Deriv prog model was selected because the model can be described as x' = x + dx*dt\n", + "from progpy.models.thrown_object import ThrownObject\n", + "import numpy as np\n", + "\n", + "\n", + "def run_example():\n", + " # Demo model\n", + " # Step 1: Create instance of model\n", + " m = ThrownObject()\n", + "\n", + " # Step 2: Setup for simulation\n", + " def future_load(t, x=None):\n", + " return m.InputContainer({})\n", + "\n", + " # Step 3: Setup range on parameters considered\n", + " thrower_height_range = np.arange(1.2, 2.1, 0.1)\n", + "\n", + " # Step 4: Sim for each\n", + " event = \"impact\"\n", + " eods = np.empty(len(thrower_height_range))\n", + " for i, thrower_height in zip(\n", + " range(len(thrower_height_range)), thrower_height_range\n", + " ):\n", + " m.parameters[\"thrower_height\"] = thrower_height\n", + " simulated_results = m.simulate_to_threshold(\n", + " future_load, threshold_keys=[event], dt=1e-3, save_freq=10\n", + " )\n", + " eods[i] = simulated_results.times[-1]\n", + "\n", + " # Step 5: Analysis\n", + " print(\n", + " \"For a reasonable range of heights, impact time is between {} and {}\".format(\n", + " round(eods[0], 3), round(eods[-1], 3)\n", + " )\n", + " )\n", + " sensitivity = (eods[-1] - eods[0]) / (\n", + " thrower_height_range[-1] - thrower_height_range[0]\n", + " )\n", + " print(\n", + " \" - Average sensitivity: {} s per cm height\".format(\n", + " round(sensitivity / 100, 6)\n", + " )\n", + " )\n", + " print(\" - It seems impact time is not very sensitive to thrower's height\")\n", + "\n", + " # Now lets repeat for throw speed\n", + " throw_speed_range = np.arange(20, 40, 1)\n", + " eods = np.empty(len(throw_speed_range))\n", + " for i, throw_speed in zip(range(len(throw_speed_range)), throw_speed_range):\n", + " m.parameters[\"throwing_speed\"] = throw_speed\n", + " simulated_results = m.simulate_to_threshold(\n", + " future_load, threshold_keys=[event], options={\"dt\": 1e-3, \"save_freq\": 10}\n", + " )\n", + " eods[i] = simulated_results.times[-1]\n", + "\n", + " print(\n", + " \"\\nFor a reasonable range of throwing speeds, impact time is between {} and {}\".format(\n", + " round(eods[0], 3), round(eods[-1], 3)\n", + " )\n", + " )\n", + " sensitivity = (eods[-1] - eods[0]) / (throw_speed_range[-1] - throw_speed_range[0])\n", + " print(\n", + " \" - Average sensitivity: {} s per m/s speed\".format(\n", + " round(sensitivity / 100, 6)\n", + " )\n", + " )\n", + " print(\" - It seems impact time is much more dependent on throwing speed\")\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + 
"language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/sphinx-config/auto_examples/sim_powertrain.py b/docs/_downloads/a91a8b7d0eec6e8ab4f00ef2efcc2900/sim_powertrain.py similarity index 58% rename from sphinx-config/auto_examples/sim_powertrain.py rename to docs/_downloads/a91a8b7d0eec6e8ab4f00ef2efcc2900/sim_powertrain.py index 9a298539..bf4dd5b2 100644 --- a/sphinx-config/auto_examples/sim_powertrain.py +++ b/docs/_downloads/a91a8b7d0eec6e8ab4f00ef2efcc2900/sim_powertrain.py @@ -2,11 +2,12 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a powertrain being simulated for a set amount of time. +Example of a powertrain being simulated for a set amount of time. """ from progpy.models import Powertrain, ESC, DCMotor + def run_example(): # Create a model object esc = ESC() @@ -15,16 +16,16 @@ def run_example(): # Define future loading function - 100% duty all the time def future_loading(t, x=None): - return powertrain.InputContainer({ - 'duty': 1, - 'v': 23 - }) - + return powertrain.InputContainer({"duty": 1, "v": 23}) + # Simulate to threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') - simulated_results = powertrain.simulate_to(2, future_loading, dt=2e-5, save_freq=0.1, print=True) + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") + simulated_results = powertrain.simulate_to( + 2, future_loading, dt=2e-5, save_freq=0.1, print=True + ) + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/a9f5f00132a2ac6b21c976835cdb0e04/dataset.py b/docs/_downloads/a9f5f00132a2ac6b21c976835cdb0e04/dataset.py index 0457b16a..f92b6225 100644 --- a/docs/_downloads/a9f5f00132a2ac6b21c976835cdb0e04/dataset.py +++ b/docs/_downloads/a9f5f00132a2ac6b21c976835cdb0e04/dataset.py @@ -6,65 +6,72 @@ .. dropdown:: More details - In this example, a battery dataset is downloaded from the NASA PCoE data repository. This dataset is then accessed and plotted. + In this example, a battery dataset is downloaded from the NASA PCoE data repository. This dataset is then accessed and plotted. """ import matplotlib.pyplot as plt import pickle from prog_models.datasets import nasa_battery + DATASET_ID = 1 + def run_example(): # Step 1: Download and import the dataset for a single battery # Note: This may take some time - print('Downloading... ', end='') + print("Downloading... 
", end="") (desc, data) = nasa_battery.load_data(DATASET_ID) - print('done') + print("done") # We recommend saving the dataset to disk for future use # This way you don't have to download it each time - pickle.dump((desc, data), open(f'dataset_{DATASET_ID}.pkl', 'wb')) + pickle.dump((desc, data), open(f"dataset_{DATASET_ID}.pkl", "wb")) # Step 2: Access the dataset description - print(f'\nDataset {DATASET_ID}') - print(desc['description']) - print(f'Procedure: {desc["procedure"]}') + print(f"\nDataset {DATASET_ID}") + print(desc["description"]) + print(f"Procedure: {desc['procedure']}") # Step 3: Access the dataset data # Data is in format [run_id][time][variable] - # For the battery the variables are + # For the battery the variables are # 0: relativeTime (since beginning of run) # 1: current (amps) # 2: voltage # 3: temperature (°C) # so that data[a][b, 3] is the temperature at time index b (relative to the start of the run) for run a - print(f'\nNumber of runs: {len(data)}') - print(f'\nAnalyzing run 4') - print(f'number of time indices: {len(data[4])}') + print(f"\nNumber of runs: {len(data)}") + print("\nAnalyzing run 4") + print(f"number of time indices: {len(data[4])}") print(f"Details of run 4: {desc['runs'][4]}") # Plot the run plt.figure() plt.subplot(2, 1, 1) - plt.plot(data[4]['relativeTime'], data[4]['current']) - plt.ylabel('Current (A)') + plt.plot(data[4]["relativeTime"], data[4]["current"]) + plt.ylabel("Current (A)") plt.subplot(2, 1, 2) - plt.plot(data[4]['relativeTime'], data[4]['voltage']) - plt.ylabel('Voltage (V)') - plt.xlabel('Time (s)') - plt.title('Run 4') + plt.plot(data[4]["relativeTime"], data[4]["voltage"]) + plt.ylabel("Voltage (V)") + plt.xlabel("Time (s)") + plt.title("Run 4") # Graph all reference discharge profiles - indices = [i for i, x in enumerate(desc['runs']) if 'reference discharge' in x['desc'] and 'rest' not in x['desc']] + indices = [ + i + for i, x in enumerate(desc["runs"]) + if "reference discharge" in x["desc"] and "rest" not in x["desc"] + ] plt.figure() for i in indices: - plt.plot(data[i]['relativeTime'], data[i]['voltage'], label=f"Run {i}") - plt.title('Reference discharge profiles') - plt.xlabel('Time (s)') - plt.ylabel('Voltage (V)') + plt.plot(data[i]["relativeTime"], data[i]["voltage"], label=f"Run {i}") + plt.title("Reference discharge profiles") + plt.xlabel("Time (s)") + plt.ylabel("Voltage (V)") plt.show() -# This allows the module to be executed directly -if __name__=='__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/af1eddffc14489d963504c7eaa4ea256/model_gen.ipynb b/docs/_downloads/af1eddffc14489d963504c7eaa4ea256/model_gen.ipynb index 9fa922ac..edbdaaeb 100644 --- a/docs/_downloads/af1eddffc14489d963504c7eaa4ea256/model_gen.ipynb +++ b/docs/_downloads/af1eddffc14489d963504c7eaa4ea256/model_gen.ipynb @@ -1,54 +1,142 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample generating models from constituent parts. 
\n\nThe model used for this example is that of an object thrown into the air, predicting the impact event\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Deriv prog model was selected because the model can be described as x' = x + dx*dt\nfrom prog_models import PrognosticsModel\n\ndef run_example():\n # Step 1: Define keys\n keys = {\n 'inputs': [], # no inputs, no way to control\n 'states': [\n 'x', # Position (m) \n 'v' # Velocity (m/s)\n ],\n 'outputs': [ # Anything we can measure\n 'x' # Position (m)\n ],\n 'events': [\n 'falling', # Event- object is falling\n 'impact' # Event- object has impacted ground\n ]\n }\n\n thrower_height = 1.83 # m\n throwing_speed = 40 # m/s\n # Step 2: Define initial state\n def initialize(u, z):\n return {\n 'x': thrower_height, # Thrown, so initial altitude is height of thrower\n 'v': throwing_speed # Velocity at which the ball is thrown - this guy is an professional baseball pitcher\n }\n \n # Step 3: Define dx equation\n def dx(x, u):\n return {\n 'x': x['v'],\n 'v': -9.81 # Acceleration of gravity\n }\n\n # Step 3: Define equation for calculating output/measuremetn\n def output(x):\n return {\n 'x': x['x']\n }\n\n # Step 4: Define threshold equation\n def threshold_met(x):\n return {\n 'falling': x['v'] < 0,\n 'impact': x['x'] <= 0\n }\n\n # Step 5 (optional): Define event state equation- measurement of how close you are to threshold (0-1)\n def event_state(x): \n event_state.max_x = max(event_state.max_x, x['x']) # Maximum altitude\n return {\n 'falling': max(x['v']/throwing_speed,0), # Throwing speed is max speed\n 'impact': max(x['x']/event_state.max_x,0) # 1 until falling begins, then it's fraction of height\n }\n event_state.max_x = 0\n \n # Step 6: Generate model\n m = PrognosticsModel.generate_model(keys, initialize, output, event_state_eqn = event_state, threshold_eqn=threshold_met, dx_eqn=dx)\n\n # Step 7: Setup for simulation \n def future_load(t, x=None):\n return {}\n\n # Step 8: Simulate to impact\n event = 'impact'\n simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt = 0.005, save_freq=1, print = True)\n\n # Print flight time\n print('The object hit the ground in {} seconds'.format(round(simulated_results.times[-1],2)))\n\n# This allows the module to be executed directly \nif __name__=='__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample generating models from constituent parts. 
\n\nThe model used for this example is that of an object thrown into the air, predicting the impact event\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Deriv prog model was selected because the model can be described as x' = x + dx*dt\n", + "from prog_models import PrognosticsModel\n", + "\n", + "\n", + "def run_example():\n", + " # Step 1: Define keys\n", + " keys = {\n", + " \"inputs\": [], # no inputs, no way to control\n", + " \"states\": [\n", + " \"x\", # Position (m)\n", + " \"v\", # Velocity (m/s)\n", + " ],\n", + " \"outputs\": [ # Anything we can measure\n", + " \"x\" # Position (m)\n", + " ],\n", + " \"events\": [\n", + " \"falling\", # Event- object is falling\n", + " \"impact\", # Event- object has impacted ground\n", + " ],\n", + " }\n", + "\n", + " thrower_height = 1.83 # m\n", + " throwing_speed = 40 # m/s\n", + "\n", + " # Step 2: Define initial state\n", + " def initialize(u, z):\n", + " return {\n", + " \"x\": thrower_height, # Thrown, so initial altitude is height of thrower\n", + " \"v\": throwing_speed, # Velocity at which the ball is thrown - this guy is an professional baseball pitcher\n", + " }\n", + "\n", + " # Step 3: Define dx equation\n", + " def dx(x, u):\n", + " return {\n", + " \"x\": x[\"v\"],\n", + " \"v\": -9.81, # Acceleration of gravity\n", + " }\n", + "\n", + " # Step 3: Define equation for calculating output/measuremetn\n", + " def output(x):\n", + " return {\"x\": x[\"x\"]}\n", + "\n", + " # Step 4: Define threshold equation\n", + " def threshold_met(x):\n", + " return {\"falling\": x[\"v\"] < 0, \"impact\": x[\"x\"] <= 0}\n", + "\n", + " # Step 5 (optional): Define event state equation- measurement of how close you are to threshold (0-1)\n", + " def event_state(x):\n", + " event_state.max_x = max(event_state.max_x, x[\"x\"]) # Maximum altitude\n", + " return {\n", + " \"falling\": max(x[\"v\"] / throwing_speed, 0), # Throwing speed is max speed\n", + " \"impact\": max(\n", + " x[\"x\"] / event_state.max_x, 0\n", + " ), # 1 until falling begins, then it's fraction of height\n", + " }\n", + "\n", + " event_state.max_x = 0\n", + "\n", + " # Step 6: Generate model\n", + " m = PrognosticsModel.generate_model(\n", + " keys,\n", + " initialize,\n", + " output,\n", + " event_state_eqn=event_state,\n", + " threshold_eqn=threshold_met,\n", + " dx_eqn=dx,\n", + " )\n", + "\n", + " # Step 7: Setup for simulation\n", + " def future_load(t, x=None):\n", + " return {}\n", + "\n", + " # Step 8: Simulate to impact\n", + " event = \"impact\"\n", + " simulated_results = m.simulate_to_threshold(\n", + " future_load, threshold_keys=[event], dt=0.005, save_freq=1, print=True\n", + " )\n", + "\n", + " # Print flight time\n", + " print(\n", + " \"The object hit the ground in {} seconds\".format(\n", + " round(simulated_results.times[-1], 2)\n", + " )\n", + " )\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git 
a/docs/_downloads/b0ce3603387e7aa9d47150b177e46978/utpredictor.py b/docs/_downloads/b0ce3603387e7aa9d47150b177e46978/utpredictor.py index df138d24..ec76df42 100644 --- a/docs/_downloads/b0ce3603387e7aa9d47150b177e46978/utpredictor.py +++ b/docs/_downloads/b0ce3603387e7aa9d47150b177e46978/utpredictor.py @@ -1,7 +1,7 @@ # Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. """ -An example using the UnscentedTransformPredictor class to predict the degredation of a battery. +An example using the UnscentedTransformPredictor class to predict the degredation of a battery. """ from progpy.models import BatteryCircuit @@ -9,58 +9,60 @@ # from progpy.visualize import plot_hist # import matplotlib.pyplot as plt + def run_example(): ## Setup batt = BatteryCircuit() - def future_loading(t, x = None): - # Variable (piece-wise) future loading scheme - if (t < 600): + + def future_loading(t, x=None): + # Variable (piece-wise) future loading scheme + if t < 600: i = 2 - elif (t < 900): + elif t < 900: i = 1 - elif (t < 1800): + elif t < 1800: i = 4 - elif (t < 3000): + elif t < 3000: i = 2 else: i = 3 - return batt.InputContainer({'i': i}) + return batt.InputContainer({"i": i}) ## State Estimation - perform a single ukf state estimate step - filt = state_estimators.UnscentedKalmanFilter(batt, batt.parameters['x0']) + filt = state_estimators.UnscentedKalmanFilter(batt, batt.parameters["x0"]) - import matplotlib.pyplot as plt # For plotting print("Prior State:", filt.x.mean) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) - example_measurements = {'t': 32.2, 'v': 3.915} + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) + example_measurements = {"t": 32.2, "v": 3.915} t = 0.1 filt.estimate(t, future_loading(t), example_measurements) print("Posterior State:", filt.x.mean) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) ## Prediction - Predict EOD given current state # Setup prediction mc = predictors.UnscentedTransformPredictor(batt) # Predict with a step size of 0.1 - mc_results = mc.predict(filt.x, future_loading, dt=0.1, save_freq= 100) + mc_results = mc.predict(filt.x, future_loading, dt=0.1, save_freq=100) # Print Results for i, time in enumerate(mc_results.times): - print('\nt = {}'.format(time)) - print('\tu = {}'.format(mc_results.inputs.snapshot(i).mean)) - print('\tx = {}'.format(mc_results.states.snapshot(i).mean)) - print('\tz = {}'.format(mc_results.outputs.snapshot(i).mean)) - print('\tevent state = {}'.format(mc_results.event_states.snapshot(i).mean)) + print("\nt = {}".format(time)) + print("\tu = {}".format(mc_results.inputs.snapshot(i).mean)) + print("\tx = {}".format(mc_results.states.snapshot(i).mean)) + print("\tz = {}".format(mc_results.outputs.snapshot(i).mean)) + print("\tevent state = {}".format(mc_results.event_states.snapshot(i).mean)) - print('\nToE:', mc_results.time_of_event.mean) + print("\nToE:", mc_results.time_of_event.mean) # You can also access the final state (of type UncertainData), like so: final_state = mc_results.time_of_event.final_state - print('Final state @EOD: ', final_state['EOD'].mean) + print("Final state @EOD: ", final_state["EOD"].mean) # toe.plot_hist() # plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git 
a/docs/_downloads/b0f882b08ad06c468dcd7cc5ad370a58/sim_battery_eol.py b/docs/_downloads/b0f882b08ad06c468dcd7cc5ad370a58/sim_battery_eol.py index ad4de0f9..6c2c2372 100644 --- a/docs/_downloads/b0f882b08ad06c468dcd7cc5ad370a58/sim_battery_eol.py +++ b/docs/_downloads/b0f882b08ad06c468dcd7cc5ad370a58/sim_battery_eol.py @@ -2,23 +2,24 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a battery being simulated until End of Life (EOL). Battery capacity decreases with use. In this case, EOL is defined as when the battery capacity falls below some acceptable threshold (i.e., what we define as useful capacity). +Example of a battery being simulated until End of Life (EOL). Battery capacity decreases with use. In this case, EOL is defined as when the battery capacity falls below some acceptable threshold (i.e., what we define as useful capacity). """ import matplotlib.pyplot as plt from progpy.models import BatteryElectroChem as Battery -def run_example(): + +def run_example(): # Step 1: Create a model object batt = Battery() # Step 2: Define future loading function - # Here we're using a function designed to charge until 0.95, + # Here we're using a function designed to charge until 0.95, # then discharge until 0.05 load = 1 def future_loading(t, x=None): - nonlocal load + nonlocal load # Rule for loading after initialization if x is not None: @@ -29,26 +30,33 @@ def future_loading(t, x=None): elif event_state["EOD"] < 0.05: load = -1 # Charge # Rule for loading at initialization - return batt.InputContainer({'i': load}) + return batt.InputContainer({"i": load}) # Step 3: Simulate to Capacity is insufficient Threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") options = { - 'save_freq': 1000, # Frequency at which results are saved - 'dt': 2, # Timestep - 'events': 'InsufficientCapacity', # Simulate to InsufficientCapacity - 'print': True + "save_freq": 1000, # Frequency at which results are saved + "dt": 2, # Timestep + "events": "InsufficientCapacity", # Simulate to InsufficientCapacity + "print": True, } simulated_results = batt.simulate_to_threshold(future_loading, **options) # Step 4: Plot Results - simulated_results.inputs.plot(ylabel='Current drawn (amps)') - simulated_results.event_states.plot(ylabel='Event States', labels={'EOD': 'State of Charge (SOC)', 'InsufficientCapacity': 'State of Health (SOH)'}) + simulated_results.inputs.plot(ylabel="Current drawn (amps)") + simulated_results.event_states.plot( + ylabel="Event States", + labels={ + "EOD": "State of Charge (SOC)", + "InsufficientCapacity": "State of Health (SOH)", + }, + ) plt.ylim([0, 1]) plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/b261b8e8a2bb840eed75010c0929ce5a/state_limits.py b/docs/_downloads/b261b8e8a2bb840eed75010c0929ce5a/state_limits.py index a60696ea..5d081855 100644 --- a/docs/_downloads/b261b8e8a2bb840eed75010c0929ce5a/state_limits.py +++ b/docs/_downloads/b261b8e8a2bb840eed75010c0929ce5a/state_limits.py @@ -2,7 +2,7 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating when and how to identify model state limits. +Example demonstrating when and how to identify model state limits. 
In this example, state limits are defined for the ThrownObject Model. These are limits on the range of each state for a state-transition model. The use of this feature is then demonstrated. """ @@ -10,58 +10,70 @@ from math import inf from progpy.models.thrown_object import ThrownObject + def run_example(): # Demo model # Step 1: Create instance of model (without drag) - m = ThrownObject( cd = 0 ) + m = ThrownObject(cd=0) # Step 2: add state limits m.state_limits = { # object may not go below ground height - 'x': (0, inf), - + "x": (0, inf), # object may not exceed the speed of light - 'v': (-299792458, 299792458) + "v": (-299792458, 299792458), } # Step 3: Simulate to impact - event = 'impact' - simulated_results = m.simulate_to_threshold(threshold_keys=[event], dt=0.005, save_freq=1) - + event = "impact" + simulated_results = m.simulate_to_threshold( + threshold_keys=[event], dt=0.005, save_freq=1 + ) + # Print states - print('Example 1') + print("Example 1") for i, state in enumerate(simulated_results.states): - print(f'State {i}: {state}') + print(f"State {i}: {state}") print() # Let's try setting x to a number outside of its bounds - x0 = m.initialize(u = {}, z = {}) - x0['x'] = -1 + x0 = m.initialize(u={}, z={}) + x0["x"] = -1 - simulated_results = m.simulate_to_threshold(threshold_keys=[event], dt=0.005, save_freq=1, x=x0) + simulated_results = m.simulate_to_threshold( + threshold_keys=[event], dt=0.005, save_freq=1, x=x0 + ) # Print states - print('Example 2') + print("Example 2") for i, state in enumerate(simulated_results.states): - print('State ', i, ': ', state) + print("State ", i, ": ", state) print() # Let's see what happens when the objects speed aproaches its limit - x0 = m.initialize(u = {}, z = {}) - x0['x'] = 1000000000 - x0['v'] = 0 - m.parameters['g'] = -50000000 - - print('Example 3') - simulated_results = m.simulate_to_threshold(threshold_keys=[event], dt=0.005, save_freq=0.3, x=x0, print=True, progress=False) + x0 = m.initialize(u={}, z={}) + x0["x"] = 1000000000 + x0["v"] = 0 + m.parameters["g"] = -50000000 + + print("Example 3") + simulated_results = m.simulate_to_threshold( + threshold_keys=[event], + dt=0.005, + save_freq=0.3, + x=x0, + print=True, + progress=False, + ) # Note that the limits can also be applied manually using the apply_limits function - print('limiting states') - x = {'x': -5, 'v': 3e8} # Too fast and below the ground - print('\t Pre-limit: {}'.format(x)) + print("limiting states") + x = {"x": -5, "v": 3e8} # Too fast and below the ground + print("\t Pre-limit: {}".format(x)) x = m.apply_limits(x) - print('\t Post-limit: {}'.format(x)) + print("\t Post-limit: {}".format(x)) + -# This allows the module to be executed directly -if __name__=='__main__': - run_example() \ No newline at end of file +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/b39e07bddb5cb7b7385002979b39abd1/03_Existing Models.ipynb b/docs/_downloads/b39e07bddb5cb7b7385002979b39abd1/03_Existing Models.ipynb new file mode 100644 index 00000000..9dbe7380 --- /dev/null +++ b/docs/_downloads/b39e07bddb5cb7b7385002979b39abd1/03_Existing Models.ipynb @@ -0,0 +1,732 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 3. Using Included ProgPy Models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "ProgPy is distributed with a few pre-constructed models that can be used in simulation or prognostics. 
These models, which cover batteries, pumps, and valves, among others, are included in the `progpy.models` package.\n",
+    "\n",
+    "In this notebook, we will be exploring a generalized overview of each included model. For more in-depth descriptions of the included models, please refer to the [Included Models](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html) documentation."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "jp-MarkdownHeadingCollapsed": true
+   },
+   "source": [
+    "## Table of Contents\n",
+    "* [Battery Models](#Battery-Models)\n",
+    "    * [Battery Circuit](#BatteryCircuit)\n",
+    "    * [BatteryElectroChemEOD](#BatteryElectroChemEOD)\n",
+    "    * [BatteryElectroChemEOL](#BatteryElectroChemEOL)\n",
+    "    * [Combined BatteryElectroChem (BatteryElectroChemEODEOL)](#Combined-BatteryElectroChem-(BatteryElectroChemEODEOL))\n",
+    "    * [Simplified Battery](#Simplified-Battery)\n",
+    "* [Centrifugal Pump Model](#Centrifugal-Pump-Model)\n",
+    "* [Electric Powertrain Models](#Electric-Powertrain-Models)\n",
+    "* [Pneumatic Valve Model](#Pneumatic-Valve-Model)\n",
+    "* [Aircraft Flight Model](#Aircraft-Flight-Model)\n",
+    "* [Discrete State Model](#Discrete-State-Model)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Battery Models"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We will start by introducing the battery models: `BatteryCircuit`, `BatteryElectroChemEOD`, `BatteryElectroChemEOL`, combined `BatteryElectroChem` (`BatteryElectroChemEODEOL`), and `SimplifiedBattery`.\n",
+    "\n",
+    "In the following battery models, with the exception of `SimplifiedBattery`, the default model parameters included are for Li-ion batteries, specifically 18650-type cells. Experimental discharge curves for these cells can be downloaded from the Prognostics Center of Excellence [Data Repository](https://www.nasa.gov/intelligent-systems-division/discovery-and-systems-health/pcoe/pcoe-data-set-repository/)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### BatteryCircuit"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In this first example, we will demonstrate how to set up, configure, and use the `BatteryCircuit` model. The `BatteryCircuit` model is a vectorized prognostics model for a battery, represented by an equivalent circuit model as described in [[Daigle Sankararaman 2013]](https://papers.phmsociety.org/index.php/phmconf/article/view/2253).\n",
+    "\n",
+    "We will start by importing the model and initializing a battery instance with default settings."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from progpy.models import BatteryCircuit\n",
+    "\n",
+    "batt = BatteryCircuit()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Information is passed to and from the model using containers that function like dictionaries. The keys of the containers are specific to the model. Let's look at the inputs (loading) and outputs (measurements) for the `BatteryCircuit` model."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"inputs:\", batt.inputs)\n", + "print(\"outputs:\", batt.outputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If we refer to the `Circuit` tab under the battery models section in the [documentation](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html), we can see that the input `i` refers to the current draw on the battery. The outputs `t` refers to the temperature in units Kelvin and `v` refers to voltage.\n", + "\n", + "We can also print out what events we're predicting and the internal states the model uses to represent the system." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"event(s): \", batt.events)\n", + "print(\"states: \", batt.states)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can see that this particular model only predicts one event, called `EOD` (End of Discharge). The states listed include `tb`, the battery temperature in K; `qb`, the charge stored in Capacitor Cb of the equivalent circuit model; `qcp`, the charge stored in Capacitor Ccp of the equivalent circuit model; and `qcs`, the charge stored in Capacitor Ccs of the equivalent circuit model.\n", + "\n", + "Let's now look at the model's configuration parameters, which describe the specific system (in this case, the battery) that the model is simulating." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from pprint import pprint\n", + "\n", + "print(\"Model configuration:\")\n", + "pprint(batt.parameters)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's now use the model to do a simulation. To do this, we will first need to set a configuration and define a future load. For more details on future loading, refer to the related section in __[01 Simulation](01_Simulation.ipynb#Future-Loading)__." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\"save_freq\": 100, \"dt\": 2, \"t0\": 700}\n", + "\n", + "\n", + "def future_loading(t, x=None):\n", + " if t < 600:\n", + " i = 2\n", + " elif t < 900:\n", + " i = 1\n", + " elif t < 1800:\n", + " i = 4\n", + " elif t < 3000:\n", + " i = 2\n", + " else:\n", + " i = 3\n", + " return batt.InputContainer({\"i\": i})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's run the simulation and plot the inputs and outputs. We can do this using the built-in [plot method](https://nasa.github.io/progpy/api_ref/progpy/SimResult.html#progpy.sim_result.SimResult.plot) based on matplotlib or with other imported plotting libraries." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "simulated_results = batt.simulate_to_threshold(future_loading, **config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the input plot, we can see the current drawn change based on the logic we defined in the future loading function." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fig = simulated_results.inputs.plot(\n",
+    "    xlabel=\"time (s)\", ylabel=\"current draw (amps)\", title=\"BatteryCircuit Input\"\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In the output plots, we can observe how different input current draws affect the temperature and voltage curves. Generally, the graphs indicate that drawing a higher current leads to higher temperatures and lower voltage."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fig = simulated_results.outputs.plot(\n",
+    "    keys=[\"t\"],\n",
+    "    xlabel=\"time (s)\",\n",
+    "    ylabel=\"temperature (K)\",\n",
+    "    figsize=(10, 4),\n",
+    "    title=\"BatteryCircuit Outputs\",\n",
+    ")\n",
+    "fig2 = simulated_results.outputs.plot(\n",
+    "    keys=[\"v\"], xlabel=\"time (s)\", ylabel=\"voltage (V)\", figsize=(10, 4)\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### BatteryElectroChemEOD"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`BatteryElectroChemEOD` is a vectorized prognostics model for a battery, represented by electrochemical equations as described in [[Daigle 2013]](https://papers.phmsociety.org/index.php/phmconf/article/view/2252). This model predicts the end of discharge event. Let's start by examining the model inputs, outputs, event(s), and states. We can refer to the `ElectroChem (EOD)` tab under the battery models section in the [documentation](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html) for more details."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from progpy.models import BatteryElectroChemEOD\n",
+    "\n",
+    "batt = BatteryElectroChemEOD()\n",
+    "\n",
+    "print(\"inputs:\", batt.inputs)\n",
+    "print(\"outputs:\", batt.outputs)\n",
+    "print(\"event(s): \", batt.events)\n",
+    "print(\"states:\", batt.states)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's now run a simulation until `EOD`, or end of discharge. We will use the same future loading function as the previous example and specify the configuration threshold event as `EOD`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "config = {\"save_freq\": 100, \"dt\": 2, \"events\": [\"EOD\"]}\n",
+    "\n",
+    "simulated_results = batt.simulate_to_threshold(future_loading, **config)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In the input plot, we can see the current draw change based on the future loading function we defined."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fig = simulated_results.inputs.plot(\n",
+    "    xlabel=\"time (s)\", ylabel=\"current draw (amps)\", title=\"BatteryElectroChemEOD Input\"\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In the output plots, we can see changes in voltage and temperature. We can also print parameters like `VEOD`, or the end of discharge voltage threshold. This value is the voltage at which a battery is considered fully discharged."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fig = simulated_results.outputs.plot(\n",
+    "    keys=[\"v\"],\n",
+    "    xlabel=\"time (s)\",\n",
+    "    ylabel=\"voltage (V)\",\n",
+    "    figsize=(10, 4),\n",
+    "    title=\"BatteryElectroChemEOD Outputs\",\n",
+    ")\n",
+    "print(\"End of discharge voltage threshold:\", batt.parameters[\"VEOD\"])\n",
+    "\n",
+    "fig2 = simulated_results.outputs.plot(\n",
+    "    keys=[\"t\"], xlabel=\"time (s)\", ylabel=\"temperature (°C)\", figsize=(10, 4)\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In the event state plot, we can see `EOD` decline until it reaches 0, which is when the end of discharge event has occurred. This is the point at which the simulation reached its threshold and ended."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fig = simulated_results.event_states.plot(\n",
+    "    xlabel=\"time (s)\",\n",
+    "    ylabel=\"event state\",\n",
+    "    labels={\"EOD\"},\n",
+    "    title=\"BatteryElectroChemEOD Event State\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### BatteryElectroChemEOL"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`BatteryElectroChemEOL` is a vectorized prognostics model for battery degradation, represented by an electrochemical model as described in [[Daigle 2016]](https://arc.aiaa.org/doi/pdf/10.2514/6.2016-2132). Let's go ahead and import the model, initialize a battery instance, and take a closer look at the details. We can also refer to the `ElectroChem (EOL)` tab under the battery model section in the [documentation](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html). Note that the model has no outputs."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from progpy.models import BatteryElectroChemEOL\n",
+    "\n",
+    "batt = BatteryElectroChemEOL()\n",
+    "\n",
+    "print(\"inputs:\", batt.inputs)\n",
+    "print(\"outputs:\", batt.outputs)\n",
+    "print(\"event(s): \", batt.events)\n",
+    "print(\"states:\", batt.states)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's now run a simulation to predict when we will reach insufficient battery capacity. We will use the same future loading function as the previous examples and specify the configuration threshold event as `InsufficientCapacity`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "config = {\"save_freq\": 100, \"dt\": 2, \"events\": [\"InsufficientCapacity\"]}\n",
+    "\n",
+    "simulated_results = batt.simulate_to_threshold(future_loading, **config)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In the input plot, we can once again see the current draw change based on the future loading function we defined."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fig = simulated_results.inputs.plot(\n",
+    "    xlabel=\"time (s)\", ylabel=\"current draw (amps)\", title=\"BatteryElectroChemEOL Input\"\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In the event state plot, we can see `InsufficientCapacity` linearly decrease until it reaches 0, or when the event has occurred."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.event_states.plot(\n", + " xlabel=\"time (s)\",\n", + " ylabel=\"event state\",\n", + " labels={\"InsufficientCapacity\"},\n", + " title=\"BatteryElectroChemEOL Event State\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Combined BatteryElectroChem (BatteryElectroChemEODEOL)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`BatteryElectroChemEODEOL` is a prognostics model for battery degradation and discharge, represented by an electrochemical model as described in [[Daigle 2013]](https://papers.phmsociety.org/index.php/phmconf/article/view/2252) and [[Daigle 2016]](https://arc.aiaa.org/doi/pdf/10.2514/6.2016-2132). This model combines both the `BatteryElectroChemEOL` and `BatteryElectroChemEOD` models.\n", + "\n", + "We will start by importing the model, initializing a battery instance, and examining the model details. We can refer to the `ElectroChem (Combo)` tab under the battery model section in the [documentation](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html) for more details." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import BatteryElectroChem\n", + "\n", + "batt = BatteryElectroChem()\n", + "\n", + "print(\"inputs:\", batt.inputs)\n", + "print(\"outputs:\", batt.outputs)\n", + "print(\"event(s): \", batt.events)\n", + "print(\"states:\", batt.states)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, we will simulate a battery until `EOL` (End of Life). As battery capacity decreases with use, `EOL` is reached when the battery capacity falls below some acceptable threshold (i.e., what we define as useful capacity).\n", + "\n", + "We will now set the configuration and define a future loading function. As we want to simulate until `EOL`, we will set the configuration event to `InsufficientCapacity`. The future loading function is designed to charge the battery until `EOD` is 0.95 and then discharge until `EOD` is 0.05. Note that states represent the progress towards the event occurring. An event state of 0 indicates the event has occurred and 1 indicates no progress towards the event." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"save_freq\": 1000,\n", + " \"dt\": 2,\n", + " \"events\": \"InsufficientCapacity\",\n", + "}\n", + "\n", + "\n", + "def future_loading(t, x=None):\n", + " load = 1\n", + "\n", + " if x is not None:\n", + " event_state = batt.event_state(x)\n", + " if event_state[\"EOD\"] > 0.95:\n", + " load = 1 # Discharge\n", + " elif event_state[\"EOD\"] < 0.05:\n", + " load = -1 # Charge\n", + "\n", + " return batt.InputContainer({\"i\": load})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now simulate to the threshold and print the results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "simulated_results = batt.simulate_to_threshold(future_loading, **config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now plot the inputs, outputs, and event states. 
In the input plot, we can see the current drawn fluctuates between -1 and 1 based on the current load we defined in the future loading function." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.inputs.plot(\n", + " xlabel=\"time (s)\", ylabel=\"current drawn (amps)\", title=\"BatteryElectroChem Input\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the output plots, we can see changes in the voltage and temperature." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.outputs.plot(\n", + " keys=[\"v\"],\n", + " xlabel=\"time (s)\",\n", + " ylabel=\"voltage (V)\",\n", + " figsize=(10, 4),\n", + " title=\"BatteryElectroChem Outputs\",\n", + ")\n", + "fig2 = simulated_results.outputs.plot(\n", + " keys=[\"t\"], xlabel=\"time (s)\", ylabel=\"temperature (°C)\", figsize=(10, 4)\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the event states plot, we can see `EOD` incrementally spiking and `InsufficientCapacity` linearly declining until it reaches 0, or when the event has occurred." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "fig = simulated_results.event_states.plot(\n", + " xlabel=\"time (s)\",\n", + " ylabel=\"event states\",\n", + " labels={\"EOD\", \"InsufficientCapacity\"},\n", + " title=\"BatteryElectroChem Event States\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Simplified Battery" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`SimplifiedBattery` is a model from [[Sierra 2019]](https://www.sciencedirect.com/science/article/abs/pii/S0951832018301406). It was initially introduced in the __[2024 PHM Tutorial](2024PHMTutorial.ipynb)__. Unlike the previous models, the default parameters are for a Tattu battery. We can refer to the `Simplified` tab under the battery model section in the [documentation](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html) for more details.\n", + "\n", + "Let's start by importing the model, initializing an instance, and examining it." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import SimplifiedBattery\n", + "from progpy.loading import Piecewise\n", + "\n", + "batt = SimplifiedBattery()\n", + "\n", + "print(\"inputs:\", batt.inputs)\n", + "print(\"outputs:\", batt.outputs)\n", + "print(\"event(s): \", batt.events)\n", + "print(\"states:\", batt.states)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now define future loading based on a piecewise function and simulate to a set time." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "future_loading = Piecewise(\n", + " dict, [600, 900, 1800, 3000, float(\"inf\")], {\"P\": [25, 12, 50, 25, 33]}\n", + ")\n", + "\n", + "simulated_results = batt.simulate_to(200, future_loading, {\"v\": 4.183})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's look at the event states plot, where we can see `EOD` and `Low V`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.event_states.plot(\n", + " xlabel=\"time (s)\", ylabel=\"event state\", title=\"SimplifiedBattery Event States\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Centrifugal Pump Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this section will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Electric Powertrain Models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Pneumatic Valve Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this section will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Aircraft Flight Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this section will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Discrete State Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this section will be added in release v1.9**" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.10.7 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.10.7" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "610c699f0cd8c4f129acd9140687fff6866bed0eb8e82f249fc8848b827b628c" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/_downloads/b547422d9b3ae9d9e32d7bee6c092275/events.py b/docs/_downloads/b547422d9b3ae9d9e32d7bee6c092275/events.py index f338d9ce..d6e46c7f 100644 --- a/docs/_downloads/b547422d9b3ae9d9e32d7bee6c092275/events.py +++ b/docs/_downloads/b547422d9b3ae9d9e32d7bee6c092275/events.py @@ -2,27 +2,29 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example further illustrating the concept of 'events' which generalizes EOL. +Example further illustrating the concept of 'events' which generalizes EOL. .. dropdown:: More details :term:`Events` is the term used to describe something to be predicted. Generally in the PHM community these are referred to as End of Life (EOL). However, they can be much more. - In progpy, events can be anything that needs to be predicted. Events can represent End of Life (EOL), End of Mission (EOM), warning thresholds, or any Event of Interest (EOI). + In progpy, events can be anything that needs to be predicted. Events can represent End of Life (EOL), End of Mission (EOM), warning thresholds, or any Event of Interest (EOI). - This example demonstrates how events can be used in your applications. + This example demonstrates how events can be used in your applications. """ + import matplotlib.pyplot as plt from progpy.loading import Piecewise from progpy.models import BatteryElectroChemEOD + def run_example(): # Example: Warning thresholds # In this example we will use the battery model # We of course are interested in end of discharge, but for this example we # have a requirement that says the battery must not fall below 5% State of Charge (SOC) # Note: SOC is the event state for the End of Discharge (EOD) event - # Event states, like SOC go between 0 and 1, where 1 is healthy and at 0 the event has occurred. 
+ # Event states, like SOC go between 0 and 1, where 1 is healthy and at 0 the event has occurred. # So, 5% SOC corresponds to an 'EOD' event state of 0.05 # Additionally, we have two warning thresholds (yellow and red) @@ -32,7 +34,11 @@ def run_example(): # Step 1: Extend the battery model to define the additional events class MyBatt(BatteryElectroChemEOD): - events = BatteryElectroChemEOD.events + ['EOD_warn_yellow', 'EOD_warn_red', 'EOD_requirement_threshold'] + events = BatteryElectroChemEOD.events + [ + "EOD_warn_yellow", + "EOD_warn_red", + "EOD_requirement_threshold", + ] def event_state(self, state): # Get event state from parent @@ -41,9 +47,15 @@ def event_state(self, state): # Add yellow, red, and failure states by scaling EOD state # Here we scale so the threshold SOC is 0 by their associated events, while SOC of 1 is still 1 # For example, for yellow we want EOD_warn_yellow to be 1 when SOC is 1, and 0 when SOC is YELLOW_THRESH or lower - event_state['EOD_warn_yellow'] = (event_state['EOD']-YELLOW_THRESH)/(1-YELLOW_THRESH) - event_state['EOD_warn_red'] = (event_state['EOD']-RED_THRESH)/(1-RED_THRESH) - event_state['EOD_requirement_threshold'] = (event_state['EOD']-THRESHOLD)/(1-THRESHOLD) + event_state["EOD_warn_yellow"] = (event_state["EOD"] - YELLOW_THRESH) / ( + 1 - YELLOW_THRESH + ) + event_state["EOD_warn_red"] = (event_state["EOD"] - RED_THRESH) / ( + 1 - RED_THRESH + ) + event_state["EOD_requirement_threshold"] = ( + event_state["EOD"] - THRESHOLD + ) / (1 - THRESHOLD) # Return return event_state @@ -54,9 +66,11 @@ def threshold_met(self, x): # Add yell and red states from event_state event_state = self.event_state(x) - t_met['EOD_warn_yellow'] = event_state['EOD_warn_yellow'] <= 0 - t_met['EOD_warn_red'] = event_state['EOD_warn_red'] <= 0 - t_met['EOD_requirement_threshold'] = event_state['EOD_requirement_threshold'] <= 0 + t_met["EOD_warn_yellow"] = event_state["EOD_warn_yellow"] <= 0 + t_met["EOD_warn_red"] = event_state["EOD_warn_red"] <= 0 + t_met["EOD_requirement_threshold"] = ( + event_state["EOD_requirement_threshold"] <= 0 + ) return t_met @@ -65,20 +79,22 @@ def threshold_met(self, x): # 2a: Setup model - # Variable (piece-wise) future loading scheme - # For a battery, future loading is in term of current 'i' in amps. + # Variable (piece-wise) future loading scheme + # For a battery, future loading is in term of current 'i' in amps. future_loading = Piecewise( - m.InputContainer, - [600, 900, 1800, 3000, float('inf')], - {'i': [2, 1, 4, 2, 3]}) - + m.InputContainer, [600, 900, 1800, 3000, float("inf")], {"i": [2, 1, 4, 2, 3]} + ) + # 2b: Simulate to threshold - simulated_results = m.simulate_to_threshold(future_loading, threshold_keys=['EOD'], print = True) + simulated_results = m.simulate_to_threshold( + future_loading, threshold_keys=["EOD"], print=True + ) # 2c: Plot results simulated_results.event_states.plot() plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/b5b078b0fe440e9fa4d922ec9f4f1eea/basic_example.py b/docs/_downloads/b5b078b0fe440e9fa4d922ec9f4f1eea/basic_example.py new file mode 100644 index 00000000..3cbfae17 --- /dev/null +++ b/docs/_downloads/b5b078b0fe440e9fa4d922ec9f4f1eea/basic_example.py @@ -0,0 +1,143 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. 
+ +""" +This example performs a state estimation and prediction with uncertainty given a Prognostics Model. + +Method: An instance of the ThrownObject model in progpy is created, and the prediction process is achieved in three steps: + 1) State estimation of the current state is performed using a chosen state_estimator, and samples are drawn from this estimate + 2) Prediction of future states (with uncertainty) and the times at which the event threshold will be reached + 3) Metrics tools are used to further investigate the results of prediction +Results: + i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction + ii) Time event is predicted to occur (with uncertainty) + iii) Various prediction metrics + iv) Figures illustrating results +""" + +from progpy.models import ThrownObject +from progpy import * + + +def run_example(): + # Step 1: Setup model & future loading + m = ThrownObject(process_noise=1) + initial_state = m.initialize() + + # Step 2: Demonstrating state estimator + # The state estimator is used to estimate the system state given sensor data. + print("\nPerforming State Estimation Step") + + # Step 2a: Setup + filt = state_estimators.ParticleFilter(m, initial_state) + # VVV Uncomment this to use UKF State Estimator VVV + # filt = state_estimators.UnscentedKalmanFilter(m, initial_state) + + # Step 2b: Print & Plot Prior State + print("Prior State:", filt.x.mean) + print("\nevent state: ", m.event_state(filt.x.mean)) + fig = filt.x.plot_scatter(label="prior") + + # Step 2c: Perform state estimation step, given some measurement, above what's expected + example_measurements = m.OutputContainer({"x": 7.5}) + t = 0.1 + u = m.InputContainer({}) + filt.estimate( + t, u, example_measurements + ) # Update state, given (example) sensor data + + # Step 2d: Print & Plot Resulting Posterior State + # Note the posterior state is greater than the predicted state of 5.95 + # This is because of the high measurement + print("\nPosterior State:", filt.x.mean) + # Event state for 'falling' is less, because velocity has decreased + print("\nEvent State: ", m.event_state(filt.x.mean)) + filt.x.plot_scatter( + fig=fig, label="posterior" + ) # Add posterior state to figure from prior state + + # Note: in a prognostic application the above state estimation step would be repeated each time + # there is new data. Here we're doing one step to demonstrate how the state estimator is used + + # Step 3: Demonstrating Prediction + print("\n\nPerforming Prediction Step") + + # Step 3a: Setup Predictor + mc = predictors.MonteCarlo(m) + + # Step 3b: Perform a prediction + NUM_SAMPLES = 50 + STEP_SIZE = 0.01 + mc_results = mc.predict( + filt.x, n_samples=NUM_SAMPLES, dt=STEP_SIZE, save_freq=STEP_SIZE + ) + print("Predicted time of event (ToE): ", mc_results.time_of_event.mean) + # Here there are 2 events predicted, when the object starts falling, and when it impacts the ground. + + # Step 3c: Analyze the results + + # Note: The results of a sample-based prediction can be accessed by sample, e.g., + states_sample_1 = mc_results.states[1] + # now states_sample_1[n] corresponds to times[n] for the first sample + + # You can also access a state distribution at a specific time using the .snapshot function + states_time_1 = mc_results.states.snapshot(1) + # now you have all the samples corresponding to times[1] + + # You can also access the final state (of type UncertainData), like so: + # Note: to get a more accurate final state, you can decrease the step size. 
+ final_state = mc_results.time_of_event.final_state + print("State when object starts falling: ", final_state["falling"].mean) + + # You can also use the metrics package to generate some useful metrics on the result of a prediction + print("\nEOD Prediction Metrics") + + from progpy.metrics import prob_success + + print( + "\tPortion between 3.65 and 3.8: ", + mc_results.time_of_event.percentage_in_bounds([3.65, 3.8], keys="falling"), + ) + print( + "\tAssuming ground truth 3.7: ", + mc_results.time_of_event.metrics(ground_truth=3.7, keys="falling"), + ) + print( + "\tP(Success) if mission ends at 7.6: ", + prob_success(mc_results.time_of_event, 7.6, keys="impact"), + ) + + # Plot state transition + # Here we will plot the states at t0, 25% to ToE, 50% to ToE, 75% to ToE, and ToE + # You should see the states move together (i.e., velocity is lowest and highest when closest to the ground (before impact, and at beginning, respectively)) + fig = mc_results.states.snapshot(0).plot_scatter( + label="t={} s".format(int(mc_results.times[0])) + ) # 0 + quarter_index = int(len(mc_results.times) / 4) + mc_results.states.snapshot(quarter_index).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index])) + ) # 25% + mc_results.states.snapshot(quarter_index * 2).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index * 2])) + ) # 50% + mc_results.states.snapshot(quarter_index * 3).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[quarter_index * 3])) + ) # 75% + mc_results.states.snapshot(-1).plot_scatter( + fig=fig, label="t={} s".format(int(mc_results.times[-1])) + ) # 100% + + # Plot time of event for each event + # If you dont see many bins here, this is because there is not much variety in the estimate. + # You can increase the number of bins, decrease step size, or increase the number of samples to see more of a distribution + mc_results.time_of_event.plot_hist(keys="impact") + mc_results.time_of_event.plot_hist(keys="falling") + + # Step 4: Show all plots + import matplotlib.pyplot as plt # For plotting + + plt.show() + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/b7b9435143e35d3d4e55cfd759cffb5a/generate_surrogate.py b/docs/_downloads/b7b9435143e35d3d4e55cfd759cffb5a/generate_surrogate.py new file mode 100644 index 00000000..63d4b3cc --- /dev/null +++ b/docs/_downloads/b7b9435143e35d3d4e55cfd759cffb5a/generate_surrogate.py @@ -0,0 +1,204 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the +# National Aeronautics and Space Administration. All Rights Reserved. + +""" +Example of generating a Dynamic Mode Decomposition surrogate model from a battery model. + +.. dropdown:: More details + + In this example, an instance of a battery model is created. The DMD DataModel is used to generate a surrogate of this battery model for specific loading schemes. This surrogate can be used in place of the original model, approximating it's behavior. Frequently, surrogate models run faster than the original, at the cost of some accuracy. The performance of the two models are then compared. + +""" + +import matplotlib.pyplot as plt +from progpy.models import BatteryElectroChemEOD as Battery + + +def run_example(): + ### Example 1: Standard DMD Application + ## Step 1: Create a model object + batt = Battery() + + ## Step 2: Define future loading functions for training data + # Here, we define two specific loading profiles. 
These could also be generated programmatically, for as many loading profiles as desired + def future_loading_1(t, x=None): + # Variable (piece-wise) future loading scheme + if t < 500: + i = 3 + elif t < 1000: + i = 2 + elif t < 1500: + i = 0.5 + else: + i = 4.5 + return batt.InputContainer({"i": i}) + + def future_loading_2(t, x=None): + # Variable (piece-wise) future loading scheme + if t < 300: + i = 2 + elif t < 800: + i = 3.5 + elif t < 1300: + i = 4 + elif t < 1600: + i = 1.5 + else: + i = 5 + return batt.InputContainer({"i": i}) + + load_functions = [future_loading_1, future_loading_2] + + ## Step 3: generate surrogate model + # Simulation options for training data and surrogate model generation + # Note: here dt is less than save_freq. This means the model will iterate forward multiple steps per saved point. + # This is commonly done to ensure accuracy. + options_surrogate = { + "save_freq": 1, # For DMD, this value is the time step for which the surrogate model is generated + "dt": 0.1, # For DMD, this value is the time step of the training data + "trim_data_to": 0.7, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model + } + + # Set noise in Prognostics Model, default for surrogate model is also this value + batt.parameters["process_noise"] = 0 + + # Generate surrogate model + surrogate = batt.generate_surrogate(load_functions, **options_surrogate) + + ## Step 4: Use surrogate model + # Simulation options for implementation of surrogate model + options_sim = { + "save_freq": 1 # Frequency at which results are saved, or equivalently time step in results + } + + # Define loading profile + def future_loading(t, x=None): + if t < 600: + i = 3 + elif t < 1000: + i = 2 + elif t < 1500: + i = 1.5 + else: + i = 4 + return batt.InputContainer({"i": i}) + + # Simulate to threshold using DMD approximation + simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim) + + # Calculate Error + MSE = batt.calc_error( + simulated_results.times, simulated_results.inputs, simulated_results.outputs + ) + print("Example 1 MSE:", MSE) + # Not a very good approximation + + # Plot results + simulated_results.inputs.plot(ylabel="Current (amps)", title="Example 1 Input") + simulated_results.outputs.plot( + ylabel="Predicted Outputs (temperature and voltage)", + title="Example 1 Predicted Outputs", + ) + simulated_results.event_states.plot( + ylabel="Predicted State of Charge", title="Example 1 Predicted SOC" + ) + + # To visualize the accuracy of the approximation, run the high-fidelity model + options_hf = { + "dt": 0.1, + "save_freq": 1, + } + high_fidelity_results = batt.simulate_to_threshold(future_loading, **options_hf) + + # Save voltage results to compare + voltage_dmd = [ + simulated_results.outputs[iter1]["v"] + for iter1 in range(len(simulated_results.times)) + ] + voltage_hf = [ + high_fidelity_results.outputs[iter2]["v"] + for iter2 in range(len(high_fidelity_results.times)) + ] + + plt.subplots() + plt.plot(simulated_results.times, voltage_dmd, "-b", label="DMD approximation") + plt.plot( + high_fidelity_results.times, voltage_hf, "--r", label="High fidelity result" + ) + plt.legend() + plt.title("Comparing DMD approximation to high-fidelity model results") + + ### Example 2: Add process_noise to the surrogate model + # Without re-generating the surrogate model, we can re-define the process_noise to be higher than the high-fidelity model (since the surrogate model is less accurate) + 
surrogate.parameters["process_noise"] = 1e-04 + surrogate.parameters["process_noise_dist"] = "normal" + + # Simulate to threshold using DMD approximation + simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim) + + # Plot results + simulated_results.inputs.plot(ylabel="Current (amps)", title="Example 2 Input") + simulated_results.outputs.plot( + keys=["v"], + ylabel="Predicted Voltage (volts)", + title="Example 2 Predicted Outputs", + ) + simulated_results.event_states.plot( + ylabel="Predicted State of Charge", title="Example 2 Predicted SOC" + ) + + ### Example 3: Generate surrogate model with a subset of internal states, inputs, and/or outputs + # Note: we use the same loading profiles as defined in Ex. 1 + + ## Generate surrogate model + # Simulation options for training data and surrogate model generation + options_surrogate = { + "save_freq": 1, # For DMD, this value is the time step for which the surrogate model is generated + "dt": 0.1, # For DMD, this value is the time step of the training data + "trim_data": 1, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model + "state_keys": [ + "Vsn", + "Vsp", + "tb", + ], # Define internal states to be included in surrogate model + "output_keys": ["v"], # Define outputs to be included in surrogate model + } + + # Set noise in Prognostics Model, default for surrogate model is also this value + batt.parameters["process_noise"] = 0 + + # Generate surrogate model + surrogate = batt.generate_surrogate(load_functions, **options_surrogate) + + ## Use surrogate model + # The surrogate model can now be used anywhere the original model is used. It is interchangeable with the original model. + # The surrogate model results will be faster but less accurate than the original model. + + # Simulation options for implementation of surrogate model + options_sim = { + "save_freq": 1 # Frequency at which results are saved, or equivalently time step in results + } + + # Simulate to threshold using DMD approximation + simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim) + + # Calculate Error + MSE = batt.calc_error( + simulated_results.times, simulated_results.inputs, simulated_results.outputs + ) + print("Example 3 MSE:", MSE) + + # Plot results + simulated_results.inputs.plot(ylabel="Current (amps)", title="Example 3 Input") + simulated_results.outputs.plot( + ylabel="Outputs (voltage)", title="Example 3 Predicted Output" + ) + simulated_results.event_states.plot( + ylabel="State of Charge", title="Example 3 Predicted SOC" + ) + plt.show() + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/b997c950df7e2f695c19319712091ba3/measurement_eqn_example.py b/docs/_downloads/b997c950df7e2f695c19319712091ba3/measurement_eqn_example.py index eb5f32c4..86145e4e 100644 --- a/docs/_downloads/b997c950df7e2f695c19319712091ba3/measurement_eqn_example.py +++ b/docs/_downloads/b997c950df7e2f695c19319712091ba3/measurement_eqn_example.py @@ -1,12 +1,12 @@ # Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. """ -This example performs a state estimation with uncertainty given a Prognostics Model for a system in which not all output values are measured. - -Method: An instance of the BatteryCircuit model in prog_models is created. 
We assume that we are only measuring one of the output values, and we define a subclass to remove the other output value. +This example performs a state estimation with uncertainty given a Prognostics Model for a system in which not all output values are measured. + +Method: An instance of the BatteryCircuit model in prog_models is created. We assume that we are only measuring one of the output values, and we define a subclass to remove the other output value. Estimation of the current state is performed at various time steps, using the defined state_estimator. -Results: +Results: i) Estimate of the current state given various times ii) Display of results, such as prior and posterior state estimate values and SOC """ @@ -17,65 +17,67 @@ from prog_algs import * + def run_example(): # Step 1: Subclass model with measurement equation # In this case we're only measuring 'v' (i.e., removing temperature) # To do this we're creating a new class that's subclassed from the complete model. # To change the outputs we just have to override outputs (the list of keys) class MyBattery(Battery): - outputs = ['v'] + outputs = ["v"] # Step 2: Setup model & future loading batt = MyBattery() loads = [ # Define loads here to accelerate prediction - batt.InputContainer({'i': 2}), - batt.InputContainer({'i': 1}), - batt.InputContainer({'i': 4}), - batt.InputContainer({'i': 2}), - batt.InputContainer({'i': 3}) + batt.InputContainer({"i": 2}), + batt.InputContainer({"i": 1}), + batt.InputContainer({"i": 4}), + batt.InputContainer({"i": 2}), + batt.InputContainer({"i": 3}), ] - def future_loading(t, x = None): - # Variable (piece-wise) future loading scheme - if (t < 600): + + def future_loading(t, x=None): + # Variable (piece-wise) future loading scheme + if t < 600: return loads[0] - elif (t < 900): + elif t < 900: return loads[1] - elif (t < 1800): + elif t < 1800: return loads[2] - elif (t < 3000): + elif t < 3000: return loads[3] return loads[-1] - x0 = batt.parameters['x0'] + x0 = batt.parameters["x0"] # Step 3: Use the updated model filt = state_estimators.ParticleFilter(batt, x0) # Step 4: Run step and print results - print('Running state estimation step with only one of 2 outputs measured') + print("Running state estimation step with only one of 2 outputs measured") # Print Prior print("\nPrior State:", filt.x.mean) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) # Estimate Step # Note, only voltage was needed in the measurement step, since that is the only output we're measuring t = 0.1 load = future_loading(t) - filt.estimate(t, load, {'v': 3.915}) + filt.estimate(t, load, {"v": 3.915}) # Print Posterior print("\nPosterior State:", filt.x.mean) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) # Another Estimate Step t = 0.2 load = future_loading(t) - filt.estimate(t, load, {'v': 3.91}) + filt.estimate(t, load, {"v": 3.91}) # Print Posterior Again print("\nPosterior State (t={}):".format(t), filt.x.mean) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) # Note that the particle filter was still able to perform state estimation. 
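# ----------------------------------------------------------------------
# Editor's sketch (illustrative only, not part of the diff above): a
# minimal, hypothetical recap of the pattern this example demonstrates --
# subclass the model to declare only the outputs you actually measure,
# then feed the state estimator measurements for just those keys. The
# class name VoltageOnlyBattery and the import paths are assumptions
# based on the surrounding example, not additional library API.
from prog_models.models import BatteryCircuit as Battery
from prog_algs import state_estimators


class VoltageOnlyBattery(Battery):
    # Keep only voltage; temperature is no longer expected as a measurement
    outputs = ["v"]


batt = VoltageOnlyBattery()
filt = state_estimators.ParticleFilter(batt, batt.parameters["x0"])

# One estimation step with a voltage-only measurement, as in the example
t = 0.1
load = batt.InputContainer({"i": 2})
filt.estimate(t, load, {"v": 3.915})
print("SOC estimate:", batt.event_state(filt.x.mean)["EOD"])
# ----------------------------------------------------------------------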
# The updated outputs can be used for any case where the measurement doesn't match the model outputs @@ -84,29 +86,31 @@ def future_loading(t, x = None): parent = Battery() - class MyBattery(Battery): - outputs = ['tv'] # output is temperature * voltage (for some reason) + outputs = ["tv"] # output is temperature * voltage (for some reason) def output(self, x): - parent.parameters = self.parameters # only needed if you expect to change parameters + parent.parameters = ( + self.parameters + ) # only needed if you expect to change parameters z = parent.output(x) - return self.OutputContainer({'tv': z['v'] * z['t']}) + return self.OutputContainer({"tv": z["v"] * z["t"]}) batt = MyBattery() filt = state_estimators.ParticleFilter(batt, x0) - print('-----------------\n\nExample 2') + print("-----------------\n\nExample 2") print("\nPrior State:", filt.x.mean) print("\toutput: ", batt.output(filt.x.mean)) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) t = 0.1 load = future_loading(t) - filt.estimate(t, load, {'tv': 80}) + filt.estimate(t, load, {"tv": 80}) print("\nPosterior State:", filt.x.mean) print("\toutput: ", batt.output(filt.x.mean)) - print('\tSOC: ', batt.event_state(filt.x.mean)['EOD']) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/b9f89cfe4825dfd3e1ac080af9dcf54e/noise.py b/docs/_downloads/b9f89cfe4825dfd3e1ac080af9dcf54e/noise.py index ddc3b9a9..27cab5ba 100644 --- a/docs/_downloads/b9f89cfe4825dfd3e1ac080af9dcf54e/noise.py +++ b/docs/_downloads/b9f89cfe4825dfd3e1ac080af9dcf54e/noise.py @@ -8,106 +8,124 @@ import matplotlib.pyplot as plt from progpy.models.thrown_object import ThrownObject + def run_example(): # Define future loading - def future_load(t=None, x=None): + def future_load(t=None, x=None): # The thrown object model has no inputs- you cannot load the system (i.e., affect it once it's in the air) # So we return an empty input container return m.InputContainer({}) # Define configuration for simulation config = { - 'threshold_keys': 'impact', # Simulate until the thrown object has impacted the ground - 'dt': 0.005, # Time step (s) - 'save_freq': 0.5, # Frequency at which results are saved (s) + "threshold_keys": "impact", # Simulate until the thrown object has impacted the ground + "dt": 0.005, # Time step (s) + "save_freq": 0.5, # Frequency at which results are saved (s) } # Define a function to print the results - will be used later def print_results(simulated_results): # Print results - print('states:') - for (t,x) in zip(simulated_results.times, simulated_results.states): - print('\t{:.2f}s: {}'.format(t, x)) + print("states:") + for t, x in zip(simulated_results.times, simulated_results.states): + print("\t{:.2f}s: {}".format(t, x)) - print('outputs:') - for (t,x) in zip(simulated_results.times, simulated_results.outputs): - print('\t{:.2f}s: {}'.format(t, x)) + print("outputs:") + for t, x in zip(simulated_results.times, simulated_results.outputs): + print("\t{:.2f}s: {}".format(t, x)) - print('\nimpact time: {:.2f}s'.format(simulated_results.times[-1])) + print("\nimpact time: {:.2f}s".format(simulated_results.times[-1])) # The simulation stopped at impact, so the last element of times is the impact time # Plot results simulated_results.states.plot() # Ex1: No noise - m = 
ThrownObject(process_noise = False) + m = ThrownObject(process_noise=False) simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex1: No noise') + plt.title("Ex1: No noise") # Ex2: with noise - same noise applied to every state process_noise = 15 - m = ThrownObject(process_noise = process_noise) # Noise with a std of 0.5 to every state - print('\nExample without same noise for every state') + m = ThrownObject( + process_noise=process_noise + ) # Noise with a std of 0.5 to every state + print("\nExample without same noise for every state") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex2: Basic Noise') + plt.title("Ex2: Basic Noise") # Ex3: noise- more noise on position than velocity - process_noise = {'x': 30, 'v': 1} - m = ThrownObject(process_noise = process_noise) - print('\nExample with more noise on position than velocity') + process_noise = {"x": 30, "v": 1} + m = ThrownObject(process_noise=process_noise) + print("\nExample with more noise on position than velocity") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex3: More noise on position') + plt.title("Ex3: More noise on position") # Ex4: noise- Ex3 but uniform - process_noise_dist = 'uniform' - model_config = {'process_noise_dist': process_noise_dist, 'process_noise': process_noise} - m = ThrownObject(**model_config) - print('\nExample with more uniform noise') + process_noise_dist = "uniform" + model_config = { + "process_noise_dist": process_noise_dist, + "process_noise": process_noise, + } + m = ThrownObject(**model_config) + print("\nExample with more uniform noise") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex4: Ex3 with uniform dist') + plt.title("Ex4: Ex3 with uniform dist") # Ex5: noise- Ex3 but triangle - process_noise_dist = 'triangular' - model_config = {'process_noise_dist': process_noise_dist, 'process_noise': process_noise} - m = ThrownObject(**model_config) - print('\nExample with triangular process noise') + process_noise_dist = "triangular" + model_config = { + "process_noise_dist": process_noise_dist, + "process_noise": process_noise, + } + m = ThrownObject(**model_config) + print("\nExample with triangular process noise") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex5: Ex3 with triangular dist') + plt.title("Ex5: Ex3 with triangular dist") # Ex6: Measurement noise # Everything we've done with process noise, we can also do with measurement noise. 
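# ----------------------------------------------------------------------
# Editor's sketch (illustrative only, not part of the diff above): a
# condensed, hypothetical illustration of the process-noise options
# exercised in this example -- one scalar applied to every state, a
# per-state dict, and an alternate distribution. It reuses only the
# ThrownObject configuration literally shown in the example.
from progpy.models.thrown_object import ThrownObject


def future_load(t=None, x=None):
    # ThrownObject has no inputs, so the load is an empty container
    return m.InputContainer({})


config = {"threshold_keys": "impact", "dt": 0.005, "save_freq": 0.5}

# Same noise standard deviation applied to every state
m = ThrownObject(process_noise=15)
m.simulate_to_threshold(future_load, **config)

# Per-state noise, drawn from a uniform distribution instead of normal
m = ThrownObject(process_noise={"x": 30, "v": 1}, process_noise_dist="uniform")
m.simulate_to_threshold(future_load, **config)
# ----------------------------------------------------------------------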
- # Just use 'measurement_noise' and 'measurement_noise_dist' - measurement_noise = {'x': 20} # For each output - measurement_noise_dist = 'uniform' - model_config = {'measurement_noise_dist': measurement_noise_dist, 'measurement_noise': measurement_noise} - m = ThrownObject(**model_config) - print('\nExample with measurement noise') - print('- Note: outputs are different than state- this is the application of measurement noise') + # Just use 'measurement_noise' and 'measurement_noise_dist' + measurement_noise = {"x": 20} # For each output + measurement_noise_dist = "uniform" + model_config = { + "measurement_noise_dist": measurement_noise_dist, + "measurement_noise": measurement_noise, + } + m = ThrownObject(**model_config) + print("\nExample with measurement noise") + print( + "- Note: outputs are different than state- this is the application of measurement noise" + ) simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex6: Measurement noise') + plt.title("Ex6: Measurement noise") # Ex7: OK, now for something a little more complicated. Let's try proportional noise on v only (more variation when it's going faster) # This can be used to do custom or more complex noise distributions - def apply_proportional_process_noise(self, x, dt = 1): - x['v'] -= dt*0.5*x['v'] + def apply_proportional_process_noise(self, x, dt=1): + x["v"] -= dt * 0.5 * x["v"] return x - model_config = {'process_noise': apply_proportional_process_noise} + + model_config = {"process_noise": apply_proportional_process_noise} m = ThrownObject(**model_config) - print('\nExample with proportional noise on velocity') + print("\nExample with proportional noise on velocity") simulated_results = m.simulate_to_threshold(future_load, **config) print_results(simulated_results) - plt.title('Ex7: Proportional noise on velocity') + plt.title("Ex7: Proportional noise on velocity") - print('\nNote: If you would like noise to be applied in a repeatable manner, set the numpy random seed to a fixed value') - print('e.g., numpy.random.seed(42)') + print( + "\nNote: If you would like noise to be applied in a repeatable manner, set the numpy random seed to a fixed value" + ) + print("e.g., numpy.random.seed(42)") plt.show() -# This allows the module to be executed directly -if __name__=='__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/bc7386e9d7cbd63f301235db98e7ea1b/dataset.py b/docs/_downloads/bc7386e9d7cbd63f301235db98e7ea1b/dataset.py index 884ad5f7..59d505cb 100644 --- a/docs/_downloads/bc7386e9d7cbd63f301235db98e7ea1b/dataset.py +++ b/docs/_downloads/bc7386e9d7cbd63f301235db98e7ea1b/dataset.py @@ -4,65 +4,74 @@ """ Example downloading and using a NASA prognostics dataset. -In this example, a battery dataset is downloaded from the NASA PCoE data repository. This dataset is then accessed and plotted. +In this example, a battery dataset is downloaded from the NASA PCoE data repository. This dataset is then accessed and plotted. """ DATASET_ID = 1 + def run_example(): # Step 1: Download and import the dataset for a single battery # Note: This may take some time from progpy.datasets import nasa_battery - print('Downloading... ', end='') + + print("Downloading... 
", end="") (desc, data) = nasa_battery.load_data(DATASET_ID) - print('done') + print("done") # We recommend saving the dataset to disk for future use # This way you dont have to download it each time import pickle - pickle.dump((desc, data), open(f'dataset_{DATASET_ID}.pkl', 'wb')) + + pickle.dump((desc, data), open(f"dataset_{DATASET_ID}.pkl", "wb")) # Step 2: Access the dataset description - print(f'\nDataset {DATASET_ID}') - print(desc['description']) - print(f'Procedure: {desc["procedure"]}') + print(f"\nDataset {DATASET_ID}") + print(desc["description"]) + print(f"Procedure: {desc['procedure']}") # Step 3: Access the dataset data # Data is in format [run_id][time][variable] - # For the battery the variables are + # For the battery the variables are # 0: relativeTime (since beginning of run) # 1: current (amps) # 2: voltage # 3: temperature (°C) # so that data[a][b, 3] is the temperature at time index b (relative to the start of the run) for run a - print(f'\nNumber of runs: {len(data)}') - print(f'\nAnalyzing run 4') - print(f'number of time indices: {len(data[4])}') + print(f"\nNumber of runs: {len(data)}") + print("\nAnalyzing run 4") + print(f"number of time indices: {len(data[4])}") print(f"Details of run 4: {desc['runs'][4]}") # Plot the run import matplotlib.pyplot as plt + plt.figure() plt.subplot(2, 1, 1) plt.plot(data[4][:, 0], data[4][:, 1]) - plt.ylabel('Current (A)') + plt.ylabel("Current (A)") plt.subplot(2, 1, 2) plt.plot(data[4][:, 0], data[4][:, 2]) - plt.ylabel('Voltage (V)') - plt.xlabel('Time (s)') - plt.title('Run 4') + plt.ylabel("Voltage (V)") + plt.xlabel("Time (s)") + plt.title("Run 4") # Graph all reference discharge profiles - indices = [i for i, x in enumerate(desc['runs']) if 'reference discharge' in x['desc'] and 'rest' not in x['desc']] + indices = [ + i + for i, x in enumerate(desc["runs"]) + if "reference discharge" in x["desc"] and "rest" not in x["desc"] + ] plt.figure() for i in indices: plt.plot(data[i][:, 0], data[i][:, 2], label=f"Run {i}") - plt.title('Reference discharge profiles') - plt.xlabel('Time (s)') - plt.ylabel('Voltage (V)') + plt.title("Reference discharge profiles") + plt.xlabel("Time (s)") + plt.ylabel("Voltage (V)") plt.show() -# This allows the module to be executed directly -if __name__=='__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/beb178906ed56d1dc980686684fd5359/direct_model.py b/docs/_downloads/beb178906ed56d1dc980686684fd5359/direct_model.py index 07d009a2..80bda7ff 100644 --- a/docs/_downloads/beb178906ed56d1dc980686684fd5359/direct_model.py +++ b/docs/_downloads/beb178906ed56d1dc980686684fd5359/direct_model.py @@ -11,31 +11,34 @@ import numpy as np from progpy.models import ThrownObject + def run_example(): # Here is how estimating time of event works for a timeseries model m = ThrownObject() x = m.initialize() - print(m.__class__.__name__, "(Direct Model)" if m.is_direct else "(Timeseries Model)") + print( + m.__class__.__name__, "(Direct Model)" if m.is_direct else "(Timeseries Model)" + ) tic = time.perf_counter() - print('Time of event: ', m.time_of_event(x, dt = 0.05)) + print("Time of event: ", m.time_of_event(x, dt=0.05)) toc = time.perf_counter() - print(f'execution: {(toc-tic)*1000:0.4f} milliseconds') + print(f"execution: {(toc - tic) * 1000:0.4f} milliseconds") # Step 1: Define DirectModel # In this case we're extending the ThrownObject model to include the time_to_event method, defined in DirectModel - # In the case of 
thrown objects, we can solve the differential equation + # In the case of thrown objects, we can solve the differential equation # to estimate the time at which the events occur. class DirectThrownObject(ThrownObject): def time_of_event(self, x, *args, **kwargs): # calculate time when object hits ground given x['x'] and x['v'] # 0 = x0 + v0*t - 0.5*g*t^2 - g = self.parameters['g'] - t_impact = -(x['v'] + np.sqrt(x['v']*x['v'] - 2*g*x['x']))/g + g = self.parameters["g"] + t_impact = -(x["v"] + np.sqrt(x["v"] * x["v"] - 2 * g * x["x"])) / g # 0 = v0 - g*t - t_falling = -x['v']/g - - return {'falling': t_falling, 'impact': t_impact} + t_falling = -x["v"] / g + + return {"falling": t_falling, "impact": t_impact} # Note that adding *args and **kwargs is optional. # Having these arguments makes the function interchangeable with other models @@ -45,18 +48,23 @@ def time_of_event(self, x, *args, **kwargs): m = DirectThrownObject() x = m.initialize() # Using Initial state # Now instead of simulating to threshold, we can estimate it directly from the state, like so - print('\n', m.__class__.__name__, "(Direct Model)" if m.is_direct else "(Timeseries Model)") + print( + "\n", + m.__class__.__name__, + "(Direct Model)" if m.is_direct else "(Timeseries Model)", + ) tic = time.perf_counter() - print('Time of event: ', m.time_of_event(x)) + print("Time of event: ", m.time_of_event(x)) toc = time.perf_counter() - print(f'execution: {(toc-tic)*1000:0.4f} milliseconds') + print(f"execution: {(toc - tic) * 1000:0.4f} milliseconds") - # Notice that execution is MUCH faster for the direct model. + # Notice that execution is MUCH faster for the direct model. # This is even more pronounced for events that occur later in the simulation. - # In this case, the DirectThrownObject has a defined next_state and output equation, + # In this case, the DirectThrownObject has a defined next_state and output equation, # allowing it to be used with a state estimator (e..g, Particle Filter) -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/bec0de5b758ecc33efbc94e5d3fb8d01/custom_model.py b/docs/_downloads/bec0de5b758ecc33efbc94e5d3fb8d01/custom_model.py index bec40ed7..82c66fff 100644 --- a/docs/_downloads/bec0de5b758ecc33efbc94e5d3fb8d01/custom_model.py +++ b/docs/_downloads/bec0de5b758ecc33efbc94e5d3fb8d01/custom_model.py @@ -8,7 +8,7 @@ For most cases, you will be able to use the standard LSTMStateTransitionModel.from_data class with configuration (see the LSTMStateTransitionModel class for more details). However, sometimes you might want to add custom layers, or other complex components. In that case, you will build a custom model and pass it into LSTMStateTransitionModel. - In this example, we generate fake data using the BatteryElectroChemEOD model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. + In this example, we generate fake data using the BatteryElectroChemEOD model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. We build and fit a custom model using keras.layers. 
Finally, we compare performance to the standard format and the original model. """ @@ -20,21 +20,27 @@ from tensorflow import keras from tensorflow.keras import layers + def run_example(): WINDOW = 12 - print('Generating data...') + print("Generating data...") batt = BatteryElectroChemEOD() - future_loading_eqns = [lambda t, x=None: batt.InputContainer({'i': 1+1.4*load}) for load in range(6)] + future_loading_eqns = [ + lambda t, x=None: batt.InputContainer({"i": 1 + 1.4 * load}) + for load in range(6) + ] # Generate data with different loading and step sizes # Adding the step size as an element of the output input_data = [] output_data = [] for i in range(9): - dt = i/3+0.25 + dt = i / 3 + 0.25 for loading_eqn in future_loading_eqns: - d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) - u = np.array([np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float) + d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) + u = np.array( + [np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float + ) z = d.outputs if len(u) > WINDOW: @@ -44,36 +50,39 @@ def run_example(): # Step 2: Build standard model print("Building standard model...") m_batt = LSTMStateTransitionModel.from_data( - inputs = input_data, - outputs = output_data, - window=WINDOW, - epochs=30, + inputs=input_data, + outputs=output_data, + window=WINDOW, + epochs=30, units=64, # Additional units given the increased complexity of the system - input_keys = ['i', 'dt'], - output_keys = ['t', 'v']) - m_batt.plot_history() + input_keys=["i", "dt"], + output_keys=["t", "v"], + ) + m_batt.plot_history() # Step 3: Build custom model - print('Building custom model...') - (u_all, z_all, _, _) = LSTMStateTransitionModel.pre_process_data(input_data, output_data, window=12) - + print("Building custom model...") + (u_all, z_all, _, _) = LSTMStateTransitionModel.pre_process_data( + input_data, output_data, window=12 + ) + # Normalize n_inputs = len(input_data[0][0]) - u_mean = np.mean(u_all[:,0,:n_inputs], axis=0) - u_std = np.std(u_all[:,0,:n_inputs], axis=0) - # If there's no variation- don't normalize + u_mean = np.mean(u_all[:, 0, :n_inputs], axis=0) + u_std = np.std(u_all[:, 0, :n_inputs], axis=0) + # If there's no variation- don't normalize u_std[u_std == 0] = 1 z_mean = np.mean(z_all, axis=0) z_std = np.std(z_all, axis=0) - # If there's no variation- don't normalize + # If there's no variation- don't normalize z_std[z_std == 0] = 1 # Add output (since z_t-1 is last input) u_mean = np.hstack((u_mean, z_mean)) u_std = np.hstack((u_std, z_std)) - u_all = (u_all - u_mean)/u_std - z_all = (z_all - z_mean)/z_std + u_all = (u_all - u_mean) / u_std + z_all = (z_all - z_mean) / z_std # u_mean and u_std act on the column vector form (from inputcontainer) # so we need to transpose them to a column vector @@ -88,40 +97,51 @@ def run_example(): x = layers.Dense(z_all.shape[1] if z_all.ndim == 2 else 1)(x) model = keras.Model(inputs, x) model.compile(optimizer="rmsprop", loss="mse", metrics=["mae"]) - history = model.fit(u_all, z_all, epochs=30, callbacks = callbacks, validation_split = 0.1) + history = model.fit( + u_all, z_all, epochs=30, callbacks=callbacks, validation_split=0.1 + ) # Step 4: Build LSTMStateTransitionModel - m_custom = LSTMStateTransitionModel(model, - normalization=normalization, - input_keys = ['i', 'dt'], - output_keys = ['t', 'v'], history=history # Provide history so plot_history will work + m_custom = LSTMStateTransitionModel( + model, + normalization=normalization, + 
input_keys=["i", "dt"], + output_keys=["t", "v"], + history=history, # Provide history so plot_history will work ) m_custom.plot_history() # Step 5: Simulate - print('Simulating...') + print("Simulating...") t_counter = 0 x_counter = batt.initialize() + def future_loading(t, x=None): - return batt.InputContainer({'i': 3}) + return batt.InputContainer({"i": 3}) - def future_loading2(t, x = None): + def future_loading2(t, x=None): nonlocal t_counter, x_counter z = batt.output(x_counter) - z = m_batt.InputContainer({'i': 3, 't_t-1': z['t'], 'v_t-1': z['v'], 'dt': t - t_counter}) + z = m_batt.InputContainer( + {"i": 3, "t_t-1": z["t"], "v_t-1": z["v"], "dt": t - t_counter} + ) x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z + data = batt.simulate_to_threshold(future_loading, dt=1, save_freq=1) results = m_batt.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1) - results_custom = m_custom.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1) + results_custom = m_custom.simulate_to( + data.times[-1], future_loading2, dt=1, save_freq=1 + ) # Step 6: Compare performance - print('Comparing performance...') - data.outputs.plot(title='original model', compact=False) - results.outputs.plot(title='generated model', compact=False) - results_custom.outputs.plot(title='custom model', compact=False) + print("Comparing performance...") + data.outputs.plot(title="original model", compact=False) + results.outputs.plot(title="generated model", compact=False) + results_custom.outputs.plot(title="custom model", compact=False) plt.show() -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/c17e158cee55f8895ef5d6988a1c24ec/sim_powertrain.py b/docs/_downloads/c17e158cee55f8895ef5d6988a1c24ec/sim_powertrain.py index 9a298539..bf4dd5b2 100644 --- a/docs/_downloads/c17e158cee55f8895ef5d6988a1c24ec/sim_powertrain.py +++ b/docs/_downloads/c17e158cee55f8895ef5d6988a1c24ec/sim_powertrain.py @@ -2,11 +2,12 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a powertrain being simulated for a set amount of time. +Example of a powertrain being simulated for a set amount of time. 
""" from progpy.models import Powertrain, ESC, DCMotor + def run_example(): # Create a model object esc = ESC() @@ -15,16 +16,16 @@ def run_example(): # Define future loading function - 100% duty all the time def future_loading(t, x=None): - return powertrain.InputContainer({ - 'duty': 1, - 'v': 23 - }) - + return powertrain.InputContainer({"duty": 1, "v": 23}) + # Simulate to threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') - simulated_results = powertrain.simulate_to(2, future_loading, dt=2e-5, save_freq=0.1, print=True) + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") + simulated_results = powertrain.simulate_to( + 2, future_loading, dt=2e-5, save_freq=0.1, print=True + ) + -# This allows the module to be executed directly -if __name__ == '__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/c1c42cad52e78cca207b51e1a40261dc/sim_pump.py b/docs/_downloads/c1c42cad52e78cca207b51e1a40261dc/sim_pump.py new file mode 100644 index 00000000..446bec20 --- /dev/null +++ b/docs/_downloads/c1c42cad52e78cca207b51e1a40261dc/sim_pump.py @@ -0,0 +1,69 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the +# National Aeronautics and Space Administration. All Rights Reserved. + +""" +Example of a centrifugal pump being simulated until threshold is met. +""" + +from progpy.models import CentrifugalPump +import matplotlib.pyplot as plt +from progpy.sim_result import SimResult + + +def run_example(): + # Step 1: Setup Pump + pump = CentrifugalPump(process_noise=0) + pump.parameters["x0"]["wA"] = 0.01 # Set Wear Rate + + # Step 2: Setup Future Loading + cycle_time = 3600 + + def future_loading(t, x=None): + t = t % cycle_time + if t < cycle_time / 2.0: + V = 471.2389 + elif t < cycle_time / 2 + 100: + V = 471.2389 + (t - cycle_time / 2) + elif t < cycle_time - 100: + V = 571.2389 + else: + V = 471.2398 - (t - cycle_time) + + return pump.InputContainer( + {"Tamb": 290, "V": V, "pdisch": 928654, "psuc": 239179, "wsync": V * 0.8} + ) + + # Step 3: Sim + first_output = pump.output(pump.initialize(future_loading(0), {})) + config = {"horizon": 1e5, "save_freq": 1e3, "print": True} + simulated_results = pump.simulate_to_threshold( + future_loading, first_output, **config + ) + + # Step 4: Plot Results + simulated_results.inputs.plot( + compact=False, + title="Inputs", + xlabel="time", + ylabel={lbl: lbl for lbl in pump.inputs}, + ) + simulated_results.outputs.plot( + compact=False, title="Outputs", xlabel="time", ylabel="" + ) + simulated_results.states.plot( + compact=False, title="States", xlabel="time", ylabel="" + ) + simulated_results.event_states.plot( + compact=False, title="Events", xlabel="time", ylabel="" + ) + + thresholds_met = [pump.threshold_met(x) for x in simulated_results.states] + thresholds_met = SimResult(simulated_results.times, thresholds_met) + thresholds_met.plot(compact=False, title="Threshold Met", xlabel="time", ylabel="") + + plt.show() + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/c234367290acf8de0cb8c64a0b97191d/sim_battery_eol.py b/docs/_downloads/c234367290acf8de0cb8c64a0b97191d/sim_battery_eol.py index d1fcaef4..b943ea02 100644 --- a/docs/_downloads/c234367290acf8de0cb8c64a0b97191d/sim_battery_eol.py +++ b/docs/_downloads/c234367290acf8de0cb8c64a0b97191d/sim_battery_eol.py 
@@ -2,23 +2,24 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a battery being simulated until End of Life (EOL). Battery capacity decreases with use. In this case, EOL is defined as when the battery capacity falls below some acceptable threshold (i.e., what we define as useful capacity). +Example of a battery being simulated until End of Life (EOL). Battery capacity decreases with use. In this case, EOL is defined as when the battery capacity falls below some acceptable threshold (i.e., what we define as useful capacity). """ import matplotlib.pyplot as plt from progpy.models import BatteryElectroChem as Battery -def run_example(): + +def run_example(): # Step 1: Create a model object batt = Battery() # Step 2: Define future loading function - # Here we're using a function designed to charge until 0.95, + # Here we're using a function designed to charge until 0.95, # then discharge until 0.05 load = 1 def future_loading(t, x=None): - nonlocal load + nonlocal load # Rule for loading after initialization if x is not None: @@ -29,26 +30,33 @@ def future_loading(t, x=None): elif event_state["EOD"] < 0.05: load = -1 # Charge # Rule for loading at initialization - return batt.InputContainer({'i': load}) + return batt.InputContainer({"i": load}) # Step 3: Simulate to Capacity is insufficient Threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") options = { - 'save_freq': 1000, # Frequency at which results are saved - 'dt': 2, # Timestep - 'threshold_keys': ['InsufficientCapacity'], # Simulate to InsufficientCapacity - 'print': True + "save_freq": 1000, # Frequency at which results are saved + "dt": 2, # Timestep + "threshold_keys": ["InsufficientCapacity"], # Simulate to InsufficientCapacity + "print": True, } simulated_results = batt.simulate_to_threshold(future_loading, **options) # Step 4: Plot Results - simulated_results.inputs.plot(ylabel='Current drawn (amps)') - simulated_results.event_states.plot(ylabel='Event States', labels={'EOD': 'State of Charge (SOC)', 'InsufficientCapacity': 'State of Health (SOH)'}) + simulated_results.inputs.plot(ylabel="Current drawn (amps)") + simulated_results.event_states.plot( + ylabel="Event States", + labels={ + "EOD": "State of Charge (SOC)", + "InsufficientCapacity": "State of Health (SOH)", + }, + ) plt.ylim([0, 1]) plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/c416c00aa2e02950fa0b0b4547cf49f9/new_model.py b/docs/_downloads/c416c00aa2e02950fa0b0b4547cf49f9/new_model.py index 46fbb3fe..a0d48896 100644 --- a/docs/_downloads/c416c00aa2e02950fa0b0b4547cf49f9/new_model.py +++ b/docs/_downloads/c416c00aa2e02950fa0b0b4547cf49f9/new_model.py @@ -2,7 +2,7 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example defining and using a new prognostics model. +Example defining and using a new prognostics model. In this example a simple state-transition model of an object thrown upward into the air is defined. That model is then used in simulation under different conditions and the results are displayed in different formats. 
""" @@ -15,106 +15,122 @@ class ThrownObject(PrognosticsModel): Model that simulates an object thrown into the air without air resistance """ - inputs = [] # no inputs, no way to control + inputs = [] # no inputs, no way to control states = [ - 'x', # Position (m) - 'v' # Velocity (m/s) - ] - outputs = [ # Anything we can measure - 'x' # Position (m) + "x", # Position (m) + "v", # Velocity (m/s) + ] + outputs = [ # Anything we can measure + "x" # Position (m) ] events = [ - 'falling', # Event- object is falling - 'impact' # Event- object has impacted ground + "falling", # Event- object is falling + "impact", # Event- object has impacted ground ] # The Default parameters. Overwritten by passing parameters into constructor default_parameters = { - 'x0': { # Initial State - 'x': 1.83, # Height of thrower (m) - 'v': 40 # Velocity at which the object is thrown (m/s) + "x0": { # Initial State + "x": 1.83, # Height of thrower (m) + "v": 40, # Velocity at which the object is thrown (m/s) }, - 'g': -9.81, # Acceleration due to gravity (m/s^2) - 'process_noise': 0.0 # amount of noise in each step + "g": -9.81, # Acceleration due to gravity (m/s^2) + "process_noise": 0.0, # amount of noise in each step } def initialize(self, *args, **kwargs): self.max_x = 0 # Set maximum height return super().initialize(*args, **kwargs) - + def dx(self, x, u): - return self.StateContainer({'x': x['v'], - 'v': self.parameters['g']}) # Acceleration of gravity + return self.StateContainer( + {"x": x["v"], "v": self.parameters["g"]} + ) # Acceleration of gravity def output(self, x): - return self.OutputContainer({'x': x['x']}) + return self.OutputContainer({"x": x["x"]}) # This is actually optional. Leaving thresholds_met empty will use the event state to define thresholds. # Threshold = Event State == 0. However, this implementation is more efficient, so we included it def threshold_met(self, x): - return { - 'falling': x['v'] < 0, - 'impact': x['x'] <= 0 - } + return {"falling": x["v"] < 0, "impact": x["x"] <= 0} - def event_state(self, x): - self.max_x = max(self.max_x, x['x']) # Maximum altitude + def event_state(self, x): + self.max_x = max(self.max_x, x["x"]) # Maximum altitude return { - 'falling': max(x['v']/self.parameters['x0']['v'],0), # Throwing speed is max speed - 'impact': max(x['x']/self.max_x,0) # 1 until falling begins, then it's fraction of height + "falling": max( + x["v"] / self.parameters["x0"]["v"], 0 + ), # Throwing speed is max speed + "impact": max( + x["x"] / self.max_x, 0 + ), # 1 until falling begins, then it's fraction of height } + def run_example(): # Demo model # Step 1: Create instance of model m = ThrownObject() - # Step 2: Setup for simulation + # Step 2: Setup for simulation def future_load(t, x=None): return m.InputContainer({}) # No inputs, no way to control # Step 3: Simulate to impact - event = 'impact' - simulated_results = m.simulate_to_threshold(future_load, events=event, dt=0.005, save_freq=1, print = True) - + event = "impact" + simulated_results = m.simulate_to_threshold( + future_load, events=event, dt=0.005, save_freq=1, print=True + ) + # Print flight time - print('The object hit the ground in {} seconds'.format(round(simulated_results.times[-1],2))) + print( + "The object hit the ground in {} seconds".format( + round(simulated_results.times[-1], 2) + ) + ) - # OK, now lets compare performance on different heavenly bodies. + # OK, now lets compare performance on different heavenly bodies. 
# This requires that we update the cofiguration grav_moon = -1.62 # The first way to change the configuration is to pass in your desired config into construction of the model - m = ThrownObject(g = grav_moon) - simulated_moon_results = m.simulate_to_threshold(future_load, events=event, dt=0.005, save_freq=1) + m = ThrownObject(g=grav_moon) + simulated_moon_results = m.simulate_to_threshold( + future_load, events=event, dt=0.005, save_freq=1 + ) grav_mars = -3.711 # You can also update the parameters after it's constructed - m.parameters['g'] = grav_mars - simulated_mars_results = m.simulate_to_threshold(future_load, events=event, dt=0.005, save_freq=1) + m.parameters["g"] = grav_mars + simulated_mars_results = m.simulate_to_threshold( + future_load, events=event, dt=0.005, save_freq=1 + ) grav_venus = -8.87 - m.parameters['g'] = grav_venus - simulated_venus_results = m.simulate_to_threshold(future_load, events=event, dt=0.005, save_freq=1) + m.parameters["g"] = grav_venus + simulated_venus_results = m.simulate_to_threshold( + future_load, events=event, dt=0.005, save_freq=1 + ) - print('Time to hit the ground: ') - print('\tvenus: {}s'.format(round(simulated_venus_results.times[-1],2))) - print('\tearth: {}s'.format(round(simulated_results.times[-1],2))) - print('\tmars: {}s'.format(round(simulated_mars_results.times[-1],2))) - print('\tmoon: {}s'.format(round(simulated_moon_results.times[-1],2))) + print("Time to hit the ground: ") + print("\tvenus: {}s".format(round(simulated_venus_results.times[-1], 2))) + print("\tearth: {}s".format(round(simulated_results.times[-1], 2))) + print("\tmars: {}s".format(round(simulated_mars_results.times[-1], 2))) + print("\tmoon: {}s".format(round(simulated_moon_results.times[-1], 2))) # We can also simulate until any event is met by neglecting the events argument simulated_results = m.simulate_to_threshold(future_load, dt=0.005, save_freq=1) threshs_met = m.threshold_met(simulated_results.states[-1]) - for (key, met) in threshs_met.items(): + for key, met in threshs_met.items(): if met: event_occurred = key - print('\nThis event that occurred first: ', event_occurred) + print("\nThis event that occurred first: ", event_occurred) # It falls before it hits the ground, obviously # Metrics can be analyzed from the simulation results. For example: monotonicity - print('\nMonotonicity: ', simulated_results.event_states.monotonicity()) + print("\nMonotonicity: ", simulated_results.event_states.monotonicity()) + -# This allows the module to be executed directly -if __name__=='__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/c528558fd42bc49e87db753f5eefc58d/lstm_model.ipynb b/docs/_downloads/c528558fd42bc49e87db753f5eefc58d/lstm_model.ipynb index 18f90bee..af6ef44b 100644 --- a/docs/_downloads/c528558fd42bc49e87db753f5eefc58d/lstm_model.ipynb +++ b/docs/_downloads/c528558fd42bc49e87db753f5eefc58d/lstm_model.ipynb @@ -1,54 +1,281 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample building a LSTMStateTransitionModel from data. This is a simple example of how to use the LSTMStateTransitionModel class.\n\nIn this example, we generate fake data using the ThrownObject model. This is a case where we're generating a surrogate model from the physics-based model. 
For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. We then use the generated model and compare to the original model.\n\nFinally, we repeat the exercise with data from the more complex BatteryElectroChemEOD model.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom progpy.data_models import LSTMStateTransitionModel\nfrom progpy.models import ThrownObject, BatteryElectroChemEOD\n\ndef run_example():\n # -----------------------------------------------------\n # Example 1- set timestep \n # Here we will create a model for a specific timestep.\n # The model will only work with that timestep \n # This is useful if you know the timestep you would like to use\n # -----------------------------------------------------\n TIMESTEP = 0.01\n\n # Step 1: Generate data\n # We'll use the ThrownObject model to generate data.\n # For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), \n # you'll replace that generated data with your own.\n print('Generating data')\n m = ThrownObject()\n\n def future_loading(t, x=None):\n return m.InputContainer({}) # No input for thrown object \n\n data = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP, dt=TIMESTEP)\n\n # Step 2: Generate model\n # We'll use the LSTMStateTransitionModel class to generate a model from the data.\n print('Building model...')\n m2 = LSTMStateTransitionModel.from_data(\n inputs = [data.inputs],\n outputs = [data.outputs], \n window=4, \n epochs=30,\n output_keys = ['x']) \n \n # Step 3: Use model to simulate_to time of threshold\n print('Simulating with generated model...')\n\n t_counter = 0\n x_counter = m.initialize()\n def future_loading2(t, x = None):\n # Future Loading is a bit complicated here \n # Loading for the resulting model includes the data inputs, \n # and the output from the last timestep\n nonlocal t_counter, x_counter\n z = m.output(x_counter)\n z = m2.InputContainer(z.matrix)\n x_counter = m.next_state(x_counter, future_loading(t), t - t_counter)\n t_counter = t\n return z\n \n results2 = m2.simulate_to(data.times[-1], future_loading2, dt=TIMESTEP, save_freq=TIMESTEP)\n\n # Step 4: Compare model to original model\n print('Comparing results...')\n data.outputs.plot(title='original model')\n results2.outputs.plot(title='generated model')\n plt.show()\n\n # -----------------------------------------------------\n # Example 2- variable timestep \n # Here we will create a model to work with any timestep\n # We do this by adding timestep as a variable in the model\n # -----------------------------------------------------\n\n # Step 1: Generate additional data\n # We will use data generated above, but we also want data at additional timesteps \n print('\\n------------------------------------------\\nExample 2...')\n print('Generating additional data...')\n data_half = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2)\n data_quarter = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4)\n data_twice = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2)\n data_four = m.simulate_to_threshold(future_loading, threshold_keys='impact', 
save_freq=TIMESTEP*4, dt=TIMESTEP*4)\n\n # Step 2: Data Prep\n # We need to add the timestep as a input\n u = np.array([[TIMESTEP] for _ in data.inputs])\n u_half = np.array([[TIMESTEP/2] for _ in data_half.inputs])\n u_quarter = np.array([[TIMESTEP/4] for _ in data_quarter.inputs])\n u_twice = np.array([[TIMESTEP*2] for _ in data_twice.inputs])\n u_four = np.array([[TIMESTEP*4] for _ in data_four.inputs])\n\n input_data = [u, u_half, u_quarter, u_twice, u_four]\n output_data = [data.outputs, data_half.outputs, data_quarter.outputs, data_twice.outputs, data_four.outputs]\n\n # Step 3: Generate Model\n print('Building model...')\n m3 = LSTMStateTransitionModel.from_data(\n inputs = input_data, \n outputs = output_data,\n window=4, \n epochs=30, \n input_keys = ['dt'],\n output_keys = ['x']) \n # Note, since we're generating from a model, we could also have done this:\n # m3 = LSTMStateTransitionModel.from_model(\n # m,\n # [future_loading for _ in range(5)],\n # dt = [TIMESTEP, TIMESTEP/2, TIMESTEP/4, TIMESTEP*2, TIMESTEP*4],\n # window=4, \n # epochs=30) \n\n # Step 4: Simulate with model\n t_counter = 0\n x_counter = m.initialize()\n def future_loading3(t, x = None):\n nonlocal t_counter, x_counter\n z = m3.InputContainer({'x_t-1': x_counter['x'], 'dt': t - t_counter})\n x_counter = m.next_state(x_counter, future_loading(t), t - t_counter)\n t_counter = t\n return z\n\n # Use new dt, not used in training\n # Using a dt not used in training will demonstrate the model's \n # ability to handle different timesteps not part of training set\n data = m.simulate_to(data.times[-1], future_loading, dt=TIMESTEP*3, save_freq=TIMESTEP*3)\n results3 = m3.simulate_to(data.times[-1], future_loading3, dt=TIMESTEP*3, save_freq=TIMESTEP*3)\n\n # Step 5: Compare Results\n print('Comparing results...')\n data.outputs.plot(title='original model')\n results3.outputs.plot(title='generated model')\n plt.show()\n\n # -----------------------------------------------------\n # Example 3- More complicated system\n # Here we will create a model for a more complicated system\n # For this example we will use the BatteryElectroChemEOD model\n # -----------------------------------------------------\n print('\\n------------------------------------------\\nExample 3...')\n print('Generating data...')\n batt = BatteryElectroChemEOD(process_noise = 0, measurement_noise=0)\n future_loading_eqns = [lambda t, x=None, load=load: batt.InputContainer({'i': 1+1.5*load}) for load in range(6)]\n # Generate data with different loading and step sizes\n # Adding the step size as an element of the output\n input_data = []\n output_data = []\n for i in range(9):\n dt = i/3+0.25\n for loading_eqn in future_loading_eqns:\n d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) \n input_data.append(np.array([np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float))\n output_data.append(d.outputs)\n \n # Step 2: Generate Model\n print('Building model...') \n m_batt = LSTMStateTransitionModel.from_data(\n inputs = input_data,\n outputs = output_data,\n window=12, \n epochs=3, \n units=64, # Additional units given the increased complexity of the system\n input_keys = ['i', 'dt'],\n output_keys = ['t', 'v']) \n\n # Step 3: Simulate with model\n t_counter = 0\n x_counter = batt.initialize()\n\n def future_loading(t, x=None):\n return batt.InputContainer({'i': 3})\n\n def future_loading2(t, x = None):\n nonlocal t_counter, x_counter\n z = batt.output(x_counter)\n z = m_batt.InputContainer({'i': 3, 't_t-1': z['t'], 'v_t-1': 
z['v'], 'dt': t - t_counter})\n x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter)\n t_counter = t\n return z\n\n # Use a new dt, not used in training. \n # Using a dt not used in training will demonstrate the model's \n # ability to handle different timesteps not part of training set\n data = batt.simulate_to_threshold(future_loading, dt=1, save_freq=1)\n results = m_batt.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1)\n\n # Step 5: Compare Results\n print('Comparing results...')\n data.outputs.plot(title='original model', compact=False)\n results.outputs.plot(title='generated model', compact=False)\n plt.show()\n\n # This last example isn't a perfect fit, but it matches the behavior pretty well\n # Especially the voltage curve\n\nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample building a LSTMStateTransitionModel from data. This is a simple example of how to use the LSTMStateTransitionModel class.\n\nIn this example, we generate fake data using the ThrownObject model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. 
We then use the generated model and compare to the original model.\n\nFinally, we repeat the exercise with data from the more complex BatteryElectroChemEOD model.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "from progpy.data_models import LSTMStateTransitionModel\n", + "from progpy.models import ThrownObject, BatteryElectroChemEOD\n", + "\n", + "\n", + "def run_example():\n", + " # -----------------------------------------------------\n", + " # Example 1- set timestep\n", + " # Here we will create a model for a specific timestep.\n", + " # The model will only work with that timestep\n", + " # This is useful if you know the timestep you would like to use\n", + " # -----------------------------------------------------\n", + " TIMESTEP = 0.01\n", + "\n", + " # Step 1: Generate data\n", + " # We'll use the ThrownObject model to generate data.\n", + " # For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment),\n", + " # you'll replace that generated data with your own.\n", + " print(\"Generating data\")\n", + " m = ThrownObject()\n", + "\n", + " def future_loading(t, x=None):\n", + " return m.InputContainer({}) # No input for thrown object\n", + "\n", + " data = m.simulate_to_threshold(\n", + " future_loading, threshold_keys=\"impact\", save_freq=TIMESTEP, dt=TIMESTEP\n", + " )\n", + "\n", + " # Step 2: Generate model\n", + " # We'll use the LSTMStateTransitionModel class to generate a model from the data.\n", + " print(\"Building model...\")\n", + " m2 = LSTMStateTransitionModel.from_data(\n", + " inputs=[data.inputs],\n", + " outputs=[data.outputs],\n", + " window=4,\n", + " epochs=30,\n", + " output_keys=[\"x\"],\n", + " )\n", + "\n", + " # Step 3: Use model to simulate_to time of threshold\n", + " print(\"Simulating with generated model...\")\n", + "\n", + " t_counter = 0\n", + " x_counter = m.initialize()\n", + "\n", + " def future_loading2(t, x=None):\n", + " # Future Loading is a bit complicated here\n", + " # Loading for the resulting model includes the data inputs,\n", + " # and the output from the last timestep\n", + " nonlocal t_counter, x_counter\n", + " z = m.output(x_counter)\n", + " z = m2.InputContainer(z.matrix)\n", + " x_counter = m.next_state(x_counter, future_loading(t), t - t_counter)\n", + " t_counter = t\n", + " return z\n", + "\n", + " results2 = m2.simulate_to(\n", + " data.times[-1], future_loading2, dt=TIMESTEP, save_freq=TIMESTEP\n", + " )\n", + "\n", + " # Step 4: Compare model to original model\n", + " print(\"Comparing results...\")\n", + " data.outputs.plot(title=\"original model\")\n", + " results2.outputs.plot(title=\"generated model\")\n", + " plt.show()\n", + "\n", + " # -----------------------------------------------------\n", + " # Example 2- variable timestep\n", + " # Here we will create a model to work with any timestep\n", + " # We do this by adding timestep as a variable in the model\n", + " # -----------------------------------------------------\n", + "\n", + " # Step 1: Generate additional data\n", + " # We will use data generated above, but we also want data at additional timesteps\n", + " print(\"\\n------------------------------------------\\nExample 2...\")\n", + " print(\"Generating additional data...\")\n", + " data_half = m.simulate_to_threshold(\n", + " future_loading, threshold_keys=\"impact\", save_freq=TIMESTEP / 2, 
dt=TIMESTEP / 2\n", + " )\n", + " data_quarter = m.simulate_to_threshold(\n", + " future_loading, threshold_keys=\"impact\", save_freq=TIMESTEP / 4, dt=TIMESTEP / 4\n", + " )\n", + " data_twice = m.simulate_to_threshold(\n", + " future_loading, threshold_keys=\"impact\", save_freq=TIMESTEP * 2, dt=TIMESTEP * 2\n", + " )\n", + " data_four = m.simulate_to_threshold(\n", + " future_loading, threshold_keys=\"impact\", save_freq=TIMESTEP * 4, dt=TIMESTEP * 4\n", + " )\n", + "\n", + " # Step 2: Data Prep\n", + " # We need to add the timestep as a input\n", + " u = np.array([[TIMESTEP] for _ in data.inputs])\n", + " u_half = np.array([[TIMESTEP / 2] for _ in data_half.inputs])\n", + " u_quarter = np.array([[TIMESTEP / 4] for _ in data_quarter.inputs])\n", + " u_twice = np.array([[TIMESTEP * 2] for _ in data_twice.inputs])\n", + " u_four = np.array([[TIMESTEP * 4] for _ in data_four.inputs])\n", + "\n", + " input_data = [u, u_half, u_quarter, u_twice, u_four]\n", + " output_data = [\n", + " data.outputs,\n", + " data_half.outputs,\n", + " data_quarter.outputs,\n", + " data_twice.outputs,\n", + " data_four.outputs,\n", + " ]\n", + "\n", + " # Step 3: Generate Model\n", + " print(\"Building model...\")\n", + " m3 = LSTMStateTransitionModel.from_data(\n", + " inputs=input_data,\n", + " outputs=output_data,\n", + " window=4,\n", + " epochs=30,\n", + " input_keys=[\"dt\"],\n", + " output_keys=[\"x\"],\n", + " )\n", + " # Note, since we're generating from a model, we could also have done this:\n", + " # m3 = LSTMStateTransitionModel.from_model(\n", + " # m,\n", + " # [future_loading for _ in range(5)],\n", + " # dt = [TIMESTEP, TIMESTEP/2, TIMESTEP/4, TIMESTEP*2, TIMESTEP*4],\n", + " # window=4,\n", + " # epochs=30)\n", + "\n", + " # Step 4: Simulate with model\n", + " t_counter = 0\n", + " x_counter = m.initialize()\n", + "\n", + " def future_loading3(t, x=None):\n", + " nonlocal t_counter, x_counter\n", + " z = m3.InputContainer({\"x_t-1\": x_counter[\"x\"], \"dt\": t - t_counter})\n", + " x_counter = m.next_state(x_counter, future_loading(t), t - t_counter)\n", + " t_counter = t\n", + " return z\n", + "\n", + " # Use new dt, not used in training\n", + " # Using a dt not used in training will demonstrate the model's\n", + " # ability to handle different timesteps not part of training set\n", + " data = m.simulate_to(\n", + " data.times[-1], future_loading, dt=TIMESTEP * 3, save_freq=TIMESTEP * 3\n", + " )\n", + " results3 = m3.simulate_to(\n", + " data.times[-1], future_loading3, dt=TIMESTEP * 3, save_freq=TIMESTEP * 3\n", + " )\n", + "\n", + " # Step 5: Compare Results\n", + " print(\"Comparing results...\")\n", + " data.outputs.plot(title=\"original model\")\n", + " results3.outputs.plot(title=\"generated model\")\n", + " plt.show()\n", + "\n", + " # -----------------------------------------------------\n", + " # Example 3- More complicated system\n", + " # Here we will create a model for a more complicated system\n", + " # For this example we will use the BatteryElectroChemEOD model\n", + " # -----------------------------------------------------\n", + " print(\"\\n------------------------------------------\\nExample 3...\")\n", + " print(\"Generating data...\")\n", + " batt = BatteryElectroChemEOD(process_noise=0, measurement_noise=0)\n", + " future_loading_eqns = [\n", + " lambda t, x=None, load=load: batt.InputContainer({\"i\": 1 + 1.5 * load})\n", + " for load in range(6)\n", + " ]\n", + " # Generate data with different loading and step sizes\n", + " # Adding the step size as an element of the 
output\n", + " input_data = []\n", + " output_data = []\n", + " for i in range(9):\n", + " dt = i / 3 + 0.25\n", + " for loading_eqn in future_loading_eqns:\n", + " d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt)\n", + " input_data.append(\n", + " np.array(\n", + " [np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs],\n", + " dtype=float,\n", + " )\n", + " )\n", + " output_data.append(d.outputs)\n", + "\n", + " # Step 2: Generate Model\n", + " print(\"Building model...\")\n", + " m_batt = LSTMStateTransitionModel.from_data(\n", + " inputs=input_data,\n", + " outputs=output_data,\n", + " window=12,\n", + " epochs=3,\n", + " units=64, # Additional units given the increased complexity of the system\n", + " input_keys=[\"i\", \"dt\"],\n", + " output_keys=[\"t\", \"v\"],\n", + " )\n", + "\n", + " # Step 3: Simulate with model\n", + " t_counter = 0\n", + " x_counter = batt.initialize()\n", + "\n", + " def future_loading(t, x=None):\n", + " return batt.InputContainer({\"i\": 3})\n", + "\n", + " def future_loading2(t, x=None):\n", + " nonlocal t_counter, x_counter\n", + " z = batt.output(x_counter)\n", + " z = m_batt.InputContainer(\n", + " {\"i\": 3, \"t_t-1\": z[\"t\"], \"v_t-1\": z[\"v\"], \"dt\": t - t_counter}\n", + " )\n", + " x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter)\n", + " t_counter = t\n", + " return z\n", + "\n", + " # Use a new dt, not used in training.\n", + " # Using a dt not used in training will demonstrate the model's\n", + " # ability to handle different timesteps not part of training set\n", + " data = batt.simulate_to_threshold(future_loading, dt=1, save_freq=1)\n", + " results = m_batt.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1)\n", + "\n", + " # Step 5: Compare Results\n", + " print(\"Comparing results...\")\n", + " data.outputs.plot(title=\"original model\", compact=False)\n", + " results.outputs.plot(title=\"generated model\", compact=False)\n", + " plt.show()\n", + "\n", + " # This last example isn't a perfect fit, but it matches the behavior pretty well\n", + " # Especially the voltage curve\n", + "\n", + "\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/c586316595cdbd6c17fcb9d03d91ca65/mixture_of_experts.py b/docs/_downloads/c586316595cdbd6c17fcb9d03d91ca65/mixture_of_experts.py index f83eb522..36bc6faf 100644 --- a/docs/_downloads/c586316595cdbd6c17fcb9d03d91ca65/mixture_of_experts.py +++ b/docs/_downloads/c586316595cdbd6c17fcb9d03d91ca65/mixture_of_experts.py @@ -9,66 +9,67 @@ from progpy import MixtureOfExpertsModel from progpy.models import BatteryCircuit + def run_example(): - # Mixture of Experts (MoE) models combine multiple - # models of the same system, similar to Ensemble - # models. Unlike Ensemble Models, the aggregation - # is done by selecting the "best" model. That is - # the model that has performed the best over the - # past. 
Each model will have a 'score' that is - # tracked in the state, and this determines which + # Mixture of Experts (MoE) models combine multiple + # models of the same system, similar to Ensemble + # models. Unlike Ensemble Models, the aggregation + # is done by selecting the "best" model. That is + # the model that has performed the best over the + # past. Each model will have a 'score' that is + # tracked in the state, and this determines which # model is best. - # To demonstrate this feature we will repeat the - # example from the ensemble model section, this - # time with a mixture of experts model. For this - # example to work you will have had to have run + # To demonstrate this feature we will repeat the + # example from the ensemble model section, this + # time with a mixture of experts model. For this + # example to work you will have had to have run # the ensemble model section example. - # First, let's combine three battery circuit + # First, let's combine three battery circuit # models into a single mixture of experts model. - print('setting up....') + print("setting up....") m_circuit = BatteryCircuit(qMax=6700, Rs=0.055) m_circuit_2 = BatteryCircuit(qMax=7860) m_circuit_3 = BatteryCircuit() - m = MixtureOfExpertsModel( - models=(m_circuit, m_circuit_2, m_circuit_3)) + m = MixtureOfExpertsModel(models=(m_circuit, m_circuit_2, m_circuit_3)) - # Note: The combined model has the same outputs and + # Note: The combined model has the same outputs and # events as the circuit model. - print('outputs: ', m.outputs) - print('events: ', m.events) - - # Its states contain all of the states of each model, - # kept separate. Each individual model comprising - # the MoE model will be simulated separately, so the - # model keeps track of the states propogated through - # each model separately. The states also include + print("outputs: ", m.outputs) + print("events: ", m.events) + + # Its states contain all of the states of each model, + # kept separate. Each individual model comprising + # the MoE model will be simulated separately, so the + # model keeps track of the states propogated through + # each model separately. The states also include # scores for each model. - print('states: ', m.states) - - #The MoE model inputs include both the comprised - # model input, i (current) and outputs: v (voltage) - # and t(temperature). The comprised model outputs - # are provided to update the scores of each model - # when performing state transition. If they are - # not provided when calling next_state, then scores + print("states: ", m.states) + + # The MoE model inputs include both the comprised + # model input, i (current) and outputs: v (voltage) + # and t(temperature). The comprised model outputs + # are provided to update the scores of each model + # when performing state transition. If they are + # not provided when calling next_state, then scores # would not be updated. - print('inputs: ', m.inputs) + print("inputs: ", m.inputs) - # Now let's evaluate the performance of the combined - # model using real battery data from NASA's prognostic + # Now let's evaluate the performance of the combined + # model using real battery data from NASA's prognostic # data repository (note: this may take a while) from progpy.datasets import nasa_battery - print ('downloading data... (this may take a while)') + + print("downloading data... 
(this may take a while)") data = nasa_battery.load_data(batt_id=8)[1] RUN_ID = 0 - test_input = [{'i': i} for i in data[RUN_ID]['current']] - test_time = data[RUN_ID]['relativeTime'] + test_input = [{"i": i} for i in data[RUN_ID]["current"]] + test_time = data[RUN_ID]["relativeTime"] t_end = test_time.iloc[-1] - # To evaluate the model we first create a future + # To evaluate the model we first create a future # loading function that uses the loading from the data. def future_loading(t, x=None): for i, mission_time in enumerate(test_time): @@ -76,84 +77,109 @@ def future_loading(t, x=None): return m_circuit.InputContainer(test_input[i]) return m_circuit.InputContainer(test_input[-1]) # Default - last load - print('\n------------------\nSimulating... (this may also take a while)') + print("\n------------------\nSimulating... (this may also take a while)") results_moe = m.simulate_to(t_end, future_loading) plt.figure() - fig = plt.plot(test_time, data[RUN_ID]['voltage'], color='green', label='ground truth') - fig = plt.plot(results_moe.times, [z['v'] for z in results_moe.outputs], color='red', label='moe') - plt.title('MixtureOfExperts, before provided data') - plt.xlabel('Time (s)') - plt.ylabel('Voltage') + fig = plt.plot( + test_time, data[RUN_ID]["voltage"], color="green", label="ground truth" + ) + fig = plt.plot( + results_moe.times, + [z["v"] for z in results_moe.outputs], + color="red", + label="moe", + ) + plt.title("MixtureOfExperts, before provided data") + plt.xlabel("Time (s)") + plt.ylabel("Voltage") plt.legend() - # Here the model performs pretty poorly. If you were to - # look at the state, we see that the three scores are - # equal. This is because we haven't provided any output information. The future load function doesn't include - # the output, just the input (i). When the three scores + # Here the model performs pretty poorly. If you were to + # look at the state, we see that the three scores are + # equal. This is because we haven't provided any output information. The future load function doesn't include + # the output, just the input (i). When the three scores # are equal like this, the first model is used. - print('Model 1 Score: ', results_moe.states[-1]['BatteryCircuit._score']) - print('Model 2 Score: ', results_moe.states[-1]['BatteryCircuit_2._score']) - print('Model 3 Score: ', results_moe.states[-1]['BatteryCircuit_3._score']) + print("Model 1 Score: ", results_moe.states[-1]["BatteryCircuit._score"]) + print("Model 2 Score: ", results_moe.states[-1]["BatteryCircuit_2._score"]) + print("Model 3 Score: ", results_moe.states[-1]["BatteryCircuit_3._score"]) # Now let's provide the output for a few steps. 
- print('\n------------------\nProviding data...\n') + print("\n------------------\nProviding data...\n") x0 = m.initialize() x = m.next_state( - x=x0, - u=m.InputContainer({ - 'i': test_input[0]['i'], - 'v': data[RUN_ID]['voltage'][0], - 't': data[RUN_ID]['temperature'][0]}), - dt=test_time[1]-test_time[0]) + x=x0, + u=m.InputContainer( + { + "i": test_input[0]["i"], + "v": data[RUN_ID]["voltage"][0], + "t": data[RUN_ID]["temperature"][0], + } + ), + dt=test_time[1] - test_time[0], + ) x = m.next_state( - x=x, - u=m.InputContainer({ - 'i': test_input[1]['i'], - 'v': data[RUN_ID]['voltage'][1], - 't': data[RUN_ID]['temperature'][1]}), - dt=test_time[1]-test_time[0]) - + x=x, + u=m.InputContainer( + { + "i": test_input[1]["i"], + "v": data[RUN_ID]["voltage"][1], + "t": data[RUN_ID]["temperature"][1], + } + ), + dt=test_time[1] - test_time[0], + ) + # Let's take a look at the model scores again - print('Model 1 Score: ', x['BatteryCircuit._score']) - print('Model 2 Score: ', x['BatteryCircuit_2._score']) - print('Model 3 Score: ', x['BatteryCircuit_3._score']) + print("Model 1 Score: ", x["BatteryCircuit._score"]) + print("Model 2 Score: ", x["BatteryCircuit_2._score"]) + print("Model 3 Score: ", x["BatteryCircuit_3._score"]) # Here we see after a few steps the algorithm has determined that model 3 is the better fitting of the models. Now if we were to repeat the simulation, it would use the best model, resulting in a better fit. - print('\n------------------\nRe-simulating... (this may also take a while)\n') - results_moe = m.simulate_to(t_end, future_loading, t0=test_time[1]-test_time[0], x=x) + print("\n------------------\nRe-simulating... (this may also take a while)\n") + results_moe = m.simulate_to( + t_end, future_loading, t0=test_time[1] - test_time[0], x=x + ) plt.figure() - fig = plt.plot(test_time[2:], data[RUN_ID]['voltage'][2:], color='green', label='ground truth') - fig = plt.plot(results_moe.times[2:], [z['v'] for z in results_moe.outputs][2:], color='red', label='moe') - plt.title('MixtureOfExperts, after provided data') - plt.xlabel('Time (s)') - plt.ylabel('Voltage') + fig = plt.plot( + test_time[2:], data[RUN_ID]["voltage"][2:], color="green", label="ground truth" + ) + fig = plt.plot( + results_moe.times[2:], + [z["v"] for z in results_moe.outputs][2:], + color="red", + label="moe", + ) + plt.title("MixtureOfExperts, after provided data") + plt.xlabel("Time (s)") + plt.ylabel("Voltage") plt.legend() plt.show() - # The fit here is much better. The MoE model learned + # The fit here is much better. The MoE model learned # which of the three models best fit the observed behavior. - # In a prognostic application, the scores will be - # updated each time you use a state estimator - # (so long as you provide the output as part of the input). - # Then when performing a prediction the scores aren't + # In a prognostic application, the scores will be + # updated each time you use a state estimator + # (so long as you provide the output as part of the input). + # Then when performing a prediction the scores aren't # updated, since outputs are not known. - # An example of when this would be useful is for cases - # where there are three common degradation paths or - # "modes" rather than a single model with uncertainty - # to represent every mode, the three modes can be - # represented by three different models. 
Once enough - # of the degradation path has been observed the observed + # An example of when this would be useful is for cases + # where there are three common degradation paths or + # "modes" rather than a single model with uncertainty + # to represent every mode, the three modes can be + # represented by three different models. Once enough + # of the degradation path has been observed the observed # mode will be the one reported. - # If the model fit is expected to be stable - # (that is, the best model is not expected to change anymore). - # The best model can be extracted and used directly, + # If the model fit is expected to be stable + # (that is, the best model is not expected to change anymore). + # The best model can be extracted and used directly, # like demonstrated below. name, m_best = m.best_model(x) print(name, " was the best fit") -# This allows the module to be executed directly -if __name__=='__main__': - run_example() \ No newline at end of file + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/sphinx-config/auto_examples/benchmarking.py b/docs/_downloads/c5f01fb79e30655f6d28daf756382ba2/benchmarking.py similarity index 54% rename from sphinx-config/auto_examples/benchmarking.py rename to docs/_downloads/c5f01fb79e30655f6d28daf756382ba2/benchmarking.py index c5c23bfa..a57ac19c 100644 --- a/sphinx-config/auto_examples/benchmarking.py +++ b/docs/_downloads/c5f01fb79e30655f6d28daf756382ba2/benchmarking.py @@ -2,30 +2,36 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example benchmarking the computational efficiency of models. +Simple example benchmarking the computational efficiency of models. """ -from timeit import timeit from progpy.models import BatteryCircuit +from timeit import timeit + def run_example(): # Step 1: Create a model object batt = BatteryCircuit() - - # Step 2: Define future loading function + + # Step 2: Define future loading function + loading = batt.InputContainer({"i": 2}) # Constant loading + def future_loading(t, x=None): # Constant Loading - return batt.InputContainer({'i': 2}) + return loading # Step 3: Benchmark simulation of 600 seconds - print('Benchmarking...') - def sim(): - results = batt.simulate_to(600, future_loading) + print("Benchmarking...") + + def sim(): + batt.simulate_to(600, future_loading) + time = timeit(sim, number=500) # Print results - print('Simulation Time: {} ms/sim'.format(time*2)) + print("Simulation Time: {} ms/sim".format(time)) + -# This allows the module to be executed directly -if __name__=='__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/c6d7444f618d6ae233283f5ff9e60318/lstm_model.py b/docs/_downloads/c6d7444f618d6ae233283f5ff9e60318/lstm_model.py index a34f81d0..b74286a1 100644 --- a/docs/_downloads/c6d7444f618d6ae233283f5ff9e60318/lstm_model.py +++ b/docs/_downloads/c6d7444f618d6ae233283f5ff9e60318/lstm_model.py @@ -1,5 +1,5 @@ # Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. -# This ensures that the directory containing examples is in the python search directories +# This ensures that the directory containing examples is in the python search directories """ Example building a LSTMStateTransitionModel from data. This is a simple example of how to use the LSTMStateTransitionModel class. 
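A note on the benchmarking.py example above: timeit(sim, number=500) returns the total wall-clock time in seconds for all 500 runs, so if the intended unit is milliseconds per simulation, the total needs to be divided by the run count and converted to milliseconds. The following is a minimal sketch of that conversion, not part of the patch itself; it assumes progpy is installed and mirrors the example's 600 s horizon, constant 2 A load, and 500-run count.

from timeit import timeit

from progpy.models import BatteryCircuit

batt = BatteryCircuit()
loading = batt.InputContainer({"i": 2})  # constant 2 A load, as in the example

def future_loading(t, x=None):
    return loading

N_RUNS = 500  # repeat count used by the example
total_s = timeit(lambda: batt.simulate_to(600, future_loading), number=N_RUNS)
# Convert total seconds for N_RUNS simulations into milliseconds per simulation
print("Simulation Time: {:.3f} ms/sim".format(total_s / N_RUNS * 1000))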
@@ -15,45 +15,50 @@ from progpy.data_models import LSTMStateTransitionModel from progpy.models import ThrownObject, BatteryElectroChemEOD + def run_example(): # ----------------------------------------------------- - # Example 1- set timestep + # Example 1- set timestep # Here we will create a model for a specific timestep. - # The model will only work with that timestep + # The model will only work with that timestep # This is useful if you know the timestep you would like to use # ----------------------------------------------------- TIMESTEP = 0.01 # Step 1: Generate data # We'll use the ThrownObject model to generate data. - # For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), + # For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), # you'll replace that generated data with your own. - print('Generating data') + print("Generating data") m = ThrownObject() def future_loading(t, x=None): - return m.InputContainer({}) # No input for thrown object + return m.InputContainer({}) # No input for thrown object - data = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP, dt=TIMESTEP) + data = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP, dt=TIMESTEP + ) # Step 2: Generate model # We'll use the LSTMStateTransitionModel class to generate a model from the data. - print('Building model...') + print("Building model...") m2 = LSTMStateTransitionModel.from_data( - inputs = [data.inputs], - outputs = [data.outputs], - window=4, + inputs=[data.inputs], + outputs=[data.outputs], + window=4, epochs=30, - output_keys = ['x']) - + output_keys=["x"], + ) + # Step 3: Use model to simulate_to time of threshold - print('Simulating with generated model...') + print("Simulating with generated model...") t_counter = 0 x_counter = m.initialize() - def future_loading2(t, x = None): - # Future Loading is a bit complicated here - # Loading for the resulting model includes the data inputs, + + def future_loading2(t, x=None): + # Future Loading is a bit complicated here + # Loading for the resulting model includes the data inputs, # and the output from the last timestep nonlocal t_counter, x_counter z = m.output(x_counter) @@ -61,78 +66,100 @@ def future_loading2(t, x = None): x_counter = m.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z - - results2 = m2.simulate_to(data.times[-1], future_loading2, dt=TIMESTEP, save_freq=TIMESTEP) + + results2 = m2.simulate_to( + data.times[-1], future_loading2, dt=TIMESTEP, save_freq=TIMESTEP + ) # Step 4: Compare model to original model - print('Comparing results...') - data.outputs.plot(title='original model') - results2.outputs.plot(title='generated model') + print("Comparing results...") + data.outputs.plot(title="original model") + results2.outputs.plot(title="generated model") plt.show() # ----------------------------------------------------- - # Example 2- variable timestep + # Example 2- variable timestep # Here we will create a model to work with any timestep # We do this by adding timestep as a variable in the model # ----------------------------------------------------- # Step 1: Generate additional data - # We will use data generated above, but we also want data at additional timesteps - print('\n------------------------------------------\nExample 2...') - print('Generating additional data...') - data_half = m.simulate_to_threshold(future_loading, 
threshold_keys='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2) - data_quarter = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4) - data_twice = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2) - data_four = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*4, dt=TIMESTEP*4) + # We will use data generated above, but we also want data at additional timesteps + print("\n------------------------------------------\nExample 2...") + print("Generating additional data...") + data_half = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP / 2, dt=TIMESTEP / 2 + ) + data_quarter = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP / 4, dt=TIMESTEP / 4 + ) + data_twice = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP * 2, dt=TIMESTEP * 2 + ) + data_four = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP * 4, dt=TIMESTEP * 4 + ) # Step 2: Data Prep # We need to add the timestep as a input u = np.array([[TIMESTEP] for _ in data.inputs]) - u_half = np.array([[TIMESTEP/2] for _ in data_half.inputs]) - u_quarter = np.array([[TIMESTEP/4] for _ in data_quarter.inputs]) - u_twice = np.array([[TIMESTEP*2] for _ in data_twice.inputs]) - u_four = np.array([[TIMESTEP*4] for _ in data_four.inputs]) + u_half = np.array([[TIMESTEP / 2] for _ in data_half.inputs]) + u_quarter = np.array([[TIMESTEP / 4] for _ in data_quarter.inputs]) + u_twice = np.array([[TIMESTEP * 2] for _ in data_twice.inputs]) + u_four = np.array([[TIMESTEP * 4] for _ in data_four.inputs]) input_data = [u, u_half, u_quarter, u_twice, u_four] - output_data = [data.outputs, data_half.outputs, data_quarter.outputs, data_twice.outputs, data_four.outputs] + output_data = [ + data.outputs, + data_half.outputs, + data_quarter.outputs, + data_twice.outputs, + data_four.outputs, + ] # Step 3: Generate Model - print('Building model...') + print("Building model...") m3 = LSTMStateTransitionModel.from_data( - inputs = input_data, - outputs = output_data, - window=4, - epochs=30, - input_keys = ['dt'], - output_keys = ['x']) + inputs=input_data, + outputs=output_data, + window=4, + epochs=30, + input_keys=["dt"], + output_keys=["x"], + ) # Note, since we're generating from a model, we could also have done this: # m3 = LSTMStateTransitionModel.from_model( # m, # [future_loading for _ in range(5)], # dt = [TIMESTEP, TIMESTEP/2, TIMESTEP/4, TIMESTEP*2, TIMESTEP*4], - # window=4, - # epochs=30) + # window=4, + # epochs=30) # Step 4: Simulate with model t_counter = 0 x_counter = m.initialize() - def future_loading3(t, x = None): + + def future_loading3(t, x=None): nonlocal t_counter, x_counter - z = m3.InputContainer({'x_t-1': x_counter['x'], 'dt': t - t_counter}) + z = m3.InputContainer({"x_t-1": x_counter["x"], "dt": t - t_counter}) x_counter = m.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z # Use new dt, not used in training - # Using a dt not used in training will demonstrate the model's + # Using a dt not used in training will demonstrate the model's # ability to handle different timesteps not part of training set - data = m.simulate_to(data.times[-1], future_loading, dt=TIMESTEP*3, save_freq=TIMESTEP*3) - results3 = m3.simulate_to(data.times[-1], future_loading3, dt=TIMESTEP*3, save_freq=TIMESTEP*3) + data = m.simulate_to( + data.times[-1], 
future_loading, dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) + results3 = m3.simulate_to( + data.times[-1], future_loading3, dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) # Step 5: Compare Results - print('Comparing results...') - data.outputs.plot(title='original model') - results3.outputs.plot(title='generated model') + print("Comparing results...") + data.outputs.plot(title="original model") + results3.outputs.plot(title="generated model") plt.show() # ----------------------------------------------------- @@ -140,61 +167,73 @@ def future_loading3(t, x = None): # Here we will create a model for a more complicated system # For this example we will use the BatteryElectroChemEOD model # ----------------------------------------------------- - print('\n------------------------------------------\nExample 3...') - print('Generating data...') - batt = BatteryElectroChemEOD(process_noise = 0, measurement_noise=0) - future_loading_eqns = [lambda t, x=None, load=load: batt.InputContainer({'i': 1+1.5*load}) for load in range(6)] + print("\n------------------------------------------\nExample 3...") + print("Generating data...") + batt = BatteryElectroChemEOD(process_noise=0, measurement_noise=0) + future_loading_eqns = [ + lambda t, x=None, load=load: batt.InputContainer({"i": 1 + 1.5 * load}) + for load in range(6) + ] # Generate data with different loading and step sizes # Adding the step size as an element of the output input_data = [] output_data = [] for i in range(9): - dt = i/3+0.25 + dt = i / 3 + 0.25 for loading_eqn in future_loading_eqns: - d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) - input_data.append(np.array([np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float)) + d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) + input_data.append( + np.array( + [np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], + dtype=float, + ) + ) output_data.append(d.outputs) - + # Step 2: Generate Model - print('Building model...') + print("Building model...") m_batt = LSTMStateTransitionModel.from_data( - inputs = input_data, - outputs = output_data, - window=12, - epochs=3, + inputs=input_data, + outputs=output_data, + window=12, + epochs=3, units=64, # Additional units given the increased complexity of the system - input_keys = ['i', 'dt'], - output_keys = ['t', 'v']) + input_keys=["i", "dt"], + output_keys=["t", "v"], + ) # Step 3: Simulate with model t_counter = 0 x_counter = batt.initialize() def future_loading(t, x=None): - return batt.InputContainer({'i': 3}) + return batt.InputContainer({"i": 3}) - def future_loading2(t, x = None): + def future_loading2(t, x=None): nonlocal t_counter, x_counter z = batt.output(x_counter) - z = m_batt.InputContainer({'i': 3, 't_t-1': z['t'], 'v_t-1': z['v'], 'dt': t - t_counter}) + z = m_batt.InputContainer( + {"i": 3, "t_t-1": z["t"], "v_t-1": z["v"], "dt": t - t_counter} + ) x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z - # Use a new dt, not used in training. - # Using a dt not used in training will demonstrate the model's + # Use a new dt, not used in training. 
+ # Using a dt not used in training will demonstrate the model's # ability to handle different timesteps not part of training set data = batt.simulate_to_threshold(future_loading, dt=1, save_freq=1) results = m_batt.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1) # Step 5: Compare Results - print('Comparing results...') - data.outputs.plot(title='original model', compact=False) - results.outputs.plot(title='generated model', compact=False) + print("Comparing results...") + data.outputs.plot(title="original model", compact=False) + results.outputs.plot(title="generated model", compact=False) plt.show() # This last example isn't a perfect fit, but it matches the behavior pretty well # Especially the voltage curve -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/c8c9cf630cf663d27db8c6af85ffab4a/full_lstm_model.py b/docs/_downloads/c8c9cf630cf663d27db8c6af85ffab4a/full_lstm_model.py index eb04c622..644b6dd5 100644 --- a/docs/_downloads/c8c9cf630cf663d27db8c6af85ffab4a/full_lstm_model.py +++ b/docs/_downloads/c8c9cf630cf663d27db8c6af85ffab4a/full_lstm_model.py @@ -2,11 +2,11 @@ # This ensures that the directory containing examples is in the python search directories """ -Example building a full model with events and thresholds using LSTMStateTransitionModel. +Example building a full model with events and thresholds using LSTMStateTransitionModel. .. dropdown:: More details - In this example, we generate fake data using the ThrownObject model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. + In this example, we generate fake data using the ThrownObject model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. We then create a subclass of the LSTMStateTransitionModel, defining the event_state and threshold equations as a function of output. We use the generated model and compare to the original model. """ @@ -16,50 +16,68 @@ from prog_models.data_models import LSTMStateTransitionModel from prog_models.models import ThrownObject + def run_example(): # ----------------------------------------------------- # Method 1 - manual definition - # In this example we complete the models by manually defining event_state + # In this example we complete the models by manually defining event_state # and thresholds_met as function of output. 
# ----------------------------------------------------- TIMESTEP = 0.01 m = ThrownObject() + def future_loading(t, x=None): - return m.InputContainer({}) # No input for thrown object + return m.InputContainer({}) # No input for thrown object # Step 1: Generate additional data - # We will use data generated above, but we also want data at additional timesteps - print('Generating data...') - data = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP, dt=TIMESTEP) - data_half = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2) - data_quarter = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4) - data_twice = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2) - data_four = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*4, dt=TIMESTEP*4) + # We will use data generated above, but we also want data at additional timesteps + print("Generating data...") + data = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP, dt=TIMESTEP + ) + data_half = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP / 2, dt=TIMESTEP / 2 + ) + data_quarter = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP / 4, dt=TIMESTEP / 4 + ) + data_twice = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP * 2, dt=TIMESTEP * 2 + ) + data_four = m.simulate_to_threshold( + future_loading, threshold_keys="impact", save_freq=TIMESTEP * 4, dt=TIMESTEP * 4 + ) # Step 2: Data Prep # We need to add the timestep as a input u = np.array([[TIMESTEP] for _ in data.inputs]) - u_half = np.array([[TIMESTEP/2] for _ in data_half.inputs]) - u_quarter = np.array([[TIMESTEP/4] for _ in data_quarter.inputs]) - u_twice = np.array([[TIMESTEP*2] for _ in data_twice.inputs]) - u_four = np.array([[TIMESTEP*4] for _ in data_four.inputs]) + u_half = np.array([[TIMESTEP / 2] for _ in data_half.inputs]) + u_quarter = np.array([[TIMESTEP / 4] for _ in data_quarter.inputs]) + u_twice = np.array([[TIMESTEP * 2] for _ in data_twice.inputs]) + u_four = np.array([[TIMESTEP * 4] for _ in data_four.inputs]) - # In this case we are saying that velocity is directly measurable, + # In this case we are saying that velocity is directly measurable, # unlike the original model. This is necessary to calculate the events. 
# Since the outputs will then match the states, we pass in the states below u_data = [u, u_half, u_quarter, u_twice, u_four] - z_data = [data.states, data_half.states, data_quarter.states, data_twice.states, data_four.states] + z_data = [ + data.states, + data_half.states, + data_quarter.states, + data_twice.states, + data_four.states, + ] # Step 3: Create model - print('Creating model...') + print("Creating model...") - # Create a subclass of LSTMStateTransitionModel, + # Create a subclass of LSTMStateTransitionModel, # overriding event-related methods and members class LSTMThrownObject(LSTMStateTransitionModel): events = [ - 'falling', # Event- object is falling - 'impact' # Event- object has impacted ground + "falling", # Event- object is falling + "impact", # Event- object has impacted ground ] def initialize(self, u=None, z=None): @@ -71,55 +89,68 @@ def event_state(self, x): # Using class name instead of self allows the class to be subclassed z = LSTMThrownObject.output(self, x) # Logic from ThrownObject.event_state, using output instead of state - self.max_x = max(self.max_x, z['x']) # Maximum altitude + self.max_x = max(self.max_x, z["x"]) # Maximum altitude return { - 'falling': max(z['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed - 'impact': max(z['x']/self.max_x,0) # 1 until falling begins, then it's fraction of height + "falling": max( + z["v"] / self.parameters["throwing_speed"], 0 + ), # Throwing speed is max speed + "impact": max( + z["x"] / self.max_x, 0 + ), # 1 until falling begins, then it's fraction of height } def threshold_met(self, x): z = LSTMThrownObject.output(self, x) # Logic from ThrownObject.threshold_met, using output instead of state - return { - 'falling': z['v'] < 0, - 'impact': z['x'] <= 0 - } - + return {"falling": z["v"] < 0, "impact": z["x"] <= 0} + # Step 4: Generate Model - print('Building model...') + print("Building model...") m2 = LSTMThrownObject.from_data( - inputs=u_data, + inputs=u_data, outputs=z_data, - window=4, - epochs=30, - input_keys = ['dt'], - output_keys = m.states) + window=4, + epochs=30, + input_keys=["dt"], + output_keys=m.states, + ) m2.plot_history() # Step 5: Simulate with model t_counter = 0 x_counter = m.initialize() - def future_loading3(t, x = None): + + def future_loading3(t, x=None): nonlocal t_counter, x_counter - z = m2.InputContainer({'x_t-1': x_counter['x'], 'v_t-1': x_counter['v'], 'dt': t - t_counter}) + z = m2.InputContainer( + {"x_t-1": x_counter["x"], "v_t-1": x_counter["v"], "dt": t - t_counter} + ) x_counter = m.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z # Use new dt, not used in training - # Using a dt not used in training will demonstrate the model's + # Using a dt not used in training will demonstrate the model's # ability to handle different timesteps not part of training set - data = m.simulate_to_threshold(future_loading, threshold_keys='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3) - results3 = m2.simulate_to_threshold(future_loading3, threshold_keys='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3) + data = m.simulate_to_threshold( + future_loading, threshold_keys="impact", dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) + results3 = m2.simulate_to_threshold( + future_loading3, + threshold_keys="impact", + dt=TIMESTEP * 3, + save_freq=TIMESTEP * 3, + ) # Step 6: Compare Results - print('Comparing results...') - print('Predicted impact time:') - print('\tOriginal: ', data.times[-1]) - print('\tLSTM: ', results3.times[-1]) - 
data.outputs.plot(title='original model') - results3.outputs.plot(title='generated model') + print("Comparing results...") + print("Predicted impact time:") + print("\tOriginal: ", data.times[-1]) + print("\tLSTM: ", results3.times[-1]) + data.outputs.plot(title="original model") + results3.outputs.plot(title="generated model") plt.show() -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/caaed269579b6156f6d8d9d68d3cb5bb/sensitivity.py b/docs/_downloads/caaed269579b6156f6d8d9d68d3cb5bb/sensitivity.py index f6003962..d2ed6d0f 100644 --- a/docs/_downloads/caaed269579b6156f6d8d9d68d3cb5bb/sensitivity.py +++ b/docs/_downloads/caaed269579b6156f6d8d9d68d3cb5bb/sensitivity.py @@ -2,52 +2,79 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example performing a sensitivity analysis on a new model. +Example performing a sensitivity analysis on a new model. """ import numpy as np + # Deriv prog model was selected because the model can be described as x' = x + dx*dt from progpy.models.thrown_object import ThrownObject + def run_example(): # Demo model # Step 1: Create instance of model m = ThrownObject() - # Step 2: Setup for simulation + # Step 2: Setup for simulation def future_load(t, x=None): return m.InputContainer({}) # Step 3: Setup range on parameters considered thrower_height_range = np.arange(1.2, 2.1, 0.1) - # Step 4: Sim for each - event = 'impact' + # Step 4: Sim for each + event = "impact" eods = np.empty(len(thrower_height_range)) - for (i, thrower_height) in zip(range(len(thrower_height_range)), thrower_height_range): - m.parameters['thrower_height'] = thrower_height - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt =1e-3, save_freq =10) + for i, thrower_height in zip( + range(len(thrower_height_range)), thrower_height_range + ): + m.parameters["thrower_height"] = thrower_height + simulated_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], dt=1e-3, save_freq=10 + ) eods[i] = simulated_results.times[-1] # Step 5: Analysis - print('For a reasonable range of heights, impact time is between {} and {}'.format(round(eods[0],3), round(eods[-1],3))) - sensitivity = (eods[-1]-eods[0])/(thrower_height_range[-1] - thrower_height_range[0]) - print(' - Average sensitivity: {} s per cm height'.format(round(sensitivity/100, 6))) + print( + "For a reasonable range of heights, impact time is between {} and {}".format( + round(eods[0], 3), round(eods[-1], 3) + ) + ) + sensitivity = (eods[-1] - eods[0]) / ( + thrower_height_range[-1] - thrower_height_range[0] + ) + print( + " - Average sensitivity: {} s per cm height".format( + round(sensitivity / 100, 6) + ) + ) print(" - It seems impact time is not very sensitive to thrower's height") # Now lets repeat for throw speed throw_speed_range = np.arange(20, 40, 1) eods = np.empty(len(throw_speed_range)) - for (i, throw_speed) in zip(range(len(throw_speed_range)), throw_speed_range): - m.parameters['throwing_speed'] = throw_speed - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], options={'dt':1e-3, 'save_freq':10}) + for i, throw_speed in zip(range(len(throw_speed_range)), throw_speed_range): + m.parameters["throwing_speed"] = throw_speed + simulated_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], options={"dt": 1e-3, "save_freq": 10} + ) eods[i] = simulated_results.times[-1] - print('\nFor a reasonable range of throwing speeds, impact time is between {} and 
{}'.format(round(eods[0],3), round(eods[-1],3))) - sensitivity = (eods[-1]-eods[0])/(throw_speed_range[-1] - throw_speed_range[0]) - print(' - Average sensitivity: {} s per m/s speed'.format(round(sensitivity/100, 6))) + print( + "\nFor a reasonable range of throwing speeds, impact time is between {} and {}".format( + round(eods[0], 3), round(eods[-1], 3) + ) + ) + sensitivity = (eods[-1] - eods[0]) / (throw_speed_range[-1] - throw_speed_range[0]) + print( + " - Average sensitivity: {} s per m/s speed".format( + round(sensitivity / 100, 6) + ) + ) print(" - It seems impact time is much more dependent on throwing speed") -# This allows the module to be executed directly -if __name__=='__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/cad7aa59ea2959726ca36bd7ecab2b63/pce.py b/docs/_downloads/cad7aa59ea2959726ca36bd7ecab2b63/pce.py new file mode 100644 index 00000000..6281a284 --- /dev/null +++ b/docs/_downloads/cad7aa59ea2959726ca36bd7ecab2b63/pce.py @@ -0,0 +1,103 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. +# This ensures that the directory containing examples is in the python search directories + +""" +This example demonstrates the Polynomial Chaos Expansion (PCE) Surrogate Direct Model functionality. PCE is a method by which the behavior of a model can be approximated by a polynomial. In this case the relationship between future loading and time of event. The result is a direct surrogate model that can be used to estimate time of event given a loading profile, without requiring the original model to be simulated. The resulting estimation is MUCH faster than simulating the model. + +This functionality is especially useful in cases where you need to evaluate a large number of loading profiles in a short amount of time or with limited computational resources. For example, when you would like to optimize a loading profile. In that case, it would not make sense to have a state-transition model in the inner loop of the optimization, since the state transition model would be called many times. Instead, you can use a direct surrogate model to estimate time of event directly from the loading profile. + +In this example, a PCE surrogate model is generated for the BatteryElectroChemEOD model. The surrogate is used to estimate time of event for a number of loading profiles. The result is compared to the actual time of event for the same loading profiles, which were generated by simulating the model. +""" + +import chaospy as cp +import matplotlib.pyplot as plt +import numpy as np +from progpy.models import BatteryElectroChemEOD +from progpy.data_models import PCE +import scipy as sp + + +def run_example(): + # First lets define some constants + + # Time step used in simulation + DT = 0.5 + + # The number of samples to used in the PCE + # Larger gives a better approximation, but takes longer to generate + N_SAMPLES = 100 + + # The distribution of the input current + # This defines the expected values for the input + # In this case we're saying that the input current can be anything between 3-8 amps + # With a uniform distribution (i.e., no value in that range is more likely than any other) + INPUT_CURRENT_DIST = cp.Uniform(3, 8) + # Note: These discharge rates are VERY high. This is only for demonstration purposes. 
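As a hedged aside on the PCE example above (not part of the original patch): the chaospy distribution defined for the input current can be sampled directly, which gives a quick feel for the spread of current values that distribution describes. A minimal sketch, assuming chaospy is installed; the sample rule name matches the one used later in the example, and the variable names here are illustrative only.

import chaospy as cp

input_current_dist = cp.Uniform(3, 8)  # same 3-8 A range as INPUT_CURRENT_DIST above
samples = input_current_dist.sample(5, rule="latin_hypercube")
print(samples)  # five candidate current draws spread across the 3-8 A interval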
+ # The high discharge rate will accelerate the degradation of the battery, + # which will cause the example to run faster + + # Step 1: Define base model + # First let's define the base model that we're creating a surrogate for. + m = BatteryElectroChemEOD(process_noise=0) + x0 = m.initialize() # Initial State + + # Step 2: Build surrogate + # Next we build the surrogate model from the base model + # To build the model we pass in the distributions of possible values for each input. + # We also provide the max_time. This is the maximum time that the surrogate will be used for. + # We dont expect any battery to last more than 4000 seconds given the high discharge curves we're passing in. + m_surrogate = PCE.from_model( + m, + x0, # Model State + {"i": INPUT_CURRENT_DIST}, # Distribution of inputs + dt=DT, + times=[i * 1000 for i in range(5)], + N=N_SAMPLES, + ) + # The result (m_surrogate) is a model that can be used to VERY quickly estimate time_of_event for a new loading profile. + + # Note: this is only valid for the initial state (x0) of the battery. + # To train for another state pass in the parameter x (type StateContainer). + # e.g. m_surrogate = PCE.from_model(m, SOME_OTHER_STATE, ...) + + # ------------------------------------------------------------------------- + # Now let's test the surrogate + # We will do this by generating some new loading profiles + # then comparing the results to the actual time of event (from simulation) + N_TEST_CASES = 25 # The number of loading profiles to test + + # some containers for the results + surrogate_results = np.empty(N_TEST_CASES, dtype=np.float64) + gt_results = np.empty(N_TEST_CASES, dtype=np.float64) + + # Future loading- interpolates values from randomly sampled values + def future_loading(t, x=None): + return m.InputContainer(interpolator(t)[np.newaxis].T) + + TEST_SAMPLES = m_surrogate.parameters["J"].sample( + size=N_TEST_CASES, rule="latin_hypercube" + ) + for i in range(N_TEST_CASES): + # Generate a new loading profile + interpolator = sp.interpolate.interp1d( + m_surrogate.parameters["times"], TEST_SAMPLES[:, i] + ) + + # Estimate time of event from ground truth (original model) and surrogate + gt_results[i] = m.time_of_event(x0, future_loading, dt=DT)["EOD"] + surrogate_results[i] = m_surrogate.time_of_event(x0, future_loading)["EOD"] + + # Plot results + # Note here that the approximation is very good, but not perfect + # Approximation would be even better with more samples + plt.scatter(gt_results, surrogate_results) + max_val = max(max(gt_results), max(surrogate_results)) + plt.plot([0, max_val], [0, max_val], "k--") + plt.xlabel("Ground Truth (s)") + plt.ylabel("PCE (s)") + plt.show() + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/cadc512c2baefeca720f0906f97221af/uav_dynamics_model.py b/docs/_downloads/cadc512c2baefeca720f0906f97221af/uav_dynamics_model.py index 3b7fea9c..ed14def9 100644 --- a/docs/_downloads/cadc512c2baefeca720f0906f97221af/uav_dynamics_model.py +++ b/docs/_downloads/cadc512c2baefeca720f0906f97221af/uav_dynamics_model.py @@ -16,10 +16,7 @@ def run_example(): # Initialize vehicle vehicle = SmallRotorcraft( - dt=0.05, - vehicle_model='tarot18', - process_noise=0, - measurement_noise=0 + dt=0.05, vehicle_model="tarot18", process_noise=0, measurement_noise=0 ) # EXAMPLE 1: @@ -33,20 +30,87 @@ def run_example(): # Here, we specify waypoints in a dictionary and then pass # lat/lon/alt/ETAs into the trajectory class - lat_deg = 
np.array([37.09776, 37.09776, 37.09776, 37.09798, 37.09748, 37.09665, 37.09703, 37.09719, 37.09719, 37.09719, 37.09719, 37.09748, 37.09798, 37.09776, 37.09776]) - lon_deg = np.array([-76.38631, -76.38629, -76.38629, -76.38589, -76.3848, -76.38569, -76.38658, -76.38628, -76.38628, -76.38628, -76.38628, -76.3848, -76.38589, -76.38629, -76.38629]) - alt_ft = np.array([-1.9682394, 164.01995, 164.01995, 164.01995, 164.01995, 164.01995, 164.01995, 164.01995, 0.0, 0.0, 164.01995, 164.01995, 164.01995, 164.01995, 0.0]) - time_unix = [1544188336, 1544188358, 1544188360, 1544188377, 1544188394, 1544188411, 1544188428, 1544188496, 1544188539, 1544188584, 1544188601, 1544188635, 1544188652, 1544188672, 1544188692] + lat_deg = np.array( + [ + 37.09776, + 37.09776, + 37.09776, + 37.09798, + 37.09748, + 37.09665, + 37.09703, + 37.09719, + 37.09719, + 37.09719, + 37.09719, + 37.09748, + 37.09798, + 37.09776, + 37.09776, + ] + ) + lon_deg = np.array( + [ + -76.38631, + -76.38629, + -76.38629, + -76.38589, + -76.3848, + -76.38569, + -76.38658, + -76.38628, + -76.38628, + -76.38628, + -76.38628, + -76.3848, + -76.38589, + -76.38629, + -76.38629, + ] + ) + alt_ft = np.array( + [ + -1.9682394, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 0.0, + 0.0, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 0.0, + ] + ) + time_unix = [ + 1544188336, + 1544188358, + 1544188360, + 1544188377, + 1544188394, + 1544188411, + 1544188428, + 1544188496, + 1544188539, + 1544188584, + 1544188601, + 1544188635, + 1544188652, + 1544188672, + 1544188692, + ] # Generate trajectory # ===================== # Generate trajectory object and pass the route (waypoints, ETA) to it - traj = Trajectory(lat=lat_deg, - lon=lon_deg, - alt=alt_ft * 0.3048, - etas=time_unix) + traj = Trajectory(lat=lat_deg, lon=lon_deg, alt=alt_ft * 0.3048, etas=time_unix) - ref_traj = traj.generate(dt=vehicle.parameters['dt']) + ref_traj = traj.generate(dt=vehicle.parameters["dt"]) # Define controller and build scheduled control. The controller acts as a # future_loading function when simulating @@ -61,9 +125,8 @@ def run_example(): # Simulate vehicle to fly trajectory traj_results = vehicle.simulate_to_threshold( - ctrl, - dt=vehicle.parameters['dt'], - save_freq=vehicle.parameters['dt']) + ctrl, dt=vehicle.parameters["dt"], save_freq=vehicle.parameters["dt"] + ) # Visualize Results vehicle.visualize_traj(pred=traj_results, ref=ref_traj) @@ -71,17 +134,19 @@ def run_example(): # EXAMPLE 2: # In this example, we define another trajectory through the same # waypoints but with speeds defined instead of ETAs - + # Generate trajectory object and pass the route (lat/lon/alt, no ETAs) # and speed information to it - traj_speed = Trajectory(lat=lat_deg, - lon=lon_deg, - alt=alt_ft * 0.3048, - cruise_speed=8.0, - ascent_speed=2.0, - descent_speed=3.0, - landing_speed=2.0) - ref_traj_speeds = traj_speed.generate(dt=vehicle.parameters['dt']) + traj_speed = Trajectory( + lat=lat_deg, + lon=lon_deg, + alt=alt_ft * 0.3048, + cruise_speed=8.0, + ascent_speed=2.0, + descent_speed=3.0, + landing_speed=2.0, + ) + ref_traj_speeds = traj_speed.generate(dt=vehicle.parameters["dt"]) # Define controller and build scheduled control. This time we'll use LQR_I, # which is a linear quadratic regulator with integral action. @@ -90,12 +155,9 @@ def run_example(): # This version of LQR_I compensates for integral errors in the position of # the vehicle, i.e., x, y, z variables of the state vector. 
ctrl_speeds = LQR_I(ref_traj_speeds, vehicle) - + # Set simulation options - options = { - 'dt': vehicle.parameters['dt'], - 'save_freq': vehicle.parameters['dt'] - } + options = {"dt": vehicle.parameters["dt"], "save_freq": vehicle.parameters["dt"]} # Simulate vehicle to fly trajectory traj_results_speeds = vehicle.simulate_to_threshold(ctrl_speeds, **options) @@ -115,7 +177,25 @@ def run_example(): # First, we'll re-define the ETAs in the waypoints dictionary # (since we deleted them from the waypoints in Example 2) - time_unix = np.array([1544188336, 1544188358, 1544188360, 1544188377, 1544188394, 1544188411, 1544188428, 1544188496, 1544188539, 1544188584, 1544188601, 1544188635, 1544188652, 1544188672, 1544188692]) + time_unix = np.array( + [ + 1544188336, + 1544188358, + 1544188360, + 1544188377, + 1544188394, + 1544188411, + 1544188428, + 1544188496, + 1544188539, + 1544188584, + 1544188601, + 1544188635, + 1544188652, + 1544188672, + 1544188692, + ] + ) # Extract time information for desired interval, starting at waypoint 10 # and ending at waypoint 13 @@ -124,32 +204,33 @@ def run_example(): sim_time = end_time - start_time # Define initial state, x0, based on reference trajectory at start_time - ind = np.where(ref_traj['t'] == start_time) + ind = np.where(ref_traj["t"] == start_time) x0 = {key: ref_traj[key][ind][0] for key in ref_traj.keys()} - vehicle.parameters['x0'] = x0 + vehicle.parameters["x0"] = x0 # Define simulation parameters - note that we must define t0 as start_time # since we are not starting at the default of t0 = 0 options = { - 'dt': vehicle.parameters['dt'], - 'save_freq': vehicle.parameters['dt'], - 't0': start_time + "dt": vehicle.parameters["dt"], + "save_freq": vehicle.parameters["dt"], + "t0": start_time, } # Simulate starting from this initial state from start_time to end_time traj_results_interval = vehicle.simulate_to(sim_time, ctrl, **options) # Plot results with Example 1 results to show equivalence on this interval - z_1 = [output['z'] for output in traj_results.outputs] - z_4 = [output['z'] for output in traj_results_interval.outputs] + z_1 = [output["z"] for output in traj_results.outputs] + z_4 = [output["z"] for output in traj_results_interval.outputs] fig, ax = plt.subplots() - ax.plot(traj_results.times, z_1, '-b', label='Example 1') - ax.plot(traj_results_interval.times, z_4, '--r', label='Example 3') - ax.set_xlabel('time, s', fontsize=14) - ax.set_ylabel('altitude, m', fontsize=14) + ax.plot(traj_results.times, z_1, "-b", label="Example 1") + ax.plot(traj_results_interval.times, z_4, "--r", label="Example 3") + ax.set_xlabel("time, s", fontsize=14) + ax.set_ylabel("altitude, m", fontsize=14) ax.legend() + # This allows the module to be executed directly -if __name__ == '__main__': +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/cb37c25b3e11f6fe8987e558bb775c53/06_Combining Models.ipynb b/docs/_downloads/cb37c25b3e11f6fe8987e558bb775c53/06_Combining Models.ipynb index a568c54d..b43ba710 100644 --- a/docs/_downloads/cb37c25b3e11f6fe8987e558bb775c53/06_Combining Models.ipynb +++ b/docs/_downloads/cb37c25b3e11f6fe8987e558bb775c53/06_Combining Models.ipynb @@ -4,21 +4,33 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Combining Prognostic Models" + "# 6. Combining Prognostic Models" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "This section demonstrates how prognostic models can be combined. 
There are two times in which this is useful: \n", + "This section demonstrates how prognostic models can be combined. There are two instances in which this is useful: \n", "\n", - "1. When combining multiple models of different inter-related systems into one system-of-system model (i.e., [Composite Models](https://nasa.github.io/progpy/api_ref/prog_models/CompositeModel.html)), or\n", + "1. Combining multiple models of different inter-related systems into one system-of-system model (i.e., [Composite Models](https://nasa.github.io/progpy/api_ref/prog_models/CompositeModel.html)), or\n", "2. Combining multiple models of the same system to be simulated together and aggregated (i.e., [Ensemble Models](https://nasa.github.io/progpy/api_ref/prog_models/EnsembleModel.html) or [Mixture of Expert Models](https://nasa.github.io/progpy/api_ref/progpy/MixtureOfExperts.html)). This is generally done to improve the accuracy of prediction when you have multiple models that each represent part of the behavior or represent a distribution of different behaviors. \n", "\n", "These two methods for combining models are described in the following sections." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "\n", + "* [Composite Model](#Composite-Model)\n", + "* [Ensemble Model](#Ensemble-Model)\n", + "* [Mixture of Experts (MoE)](#Mixture-of-Experts-(MoE))\n", + "* [Conclusion](#Conclusion)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -30,11 +42,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "A CompositeModel is a PrognosticsModel that is composed of multiple PrognosticsModels. This is a tool for modeling system-of-systems. i.e., interconnected systems, where the behavior and state of one system affects the state of another system. The composite prognostics models are connected using defined connections between the output or state of one model, and the input of another model. The resulting CompositeModel behaves as a single model.\n", + "A `CompositeModel` is a `PrognosticsModel` that is composed of multiple `PrognosticsModels`. This is a tool for modeling system-of-systems. (i.e., interconnected systems), where the behavior and state of one system affects the state of another system. The composite prognostics models are connected using defined connections between the output or state of one model, and the input of another model. The resulting `CompositeModel` behaves as a single model.\n", "\n", - "To illustrate this, we will create a composite model of an aircraft's electric powertrain, combining the DCMotor, ESC, and PropellerLoad models. The Electronic Speed Controller (ESC) converts a commanded duty (i.e., throttle) to signals to the motor. The motor then acts on the signals from the ESC to spin the load, which enacts a torque on the motor (in this case from air resistence).\n", + "To illustrate this, we will create a composite model of an aircraft's electric powertrain, combining the `DCMotor`, `ESC`, and `PropellerLoad` models. The Electronic Speed Controller (`ESC`) converts a commanded duty (i.e., throttle) to signals to the motor. The motor then acts on the signals from the ESC to spin the load, which enacts a torque on the motor (in this case from air resistence).\n", "\n", - "First we will import the used models, and the CompositeModel class" + "First we will import the used models, and the `CompositeModel` class." 
] }, { @@ -69,7 +81,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next we have to define the connections between the systems. Let's first define the connections from the DCMotor to the propeller load. For this, we'll need to look at the DCMotor states and understand how they influence the PropellerLoad inputs." + "Next we have to define the connections between the systems. Let's first define the connections from the `DCMotor` to the propeller load. For this, we'll need to look at the `DCMotor` states and understand how they influence the `PropellerLoad` inputs." ] }, { @@ -78,17 +90,15 @@ "metadata": {}, "outputs": [], "source": [ - "print('motor states: ', m_motor.states)\n", - "print('load inputs: ', m_load.inputs)" + "print(\"motor states: \", m_motor.states)\n", + "print(\"load inputs: \", m_load.inputs)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Each of the states and inputs are described in the model documentation at [DC Motor Docs](https://nasa.github.io/progpy/api_ref/prog_models/IncludedModels.html#dc-motor) and [Propeller Docs](https://nasa.github.io/progpy/api_ref/prog_models/IncludedModels.html#propellerload)\n", - "\n", - "From reading the documentation we understand that the propeller's velocity is from the motor, so we can define the first connection:" + "Each of the states and inputs are described in the model documentation at [DC Motor Docs](https://nasa.github.io/progpy/api_ref/prog_models/IncludedModels.html#dc-motor) and [Propeller Docs](https://nasa.github.io/progpy/api_ref/prog_models/IncludedModels.html#propellerload). From reading the documentation we understand that the propeller's velocity is from the motor, so we can define the first connection:" ] }, { @@ -97,16 +107,14 @@ "metadata": {}, "outputs": [], "source": [ - "connections = [\n", - " ('DCMotor.v_rot', 'PropellerLoad.v_rot')\n", - "]" + "connections = [(\"DCMotor.v_rot\", \"PropellerLoad.v_rot\")]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Connections are defined as couples where the first value is the input for the second value. The connection above tells the composite model to feed the DCMotor's v_rot into the PropellerLoad's input v_rot.\n", + "Connections are defined as couples where the first value is the input for the second value. The connection above tells the composite model to feed the `DCMotor`'s `v_rot` into the `PropellerLoad`'s input `v_rot`.\n", "\n", "Next, let's look at the connections the other direction, from the load to the motor." ] @@ -117,8 +125,8 @@ "metadata": {}, "outputs": [], "source": [ - "print('load states: ', m_load.states)\n", - "print('motor inputs: ', m_motor.inputs)" + "print(\"load states: \", m_load.states)\n", + "print(\"motor inputs: \", m_motor.inputs)" ] }, { @@ -134,14 +142,14 @@ "metadata": {}, "outputs": [], "source": [ - "connections.append(('PropellerLoad.t_l', 'DCMotor.t_l'))" + "connections.append((\"PropellerLoad.t_l\", \"DCMotor.t_l\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Now we will repeat the exercise with the DCMotor and ESC." + "Now we will repeat the exercise with the `DCMotor` and `ESC`." 
] }, { @@ -150,15 +158,15 @@ "metadata": {}, "outputs": [], "source": [ - "print('ESC states: ', m_esc.states)\n", - "print('motor inputs: ', m_motor.inputs)\n", - "connections.append(('ESC.v_a', 'DCMotor.v_a'))\n", - "connections.append(('ESC.v_b', 'DCMotor.v_b'))\n", - "connections.append(('ESC.v_c', 'DCMotor.v_c'))\n", + "print(\"ESC states: \", m_esc.states)\n", + "print(\"motor inputs: \", m_motor.inputs)\n", + "connections.append((\"ESC.v_a\", \"DCMotor.v_a\"))\n", + "connections.append((\"ESC.v_b\", \"DCMotor.v_b\"))\n", + "connections.append((\"ESC.v_c\", \"DCMotor.v_c\"))\n", "\n", - "print('motor states: ', m_motor.states)\n", - "print('ESC inputs: ', m_esc.inputs)\n", - "connections.append(('DCMotor.theta', 'ESC.theta'))" + "print(\"motor states: \", m_motor.states)\n", + "print(\"ESC inputs: \", m_esc.inputs)\n", + "connections.append((\"DCMotor.theta\", \"ESC.theta\"))" ] }, { @@ -174,16 +182,14 @@ "metadata": {}, "outputs": [], "source": [ - "m_powertrain = CompositeModel(\n", - " (m_esc, m_load, m_motor), \n", - " connections=connections)" + "m_powertrain = CompositeModel((m_esc, m_load, m_motor), connections=connections)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The resulting model includes two inputs, ESC voltage (from the battery) and duty (i.e., commanded throttle). These are the only two inputs not connected internally from the original three models. The states are a combination of all the states of every system. Finally, the outputs are a combination of all the outputs from each of the individual systems. " + "The resulting model includes two inputs, `ESC` voltage (from the battery) and duty (i.e., commanded throttle). These are the only two inputs not connected internally from the original three models. The states are a combination of all the states of every system. Finally, the outputs are a combination of all the outputs from each of the individual systems. 
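For readers following the notebook through this diff, the powertrain assembled across the cells above can be gathered into one self-contained sketch. The import paths are assumed to match those used elsewhere in progpy's examples; everything else mirrors the cells shown here.

```python
# Sketch: the complete powertrain composite from the cells above, in one place.
from progpy import CompositeModel
from progpy.models import DCMotor, ESC, PropellerLoad

m_motor, m_esc, m_load = DCMotor(), ESC(), PropellerLoad()

connections = [
    ("DCMotor.v_rot", "PropellerLoad.v_rot"),  # motor speed drives the propeller
    ("PropellerLoad.t_l", "DCMotor.t_l"),      # propeller torque loads the motor
    ("ESC.v_a", "DCMotor.v_a"),                # three-phase voltages from the ESC
    ("ESC.v_b", "DCMotor.v_b"),
    ("ESC.v_c", "DCMotor.v_c"),
    ("DCMotor.theta", "ESC.theta"),            # motor angle feeds ESC commutation
]

m_powertrain = CompositeModel((m_esc, m_load, m_motor), connections=connections)
# Only ESC.v (supply voltage) and ESC.duty (commanded throttle) remain external inputs;
# the next cell prints the combined inputs, states, and outputs.
```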
" ] }, { @@ -192,9 +198,9 @@ "metadata": {}, "outputs": [], "source": [ - "print('inputs: ', m_powertrain.inputs)\n", - "print('states: ', m_powertrain.states)\n", - "print('outputs: ', m_powertrain.outputs)" + "print(\"inputs: \", m_powertrain.inputs)\n", + "print(\"states: \", m_powertrain.states)\n", + "print(\"outputs: \", m_powertrain.outputs)" ] }, { @@ -211,17 +217,19 @@ "outputs": [], "source": [ "m_powertrain = CompositeModel(\n", - " (m_esc, m_load, m_motor), \n", - " connections=connections,\n", - " outputs={'DCMotor.v_rot', 'DCMotor.theta'})\n", - "print('outputs: ', m_powertrain.outputs)" + " (m_esc, m_load, m_motor),\n", + " connections=connections,\n", + " outputs={\"DCMotor.v_rot\", \"DCMotor.theta\"},\n", + ")\n", + "\n", + "print(\"outputs: \", m_powertrain.outputs)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Now the outputs are only DCMotor angle and velocity.\n", + "Now the outputs are only `DCMotor` angle and velocity.\n", "\n", "The resulting model can be used in simulation, state estimation, and prediction the same way any other model would be, as demonstrated below:" ] @@ -232,16 +240,34 @@ "metadata": {}, "outputs": [], "source": [ - "load = m_powertrain.InputContainer({\n", - " 'ESC.duty': 1, # 100% Throttle\n", - " 'ESC.v': 23\n", - " })\n", + "load = m_powertrain.InputContainer(\n", + " {\n", + " \"ESC.duty\": 1, # 100% Throttle\n", + " \"ESC.v\": 23,\n", + " }\n", + ")\n", + "\n", + "\n", "def future_loading(t, x=None):\n", " return load\n", "\n", - "simulated_results = m_powertrain.simulate_to(1, future_loading, dt=2.5e-5, save_freq=2e-2)\n", - "fig = simulated_results.outputs.plot(compact=False, keys=['DCMotor.v_rot'], ylabel='Velocity')\n", - "fig = simulated_results.states.plot(keys=['DCMotor.i_b', 'DCMotor.i_c', 'DCMotor.i_a'], ylabel='ESC Currents')" + "\n", + "simulated_results = m_powertrain.simulate_to(\n", + " 1, future_loading, dt=2.5e-5, save_freq=2e-2\n", + ")\n", + "\n", + "fig = simulated_results.outputs.plot(\n", + " keys=[\"DCMotor.v_rot\"],\n", + " ylabel=\"velocity (rad/sec)\",\n", + " xlabel=\"time (s)\",\n", + " title=\"Composite model output\",\n", + ")\n", + "fig = simulated_results.states.plot(\n", + " keys=[\"DCMotor.i_b\", \"DCMotor.i_c\", \"DCMotor.i_a\"],\n", + " ylabel=\"ESC currents\",\n", + " xlabel=\"time (s)\",\n", + " title=\"Composite model states\",\n", + ")" ] }, { @@ -257,7 +283,7 @@ "metadata": {}, "outputs": [], "source": [ - "m_powertrain.parameters['PropellerLoad.D'] = 1" + "m_powertrain.parameters[\"PropellerLoad.D\"] = 1" ] }, { @@ -273,16 +299,29 @@ "metadata": {}, "outputs": [], "source": [ - "simulated_results = m_powertrain.simulate_to(1, future_loading, dt=2.5e-5, save_freq=2e-2)\n", - "fig = simulated_results.outputs.plot(compact=False, keys=['DCMotor.v_rot'], ylabel='Velocity')\n", - "fig = simulated_results.states.plot(keys=['DCMotor.i_b', 'DCMotor.i_c', 'DCMotor.i_a'], ylabel='ESC Currents')" + "simulated_results = m_powertrain.simulate_to(\n", + " 1, future_loading, dt=2.5e-5, save_freq=2e-2\n", + ")\n", + "\n", + "fig = simulated_results.outputs.plot(\n", + " keys=[\"DCMotor.v_rot\"],\n", + " ylabel=\"velocity (rad/sec)\",\n", + " xlabel=\"time (s)\",\n", + " title=\"Composite model output with increased load\",\n", + ")\n", + "fig = simulated_results.states.plot(\n", + " keys=[\"DCMotor.i_b\", \"DCMotor.i_c\", \"DCMotor.i_a\"],\n", + " ylabel=\"ESC Currents\",\n", + " xlabel=\"time (s)\",\n", + " title=\"Composite model with increased load states\",\n", + ")" ] }, { "cell_type": 
"markdown", "metadata": {}, "source": [ - "Note: A function can be used to perform simple transitions between models. For example, if you wanted to multiply the torque by 1.1 to represent some gearing or additional load, that could be done by defining a function, as follows" + "Note that a function can be used to perform simple transitions between models. For example, if you wanted to multiply the torque by 1.1 to represent some gearing or additional load, that could be done by defining a function, as follows:" ] }, { @@ -309,8 +348,8 @@ "outputs": [], "source": [ "connections = [\n", - " ('PropellerLoad.t_l', 'function.t_l'),\n", - " ('function.return', 'DCMotor.t_l')\n", + " (\"PropellerLoad.t_l\", \"function.t_l\"),\n", + " (\"function.return\", \"DCMotor.t_l\"),\n", "]" ] }, @@ -318,7 +357,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now let's add back in the other connections and build the composite model" + "Now let's add back in the other connections and build the composite model." ] }, { @@ -327,20 +366,38 @@ "metadata": {}, "outputs": [], "source": [ - "connections.extend([\n", - " ('ESC.v_a', 'DCMotor.v_a'),\n", - " ('ESC.v_b', 'DCMotor.v_b'),\n", - " ('ESC.v_c', 'DCMotor.v_c'),\n", - " ('DCMotor.theta', 'ESC.theta'),\n", - " ('DCMotor.v_rot', 'PropellerLoad.v_rot')\n", - "])\n", + "connections.extend(\n", + " [\n", + " (\"ESC.v_a\", \"DCMotor.v_a\"),\n", + " (\"ESC.v_b\", \"DCMotor.v_b\"),\n", + " (\"ESC.v_c\", \"DCMotor.v_c\"),\n", + " (\"DCMotor.theta\", \"ESC.theta\"),\n", + " (\"DCMotor.v_rot\", \"PropellerLoad.v_rot\"),\n", + " ]\n", + ")\n", + "\n", "m_powertrain = CompositeModel(\n", - " (m_esc, m_load, m_motor, torque_multiplier), \n", - " connections=connections,\n", - " outputs={'DCMotor.v_rot', 'DCMotor.theta'})\n", - "simulated_results = m_powertrain.simulate_to(1, future_loading, dt=2.5e-5, save_freq=2e-2)\n", - "fig = simulated_results.outputs.plot(compact=False, keys=['DCMotor.v_rot'], ylabel='Velocity')\n", - "fig = simulated_results.states.plot(keys=['DCMotor.i_b', 'DCMotor.i_c', 'DCMotor.i_a'], ylabel='ESC Currents')" + " (m_esc, m_load, m_motor, torque_multiplier),\n", + " connections=connections,\n", + " outputs={\"DCMotor.v_rot\", \"DCMotor.theta\"},\n", + ")\n", + "\n", + "simulated_results = m_powertrain.simulate_to(\n", + " 1, future_loading, dt=2.5e-5, save_freq=2e-2\n", + ")\n", + "\n", + "fig = simulated_results.outputs.plot(\n", + " keys=[\"DCMotor.v_rot\"],\n", + " ylabel=\"velocity (rad/sec)\",\n", + " xlabel=\"time (s)\",\n", + " title=\"Complete composite model output\",\n", + ")\n", + "fig = simulated_results.states.plot(\n", + " keys=[\"DCMotor.i_b\", \"DCMotor.i_c\", \"DCMotor.i_a\"],\n", + " ylabel=\"ESC currents\",\n", + " xlabel=\"time (s)\",\n", + " title=\"Complete composite model states\",\n", + ")" ] }, { @@ -377,16 +434,17 @@ "outputs": [], "source": [ "from progpy.models import BatteryCircuit\n", + "\n", "m_circuit = BatteryCircuit()\n", - "m_circuit_2 = BatteryCircuit(qMax = 7860)\n", - "m_circuit_3 = BatteryCircuit(qMax = 6700, Rs = 0.055)" + "m_circuit_2 = BatteryCircuit(qMax=7860)\n", + "m_circuit_3 = BatteryCircuit(qMax=6700, Rs=0.055)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Let's create an EnsembleModel which combines each of these." + "Let's create an `EnsembleModel` which combines each of these." 
] }, { @@ -396,15 +454,15 @@ "outputs": [], "source": [ "from progpy import EnsembleModel\n", - "m_ensemble = EnsembleModel(\n", - " models=(m_circuit, m_circuit_2, m_circuit_3))" + "\n", + "m_ensemble = EnsembleModel(models=(m_circuit, m_circuit_2, m_circuit_3))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Now let's evaluate the performance of the combined model using real battery data from NASA's prognostic data repository. See 07. Datasets for more detail on accessing data from this repository" + "Now let's evaluate the performance of the combined model using real battery data from [NASA's prognostic data repository](https://nasa.github.io/progpy/api_ref/progpy/DataSets.html)." ] }, { @@ -414,10 +472,11 @@ "outputs": [], "source": [ "from progpy.datasets import nasa_battery\n", + "\n", "data = nasa_battery.load_data(batt_id=8)[1]\n", "RUN_ID = 0\n", - "test_input = [{'i': i} for i in data[RUN_ID]['current']]\n", - "test_time = data[RUN_ID]['relativeTime']" + "test_input = [{\"i\": i} for i in data[RUN_ID][\"current\"]]\n", + "test_time = data[RUN_ID][\"relativeTime\"]" ] }, { @@ -446,15 +505,14 @@ "metadata": {}, "outputs": [], "source": [ - "t_end = test_time.iloc[-1]\n", - "from matplotlib import pyplot as plt" + "t_end = test_time.iloc[-1]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Next we will simulate the ensemble model" + "Next we will simulate the ensemble model." ] }, { @@ -481,10 +539,16 @@ "outputs": [], "source": [ "from matplotlib import pyplot as plt\n", - "fig = plt.plot(test_time, data[RUN_ID]['voltage'], color='green', label='ground truth')\n", - "fig = plt.plot(results_ensemble.times, [z['v'] for z in results_ensemble.outputs], color='red', label='ensemble')\n", - "plt.xlabel('Time (s)')\n", - "plt.ylabel('Voltage')\n", + "\n", + "fig = plt.plot(test_time, data[RUN_ID][\"voltage\"], color=\"green\", label=\"ground truth\")\n", + "fig = plt.plot(\n", + " results_ensemble.times,\n", + " [z[\"v\"] for z in results_ensemble.outputs],\n", + " color=\"red\",\n", + " label=\"ensemble\",\n", + ")\n", + "plt.xlabel(\"Time (s)\")\n", + "plt.ylabel(\"Voltage\")\n", "plt.legend()" ] }, @@ -492,7 +556,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The ensemble model actually performs pretty poorly here. This is mostly because there's an outlier model (m_circuit_3). This can be resolved using a different aggregation method. By default, aggregation uses the mean. Let's update the ensemble model to use median and resimulate" + "The ensemble model actually performs pretty poorly here. This is mostly because there's an outlier model (`m_circuit_3`). This can be resolved using a different aggregation method. By default, aggregation uses the mean. Let's update the ensemble model to use median and resimulate." 
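Beyond swapping `np.mean` for `np.median`, `aggregation_method` appears to accept any callable that reduces the constituent models' values to a single value. As one illustrative, non-built-in alternative, a weighted mean that down-weights the known outlier is sketched below; the assumption that values arrive in the order of the `models` tuple, and the `axis` handling, mirror how `np.mean`/`np.median` are used here and are not guaranteed by the source.

```python
# Sketch: a custom aggregation method for the ensemble. Assumes the values passed in
# follow the order of the models tuple (m_circuit, m_circuit_2, m_circuit_3) and that
# the callable is invoked the same way np.mean / np.median are.
import numpy as np


def weighted_mean(values, axis=0):
    # Down-weight m_circuit_3, the outlier identified above
    weights = np.array([0.45, 0.45, 0.10])
    return np.average(np.asarray(values), axis=axis, weights=weights)


m_ensemble["aggregation_method"] = weighted_mean
results_ensemble_weighted = m_ensemble.simulate_to(t_end, future_loading)
```

In practice the median used in the next cell is the simpler fix, but the same hook supports any aggregation rule.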
] }, { @@ -502,14 +566,25 @@ "outputs": [], "source": [ "import numpy as np\n", - "m_ensemble['aggregation_method'] = np.median\n", "\n", + "m_ensemble[\"aggregation_method\"] = np.median\n", "results_ensemble_median = m_ensemble.simulate_to(t_end, future_loading)\n", - "fig = plt.plot(results_ensemble_median.times, [z['v'] for z in results_ensemble_median.outputs], color='orange', label='ensemble -median')\n", - "fig = plt.plot(test_time, data[RUN_ID]['voltage'], color='green', label='ground truth')\n", - "fig = plt.plot(results_ensemble.times, [z['v'] for z in results_ensemble.outputs], color='red', label='ensemble')\n", - "plt.xlabel('Time (s)')\n", - "plt.ylabel('Voltage')\n", + "\n", + "fig = plt.plot(\n", + " results_ensemble_median.times,\n", + " [z[\"v\"] for z in results_ensemble_median.outputs],\n", + " color=\"orange\",\n", + " label=\"ensemble -median\",\n", + ")\n", + "fig = plt.plot(test_time, data[RUN_ID][\"voltage\"], color=\"green\", label=\"ground truth\")\n", + "fig = plt.plot(\n", + " results_ensemble.times,\n", + " [z[\"v\"] for z in results_ensemble.outputs],\n", + " color=\"red\",\n", + " label=\"ensemble\",\n", + ")\n", + "plt.xlabel(\"Time (s)\")\n", + "plt.ylabel(\"Voltage\")\n", "plt.legend()" ] }, @@ -517,16 +592,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Much better!" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The same ensemble approach can be used with a heterogeneous set of models that have different states.\n", + "Much better! \n", "\n", - "Here we will repeat the exercise using the battery electrochemisty and equivalent circuit models. The two models share one state in common (tb), but otherwise are different" + "The same ensemble approach can be used with a heterogeneous set of models that have different states. Here we will repeat the exercise using the battery electrochemisty and equivalent circuit models. 
The two models share one state in common (`tb`), but otherwise are different" ] }, { @@ -536,10 +604,11 @@ "outputs": [], "source": [ "from progpy.models import BatteryElectroChemEOD\n", + "\n", "m_electro = BatteryElectroChemEOD(qMobile=7800)\n", "\n", - "print('Electrochem states: ', m_electro.states)\n", - "print('Equivalent Circuit States', m_circuit.states)" + "print(\"Electrochem states: \", m_electro.states)\n", + "print(\"Equivalent Circuit States\", m_circuit.states)" ] }, { @@ -590,10 +659,25 @@ "outputs": [], "source": [ "plt.figure()\n", - "plt.plot(results_circuit1.times, [z['v'] for z in results_circuit1.outputs], color='blue', label='circuit')\n", - "plt.plot(results_electro.times, [z['v'] for z in results_electro.outputs], color='red', label='electro chemistry')\n", - "plt.plot(results_ensemble.times, [z['v'] for z in results_ensemble.outputs], color='yellow', label='ensemble')\n", - "plt.plot(test_time, data[RUN_ID]['voltage'], color='green', label='ground truth')\n", + "plt.plot(\n", + " results_circuit1.times,\n", + " [z[\"v\"] for z in results_circuit1.outputs],\n", + " color=\"blue\",\n", + " label=\"circuit\",\n", + ")\n", + "plt.plot(\n", + " results_electro.times,\n", + " [z[\"v\"] for z in results_electro.outputs],\n", + " color=\"red\",\n", + " label=\"electro chemistry\",\n", + ")\n", + "plt.plot(\n", + " results_ensemble.times,\n", + " [z[\"v\"] for z in results_ensemble.outputs],\n", + " color=\"yellow\",\n", + " label=\"ensemble\",\n", + ")\n", + "plt.plot(test_time, data[RUN_ID][\"voltage\"], color=\"green\", label=\"ground truth\")\n", "plt.legend()" ] }, @@ -617,9 +701,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Mixture of Experts (MoE) models combine multiple models of the same system, similar to Ensemble models. Unlike Ensemble Models, the aggregation is done by selecting the \"best\" model. That is the model that has performed the best over the past. Each model will have a 'score' that is tracked in the state, and this determines which model is best.\n", + "Mixture of Experts (`MoE`) models combine multiple models of the same system, similar to ensemble models. Unlike ensemble models, the aggregation is done by selecting the \"best\" model. That is the model that has performed the best over the past. Each model will have a 'score' that is tracked in the state, and this determines which model is best.\n", "\n", - "To demonstrate this feature we will repeat the example from the ensemble model section, this time with a mixture of experts model. For this example to work you will have had to have run the ensemble model section example.\n", + "To demonstrate this feature we will repeat the example from the ensemble model section, this time with a mixture of experts model. For this example to work, we will have had to have run the ensemble model section above.\n", "\n", "First, let's combine the three battery circuit models into a single mixture of experts model." ] @@ -631,6 +715,7 @@ "outputs": [], "source": [ "from progpy import MixtureOfExpertsModel\n", + "\n", "m = MixtureOfExpertsModel((m_circuit_3, m_circuit_2, m_circuit))" ] }, @@ -655,7 +740,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Its states contain all of the states of each model, kept separate. Each individual model comprising the MoE model will be simulated separately, so the model keeps track of the states propogated through each model separately. The states also include scores for each model." 
+ "Its states contain all of the states of each model, kept separate. Each individual model comprising the `MoE` model will be simulated separately, so the model keeps track of the states propogated through each model separately. The states also include scores for each model." ] }, { @@ -671,7 +756,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The MoE model inputs include both the comprised model input, `i` (current) and outputs: `v` (voltage) and `t`(temperature). The comprised model outputs are provided to update the scores of each model when performing state transition. If they are not provided when calling next_state, then scores would not be updated." + "The `MoE` model inputs include both the comprised model input, `i` (current) and outputs: `v` (voltage) and `t`(temperature). The comprised model outputs are provided to update the scores of each model when performing state transition. If they are not provided when calling next_state, then scores would not be updated." ] }, { @@ -687,7 +772,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now let's evaluate the performance of the combined model using real battery data from NASA's prognostic data repository, downloaded in the previous sections. See 07. Datasets for more detail on accessing data from this repository.\n", + "Now let's evaluate the performance of the combined model using real battery data from [NASA's prognostic data repository](https://nasa.github.io/progpy/api_ref/progpy/DataSets.html).\n", "\n", "To evaluate the model we first create a future loading function that uses the loading from the data." ] @@ -699,10 +784,15 @@ "outputs": [], "source": [ "results_moe = m.simulate_to(t_end, future_loading)\n", - "fig = plt.plot(test_time, data[RUN_ID]['voltage'], color='green', label='ground truth')\n", - "fig = plt.plot(results_moe.times, [z['v'] for z in results_moe.outputs], color='red', label='ensemble')\n", - "plt.xlabel('Time (s)')\n", - "plt.ylabel('Voltage')\n", + "fig = plt.plot(test_time, data[RUN_ID][\"voltage\"], color=\"green\", label=\"ground truth\")\n", + "fig = plt.plot(\n", + " results_moe.times,\n", + " [z[\"v\"] for z in results_moe.outputs],\n", + " color=\"red\",\n", + " label=\"ensemble\",\n", + ")\n", + "plt.xlabel(\"Time (s)\")\n", + "plt.ylabel(\"Voltage\")\n", "plt.legend()" ] }, @@ -719,9 +809,9 @@ "metadata": {}, "outputs": [], "source": [ - "print('Model 1 Score: ', results_moe.states[-1]['BatteryCircuit._score'])\n", - "print('Model 2 Score: ', results_moe.states[-1]['BatteryCircuit_2._score'])\n", - "print('Model 3 Score: ', results_moe.states[-1]['BatteryCircuit_3._score'])" + "print(\"Model 1 Score: \", results_moe.states[-1][\"BatteryCircuit._score\"])\n", + "print(\"Model 2 Score: \", results_moe.states[-1][\"BatteryCircuit_2._score\"])\n", + "print(\"Model 3 Score: \", results_moe.states[-1][\"BatteryCircuit_3._score\"])" ] }, { @@ -739,26 +829,34 @@ "source": [ "x0 = m.initialize()\n", "x = m.next_state(\n", - " x=x0, \n", - " u=m.InputContainer({\n", - " 'i': test_input[0]['i'],\n", - " 'v': data[RUN_ID]['voltage'][0],\n", - " 't': data[RUN_ID]['temperature'][0]}),\n", - " dt=test_time[1]-test_time[0])\n", + " x=x0,\n", + " u=m.InputContainer(\n", + " {\n", + " \"i\": test_input[0][\"i\"],\n", + " \"v\": data[RUN_ID][\"voltage\"][0],\n", + " \"t\": data[RUN_ID][\"temperature\"][0],\n", + " }\n", + " ),\n", + " dt=test_time[1] - test_time[0],\n", + ")\n", "x = m.next_state(\n", - " x=x, \n", - " u=m.InputContainer({\n", - " 'i': test_input[1]['i'],\n", - " 'v': 
data[RUN_ID]['voltage'][1],\n", - " 't': data[RUN_ID]['temperature'][1]}),\n", - " dt=test_time[1]-test_time[0])" + " x=x,\n", + " u=m.InputContainer(\n", + " {\n", + " \"i\": test_input[1][\"i\"],\n", + " \"v\": data[RUN_ID][\"voltage\"][1],\n", + " \"t\": data[RUN_ID][\"temperature\"][1],\n", + " }\n", + " ),\n", + " dt=test_time[1] - test_time[0],\n", + ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Let's take a look at the model scores again" + "Let's take a look at the model scores again." ] }, { @@ -767,9 +865,9 @@ "metadata": {}, "outputs": [], "source": [ - "print('Model 1 Score: ', x['BatteryCircuit._score'])\n", - "print('Model 2 Score: ', x['BatteryCircuit_2._score'])\n", - "print('Model 3 Score: ', x['BatteryCircuit_3._score'])" + "print(\"Model 1 Score: \", x[\"BatteryCircuit._score\"])\n", + "print(\"Model 2 Score: \", x[\"BatteryCircuit_2._score\"])\n", + "print(\"Model 3 Score: \", x[\"BatteryCircuit_3._score\"])" ] }, { @@ -785,11 +883,18 @@ "metadata": {}, "outputs": [], "source": [ - "results_moe = m.simulate_to(t_end, future_loading, t0=test_time[1]-test_time[0], x=x)\n", - "fig = plt.plot(test_time[2:], data[RUN_ID]['voltage'][2:], color='green', label='ground truth')\n", - "fig = plt.plot(results_moe.times[2:], [z['v'] for z in results_moe.outputs][2:], color='red', label='moe')\n", - "plt.xlabel('Time (s)')\n", - "plt.ylabel('Voltage')\n", + "results_moe = m.simulate_to(t_end, future_loading, t0=test_time[1] - test_time[0], x=x)\n", + "fig = plt.plot(\n", + " test_time[2:], data[RUN_ID][\"voltage\"][2:], color=\"green\", label=\"ground truth\"\n", + ")\n", + "fig = plt.plot(\n", + " results_moe.times[2:],\n", + " [z[\"v\"] for z in results_moe.outputs][2:],\n", + " color=\"red\",\n", + " label=\"moe\",\n", + ")\n", + "plt.xlabel(\"Time (s)\")\n", + "plt.ylabel(\"Voltage\")\n", "plt.legend()" ] }, @@ -797,13 +902,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The fit here is much better. The MoE model learned which of the three models best fit the observed behavior.\n", + "The fit here is much better. The `MoE` model learned which of the three models best fit the observed behavior.\n", "\n", "In a prognostic application, the scores will be updated each time you use a state estimator (so long as you provide the output as part of the input). Then when performing a prediction the scores aren't updated, since outputs are not known.\n", "\n", "An example of when this would be useful is for cases where there are three common degradation paths or \"modes\" rather than a single model with uncertainty to represent every mode, the three modes can be represented by three different models. Once enough of the degradation path has been observed the observed mode will be the one reported.\n", "\n", - "If the model fit is expected to be stable (that is, the best model is not expected to change anymore). The best model can be extracted and used directly, like demonstrated below." + "If the model fit is expected to be stable (that is, the best model is not expected to change anymore). The best model can be extracted and used directly, as demonstrated below:" ] }, { @@ -820,14 +925,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Conclusions" + "## Conclusion" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In this section we demonstrated a few methods for treating multiple models as a single model. 
This is of interest when there are multiple models of different systems which are interdependent (CompositeModel), multiple models of the same system that portray different parts of the behavior or different candidate representations (EnsembleModel), or multiple models of the same system that represent possible degradation modes (MixtureOfExpertModel)." + "In this section we demonstrated a few methods for treating multiple models as a single model. This is of interest when there are multiple models of different systems which are interdependent (`CompositeModel`), multiple models of the same system that portray different parts of the behavior or different candidate representations (`EnsembleModel`), or multiple models of the same system that represent possible degradation modes (`MixtureOfExpertModel`).\n", + "\n", + "The next notebook __[07 State Estimation](07_State%20Estimation.ipynb)__ will be exploring state estimation, which is the process of estimating the current state of the system using sensor data and a prognostics model." ] } ], diff --git a/docs/_downloads/cd66a5990681e5b4bc3e40976451b926/generate_surrogate.py b/docs/_downloads/cd66a5990681e5b4bc3e40976451b926/generate_surrogate.py index de387ae3..63d4b3cc 100644 --- a/docs/_downloads/cd66a5990681e5b4bc3e40976451b926/generate_surrogate.py +++ b/docs/_downloads/cd66a5990681e5b4bc3e40976451b926/generate_surrogate.py @@ -6,166 +6,199 @@ .. dropdown:: More details - In this example, an instance of a battery model is created. The DMD DataModel is used to generate a surrogate of this battery model for specific loading schemes. This surrogate can be used in place of the original model, approximating it's behavior. Frequently, surrogate models run faster than the original, at the cost of some accuracy. The performance of the two models are then compared. + In this example, an instance of a battery model is created. The DMD DataModel is used to generate a surrogate of this battery model for specific loading schemes. This surrogate can be used in place of the original model, approximating it's behavior. Frequently, surrogate models run faster than the original, at the cost of some accuracy. The performance of the two models are then compared. """ import matplotlib.pyplot as plt from progpy.models import BatteryElectroChemEOD as Battery -def run_example(): - ### Example 1: Standard DMD Application + +def run_example(): + ### Example 1: Standard DMD Application ## Step 1: Create a model object batt = Battery() - ## Step 2: Define future loading functions for training data - # Here, we define two specific loading profiles. These could also be generated programmatically, for as many loading profiles as desired + ## Step 2: Define future loading functions for training data + # Here, we define two specific loading profiles. 
These could also be generated programmatically, for as many loading profiles as desired def future_loading_1(t, x=None): - # Variable (piece-wise) future loading scheme - if (t < 500): + # Variable (piece-wise) future loading scheme + if t < 500: i = 3 - elif (t < 1000): + elif t < 1000: i = 2 - elif (t < 1500): + elif t < 1500: i = 0.5 else: i = 4.5 - return batt.InputContainer({'i': i}) - + return batt.InputContainer({"i": i}) + def future_loading_2(t, x=None): - # Variable (piece-wise) future loading scheme - if (t < 300): + # Variable (piece-wise) future loading scheme + if t < 300: i = 2 - elif (t < 800): + elif t < 800: i = 3.5 - elif (t < 1300): + elif t < 1300: i = 4 - elif (t < 1600): + elif t < 1600: i = 1.5 else: i = 5 - return batt.InputContainer({'i': i}) - + return batt.InputContainer({"i": i}) + load_functions = [future_loading_1, future_loading_2] - ## Step 3: generate surrogate model + ## Step 3: generate surrogate model # Simulation options for training data and surrogate model generation # Note: here dt is less than save_freq. This means the model will iterate forward multiple steps per saved point. - # This is commonly done to ensure accuracy. + # This is commonly done to ensure accuracy. options_surrogate = { - 'save_freq': 1, # For DMD, this value is the time step for which the surrogate model is generated - 'dt': 0.1, # For DMD, this value is the time step of the training data - 'trim_data_to': 0.7 # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model + "save_freq": 1, # For DMD, this value is the time step for which the surrogate model is generated + "dt": 0.1, # For DMD, this value is the time step of the training data + "trim_data_to": 0.7, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model } # Set noise in Prognostics Model, default for surrogate model is also this value - batt.parameters['process_noise'] = 0 + batt.parameters["process_noise"] = 0 - # Generate surrogate model - surrogate = batt.generate_surrogate(load_functions,**options_surrogate) + # Generate surrogate model + surrogate = batt.generate_surrogate(load_functions, **options_surrogate) - ## Step 4: Use surrogate model + ## Step 4: Use surrogate model # Simulation options for implementation of surrogate model options_sim = { - 'save_freq': 1 # Frequency at which results are saved, or equivalently time step in results + "save_freq": 1 # Frequency at which results are saved, or equivalently time step in results } - # Define loading profile + # Define loading profile def future_loading(t, x=None): - if (t < 600): + if t < 600: i = 3 - elif (t < 1000): + elif t < 1000: i = 2 - elif (t < 1500): + elif t < 1500: i = 1.5 else: i = 4 - return batt.InputContainer({'i': i}) + return batt.InputContainer({"i": i}) # Simulate to threshold using DMD approximation - simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim) + simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim) # Calculate Error - MSE = batt.calc_error(simulated_results.times, simulated_results.inputs, simulated_results.outputs) - print('Example 1 MSE:',MSE) + MSE = batt.calc_error( + simulated_results.times, simulated_results.inputs, simulated_results.outputs + ) + print("Example 1 MSE:", MSE) # Not a very good approximation # Plot results - simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 1 Input') - 
simulated_results.outputs.plot(ylabel = 'Predicted Outputs (temperature and voltage)',title='Example 1 Predicted Outputs') - simulated_results.event_states.plot(ylabel = 'Predicted State of Charge', title='Example 1 Predicted SOC') + simulated_results.inputs.plot(ylabel="Current (amps)", title="Example 1 Input") + simulated_results.outputs.plot( + ylabel="Predicted Outputs (temperature and voltage)", + title="Example 1 Predicted Outputs", + ) + simulated_results.event_states.plot( + ylabel="Predicted State of Charge", title="Example 1 Predicted SOC" + ) # To visualize the accuracy of the approximation, run the high-fidelity model options_hf = { - 'dt': 0.1, - 'save_freq': 1, + "dt": 0.1, + "save_freq": 1, } - high_fidelity_results = batt.simulate_to_threshold(future_loading,**options_hf) + high_fidelity_results = batt.simulate_to_threshold(future_loading, **options_hf) # Save voltage results to compare - voltage_dmd = [simulated_results.outputs[iter1]['v'] for iter1 in range(len(simulated_results.times))] - voltage_hf = [high_fidelity_results.outputs[iter2]['v'] for iter2 in range(len(high_fidelity_results.times))] + voltage_dmd = [ + simulated_results.outputs[iter1]["v"] + for iter1 in range(len(simulated_results.times)) + ] + voltage_hf = [ + high_fidelity_results.outputs[iter2]["v"] + for iter2 in range(len(high_fidelity_results.times)) + ] plt.subplots() - plt.plot(simulated_results.times,voltage_dmd,'-b',label='DMD approximation') - plt.plot(high_fidelity_results.times, voltage_hf,'--r',label='High fidelity result') + plt.plot(simulated_results.times, voltage_dmd, "-b", label="DMD approximation") + plt.plot( + high_fidelity_results.times, voltage_hf, "--r", label="High fidelity result" + ) plt.legend() - plt.title('Comparing DMD approximation to high-fidelity model results') + plt.title("Comparing DMD approximation to high-fidelity model results") - ### Example 2: Add process_noise to the surrogate model - # Without re-generating the surrogate model, we can re-define the process_noise to be higher than the high-fidelity model (since the surrogate model is less accurate) - surrogate.parameters['process_noise'] = 1e-04 - surrogate.parameters['process_noise_dist'] = 'normal' + ### Example 2: Add process_noise to the surrogate model + # Without re-generating the surrogate model, we can re-define the process_noise to be higher than the high-fidelity model (since the surrogate model is less accurate) + surrogate.parameters["process_noise"] = 1e-04 + surrogate.parameters["process_noise_dist"] = "normal" - # Simulate to threshold using DMD approximation - simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim) + # Simulate to threshold using DMD approximation + simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim) # Plot results - simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 2 Input') - simulated_results.outputs.plot(keys=['v'],ylabel = 'Predicted Voltage (volts)', title='Example 2 Predicted Outputs') - simulated_results.event_states.plot(ylabel = 'Predicted State of Charge', title='Example 2 Predicted SOC') + simulated_results.inputs.plot(ylabel="Current (amps)", title="Example 2 Input") + simulated_results.outputs.plot( + keys=["v"], + ylabel="Predicted Voltage (volts)", + title="Example 2 Predicted Outputs", + ) + simulated_results.event_states.plot( + ylabel="Predicted State of Charge", title="Example 2 Predicted SOC" + ) ### Example 3: Generate surrogate model with a subset of internal states, inputs, and/or 
outputs - # Note: we use the same loading profiles as defined in Ex. 1 + # Note: we use the same loading profiles as defined in Ex. 1 - ## Generate surrogate model + ## Generate surrogate model # Simulation options for training data and surrogate model generation options_surrogate = { - 'save_freq': 1, # For DMD, this value is the time step for which the surrogate model is generated - 'dt': 0.1, # For DMD, this value is the time step of the training data - 'trim_data': 1, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model - 'state_keys': ['Vsn','Vsp','tb'], # Define internal states to be included in surrogate model - 'output_keys': ['v'] # Define outputs to be included in surrogate model + "save_freq": 1, # For DMD, this value is the time step for which the surrogate model is generated + "dt": 0.1, # For DMD, this value is the time step of the training data + "trim_data": 1, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model + "state_keys": [ + "Vsn", + "Vsp", + "tb", + ], # Define internal states to be included in surrogate model + "output_keys": ["v"], # Define outputs to be included in surrogate model } # Set noise in Prognostics Model, default for surrogate model is also this value - batt.parameters['process_noise'] = 0 + batt.parameters["process_noise"] = 0 - # Generate surrogate model - surrogate = batt.generate_surrogate(load_functions,**options_surrogate) + # Generate surrogate model + surrogate = batt.generate_surrogate(load_functions, **options_surrogate) - ## Use surrogate model - # The surrogate model can now be used anywhere the original model is used. It is interchangeable with the original model. - # The surrogate model results will be faster but less accurate than the original model. + ## Use surrogate model + # The surrogate model can now be used anywhere the original model is used. It is interchangeable with the original model. + # The surrogate model results will be faster but less accurate than the original model. 
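The speed claim in the comment above is easy to spot-check. A minimal sketch, assuming the `batt`, `surrogate`, `future_loading`, `options_sim`, and `options_hf` objects already defined in this script:

```python
# Sketch: rough wall-clock comparison of the DMD surrogate against the full model.
import time

start = time.perf_counter()
surrogate.simulate_to_threshold(future_loading, **options_sim)
surrogate_runtime = time.perf_counter() - start

start = time.perf_counter()
batt.simulate_to_threshold(future_loading, **options_hf)
full_model_runtime = time.perf_counter() - start

print(f"surrogate: {surrogate_runtime:.2f} s, full model: {full_model_runtime:.2f} s")
```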
# Simulation options for implementation of surrogate model options_sim = { - 'save_freq': 1 # Frequency at which results are saved, or equivalently time step in results + "save_freq": 1 # Frequency at which results are saved, or equivalently time step in results } # Simulate to threshold using DMD approximation - simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim) + simulated_results = surrogate.simulate_to_threshold(future_loading, **options_sim) # Calculate Error - MSE = batt.calc_error(simulated_results.times, simulated_results.inputs, simulated_results.outputs) - print('Example 3 MSE:',MSE) + MSE = batt.calc_error( + simulated_results.times, simulated_results.inputs, simulated_results.outputs + ) + print("Example 3 MSE:", MSE) # Plot results - simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 3 Input') - simulated_results.outputs.plot(ylabel = 'Outputs (voltage)',title='Example 3 Predicted Output') - simulated_results.event_states.plot(ylabel = 'State of Charge',title='Example 3 Predicted SOC') + simulated_results.inputs.plot(ylabel="Current (amps)", title="Example 3 Input") + simulated_results.outputs.plot( + ylabel="Outputs (voltage)", title="Example 3 Predicted Output" + ) + simulated_results.event_states.plot( + ylabel="State of Charge", title="Example 3 Predicted SOC" + ) plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/cd893e49d72315dbac3feda09cb48c98/model_gen.py b/docs/_downloads/cd893e49d72315dbac3feda09cb48c98/model_gen.py index 0924760d..f45f79d9 100644 --- a/docs/_downloads/cd893e49d72315dbac3feda09cb48c98/model_gen.py +++ b/docs/_downloads/cd893e49d72315dbac3feda09cb48c98/model_gen.py @@ -2,7 +2,7 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example generating models from constituent parts. +Example generating models from constituent parts. 
The model used for this example is that of an object thrown into the air, predicting the impact event """ @@ -10,75 +10,89 @@ # Deriv prog model was selected because the model can be described as x' = x + dx*dt from prog_models import PrognosticsModel + def run_example(): # Step 1: Define keys keys = { - 'inputs': [], # no inputs, no way to control - 'states': [ - 'x', # Position (m) - 'v' # Velocity (m/s) - ], - 'outputs': [ # Anything we can measure - 'x' # Position (m) + "inputs": [], # no inputs, no way to control + "states": [ + "x", # Position (m) + "v", # Velocity (m/s) + ], + "outputs": [ # Anything we can measure + "x" # Position (m) + ], + "events": [ + "falling", # Event- object is falling + "impact", # Event- object has impacted ground ], - 'events': [ - 'falling', # Event- object is falling - 'impact' # Event- object has impacted ground - ] } - thrower_height = 1.83 # m - throwing_speed = 40 # m/s + thrower_height = 1.83 # m + throwing_speed = 40 # m/s + # Step 2: Define initial state def initialize(u, z): return { - 'x': thrower_height, # Thrown, so initial altitude is height of thrower - 'v': throwing_speed # Velocity at which the ball is thrown - this guy is an professional baseball pitcher - } - + "x": thrower_height, # Thrown, so initial altitude is height of thrower + "v": throwing_speed, # Velocity at which the ball is thrown - this guy is an professional baseball pitcher + } + # Step 3: Define dx equation def dx(x, u): return { - 'x': x['v'], - 'v': -9.81 # Acceleration of gravity + "x": x["v"], + "v": -9.81, # Acceleration of gravity } # Step 3: Define equation for calculating output/measuremetn def output(x): - return { - 'x': x['x'] - } + return {"x": x["x"]} # Step 4: Define threshold equation def threshold_met(x): - return { - 'falling': x['v'] < 0, - 'impact': x['x'] <= 0 - } + return {"falling": x["v"] < 0, "impact": x["x"] <= 0} # Step 5 (optional): Define event state equation- measurement of how close you are to threshold (0-1) - def event_state(x): - event_state.max_x = max(event_state.max_x, x['x']) # Maximum altitude + def event_state(x): + event_state.max_x = max(event_state.max_x, x["x"]) # Maximum altitude return { - 'falling': max(x['v']/throwing_speed,0), # Throwing speed is max speed - 'impact': max(x['x']/event_state.max_x,0) # 1 until falling begins, then it's fraction of height + "falling": max(x["v"] / throwing_speed, 0), # Throwing speed is max speed + "impact": max( + x["x"] / event_state.max_x, 0 + ), # 1 until falling begins, then it's fraction of height } + event_state.max_x = 0 - + # Step 6: Generate model - m = PrognosticsModel.generate_model(keys, initialize, output, event_state_eqn = event_state, threshold_eqn=threshold_met, dx_eqn=dx) + m = PrognosticsModel.generate_model( + keys, + initialize, + output, + event_state_eqn=event_state, + threshold_eqn=threshold_met, + dx_eqn=dx, + ) - # Step 7: Setup for simulation + # Step 7: Setup for simulation def future_load(t, x=None): return {} # Step 8: Simulate to impact - event = 'impact' - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt = 0.005, save_freq=1, print = True) + event = "impact" + simulated_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], dt=0.005, save_freq=1, print=True + ) # Print flight time - print('The object hit the ground in {} seconds'.format(round(simulated_results.times[-1],2))) + print( + "The object hit the ground in {} seconds".format( + round(simulated_results.times[-1], 2) + ) + ) + -# This allows the 
module to be executed directly -if __name__=='__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/ce3587166811ef0cd28f4edcf195c60e/new_model.py b/docs/_downloads/ce3587166811ef0cd28f4edcf195c60e/new_model.py index c18e8b41..d6be8f2c 100644 --- a/docs/_downloads/ce3587166811ef0cd28f4edcf195c60e/new_model.py +++ b/docs/_downloads/ce3587166811ef0cd28f4edcf195c60e/new_model.py @@ -2,7 +2,7 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example defining and using a new prognostics model. +Example defining and using a new prognostics model. In this example a simple state-transition model of an object thrown upward into the air is defined. That model is then used in simulation under different conditions and the results are displayed in different formats. """ @@ -15,106 +15,122 @@ class ThrownObject(PrognosticsModel): Model that simulates an object thrown into the air without air resistance """ - inputs = [] # no inputs, no way to control + inputs = [] # no inputs, no way to control states = [ - 'x', # Position (m) - 'v' # Velocity (m/s) - ] - outputs = [ # Anything we can measure - 'x' # Position (m) + "x", # Position (m) + "v", # Velocity (m/s) + ] + outputs = [ # Anything we can measure + "x" # Position (m) ] events = [ - 'falling', # Event- object is falling - 'impact' # Event- object has impacted ground + "falling", # Event- object is falling + "impact", # Event- object has impacted ground ] # The Default parameters. Overwritten by passing parameters into constructor default_parameters = { - 'x0': { # Initial State - 'x': 1.83, # Height of thrower (m) - 'v': 40 # Velocity at which the object is thrown (m/s) + "x0": { # Initial State + "x": 1.83, # Height of thrower (m) + "v": 40, # Velocity at which the object is thrown (m/s) }, - 'g': -9.81, # Acceleration due to gravity (m/s^2) - 'process_noise': 0.0 # amount of noise in each step + "g": -9.81, # Acceleration due to gravity (m/s^2) + "process_noise": 0.0, # amount of noise in each step } def initialize(self, *args, **kwargs): self.max_x = 0 # Set maximum height return super().initialize(*args, **kwargs) - + def dx(self, x, u): - return self.StateContainer({'x': x['v'], - 'v': self.parameters['g']}) # Acceleration of gravity + return self.StateContainer( + {"x": x["v"], "v": self.parameters["g"]} + ) # Acceleration of gravity def output(self, x): - return self.OutputContainer({'x': x['x']}) + return self.OutputContainer({"x": x["x"]}) # This is actually optional. Leaving thresholds_met empty will use the event state to define thresholds. # Threshold = Event State == 0. 
However, this implementation is more efficient, so we included it def threshold_met(self, x): - return { - 'falling': x['v'] < 0, - 'impact': x['x'] <= 0 - } + return {"falling": x["v"] < 0, "impact": x["x"] <= 0} - def event_state(self, x): - self.max_x = max(self.max_x, x['x']) # Maximum altitude + def event_state(self, x): + self.max_x = max(self.max_x, x["x"]) # Maximum altitude return { - 'falling': max(x['v']/self.parameters['x0']['v'],0), # Throwing speed is max speed - 'impact': max(x['x']/self.max_x,0) # 1 until falling begins, then it's fraction of height + "falling": max( + x["v"] / self.parameters["x0"]["v"], 0 + ), # Throwing speed is max speed + "impact": max( + x["x"] / self.max_x, 0 + ), # 1 until falling begins, then it's fraction of height } + def run_example(): # Demo model # Step 1: Create instance of model m = ThrownObject() - # Step 2: Setup for simulation + # Step 2: Setup for simulation def future_load(t, x=None): return m.InputContainer({}) # No inputs, no way to control # Step 3: Simulate to impact - event = 'impact' - simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1, print = True) - + event = "impact" + simulated_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], dt=0.005, save_freq=1, print=True + ) + # Print flight time - print('The object hit the ground in {} seconds'.format(round(simulated_results.times[-1],2))) + print( + "The object hit the ground in {} seconds".format( + round(simulated_results.times[-1], 2) + ) + ) - # OK, now lets compare performance on different heavenly bodies. + # OK, now lets compare performance on different heavenly bodies. # This requires that we update the cofiguration grav_moon = -1.62 # The first way to change the configuration is to pass in your desired config into construction of the model - m = ThrownObject(g = grav_moon) - simulated_moon_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1) + m = ThrownObject(g=grav_moon) + simulated_moon_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], dt=0.005, save_freq=1 + ) grav_mars = -3.711 # You can also update the parameters after it's constructed - m.parameters['g'] = grav_mars - simulated_mars_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1) + m.parameters["g"] = grav_mars + simulated_mars_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], dt=0.005, save_freq=1 + ) grav_venus = -8.87 - m.parameters['g'] = grav_venus - simulated_venus_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1) + m.parameters["g"] = grav_venus + simulated_venus_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], dt=0.005, save_freq=1 + ) - print('Time to hit the ground: ') - print('\tvenus: {}s'.format(round(simulated_venus_results.times[-1],2))) - print('\tearth: {}s'.format(round(simulated_results.times[-1],2))) - print('\tmars: {}s'.format(round(simulated_mars_results.times[-1],2))) - print('\tmoon: {}s'.format(round(simulated_moon_results.times[-1],2))) + print("Time to hit the ground: ") + print("\tvenus: {}s".format(round(simulated_venus_results.times[-1], 2))) + print("\tearth: {}s".format(round(simulated_results.times[-1], 2))) + print("\tmars: {}s".format(round(simulated_mars_results.times[-1], 2))) + print("\tmoon: {}s".format(round(simulated_moon_results.times[-1], 2))) # We can also simulate until any event is met by 
neglecting the threshold_keys argument simulated_results = m.simulate_to_threshold(future_load, dt=0.005, save_freq=1) threshs_met = m.threshold_met(simulated_results.states[-1]) - for (key, met) in threshs_met.items(): + for key, met in threshs_met.items(): if met: event_occurred = key - print('\nThis event that occurred first: ', event_occurred) + print("\nThis event that occurred first: ", event_occurred) # It falls before it hits the ground, obviously # Metrics can be analyzed from the simulation results. For example: monotonicity - print('\nMonotonicity: ', simulated_results.event_states.monotonicity()) + print("\nMonotonicity: ", simulated_results.event_states.monotonicity()) + -# This allows the module to be executed directly -if __name__=='__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/d36b60e48d50672e8a74122e3c9166b6/2024PHMTutorial.ipynb b/docs/_downloads/d36b60e48d50672e8a74122e3c9166b6/2024PHMTutorial.ipynb new file mode 100644 index 00000000..e70bdb43 --- /dev/null +++ b/docs/_downloads/d36b60e48d50672e8a74122e3c9166b6/2024PHMTutorial.ipynb @@ -0,0 +1,2113 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
# ProgPy Tutorial\n",
+    "\n",
+    "## 2024 NASA Software of the Year!
\n", + "2024 PHM Society Conference\n", + "\n", + "November, 2024" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Presenter**: Chris Teubert (christopher.a.teubert@nasa.gov)\n", + "\n", + "**In-room help**: Chetan Kulkarni & Rajeev Ghimire\n", + "\n", + "**Online help**: Katelyn Griffith" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Technical Help**: Raise your hand if you need technical help, someone from our team will come around and help you.\n", + "\n", + "**Questions**: Please put questions in the Whova App during the presentation or we will stop at various points to answer questions. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Pre-Work\n", + "\n", + "## 1. Download Whova\n", + "Please download the Whova App for live Q&A during the session. The Q&A can be found here: https://whova.com/portal/webapp/phm1_202411/Agenda/4229218\n", + "\n", + "## 2. Installing ProgPy\n", + "_We recommend installing ProgPy prior to the tutorial_\n", + "\n", + "ProgPy requires a version of Python between 3.7-3.12\n", + "\n", + "The latest stable release of ProgPy is hosted on PyPi. To install via the command line, use the following command: \n", + "\n", + "`$ pip install progpy`\n", + "\n", + "## 3. Start Jupyter Notebook\n", + "Either by cloning the git repo\n", + "\n", + "$ git clone https://github.com/nasa/progpy.git\n", + "\n", + "or using binder: \n", + "https://mybinder.org/v2/gh/nasa/progpy/master?labpath=examples/2024PHMTutorial.ipynb" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. Download data\n", + "Next, lets download the data we will be using for this tutorial. To do this we will use the datasets subpackage in progpy." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.datasets import nasa_battery\n", + "\n", + "(desc, data) = nasa_battery.load_data(1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note, this downloads the battery data from the PCoE datasets:\n", + "https://www.nasa.gov/intelligent-systems-division/discovery-and-systems-health/pcoe/pcoe-data-set-repository/ " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Introduction to ProgPy \n", + "

**2024 NASA Software of the Year!!!**
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "NASA’s ProgPy is an open-source python package supporting research and development of prognostics, health management, and predictive maintenance tools. It implements architectures and common functionality of prognostics, supporting researchers and practitioners.\n", + "\n", + "The goal of this tutorial is to instruct users how to use and extend ProgPy. This tutorial will cover how to use a model, including existing models and additional capabilities like parameter estimation and simulation, as well as how to build a new model from scratch. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The tutorial will begin with an introduction to prognostics and ProgPy using ProgPy's documentation. Please follow along in the [ProgPy Guide](https://nasa.github.io/progpy/guide.html)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tutorial Outline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "0. The Dataset \n", + "1. Using an existing model\n", + " - Loading a model\n", + " - Model parameters\n", + " - Simulation\n", + " - Prognostics with data\n", + "2. Building a new model \n", + " - State transition \n", + " - Outputs\n", + " - Events\n", + " - Using the model\n", + " - Parameter estimation\n", + " - Prognostics example\n", + " - Final notes \n", + "3. Advanced Capabilities\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# The Dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's prepare the dataset that we will use for this tutorial." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(desc[\"description\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The dataset includes 4 different kinds of runs: trickle, step, reference, random walk. We're going to split the dataset into one example for each of the different types for use later.\n", + "\n", + "Let's take a look at the trickle discharge run first." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "trickle_dataset = data[0]\n", + "print(trickle_dataset)\n", + "trickle_dataset.plot(\n", + " y=[\"current\", \"voltage\", \"temperature\"], subplots=True, xlabel=\"Time (sec)\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's do the same for a reference discharge run (5)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "reference_dataset = data[5]\n", + "reference_dataset.plot(\n", + " y=[\"current\", \"voltage\", \"temperature\"], subplots=True, xlabel=\"Time (sec)\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's do it for the step runs. Note that this is actually multiple runs that we need to combine. \n", + "\n", + "relativeTime resets for each \"run\". So if we're going to use multiple runs together, we need to stitch these times together." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data[7][\"absoluteTime\"] = data[7][\"relativeTime\"]\n", + "for i in range(8, 32):\n", + " data[i][\"absoluteTime\"] = (\n", + " data[i][\"relativeTime\"] + data[i - 1][\"absoluteTime\"].iloc[-1]\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we should combine the data into a single dataset and investigate the results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "step_dataset = pd.concat(data[7:32], ignore_index=True)\n", + "print(step_dataset)\n", + "step_dataset.plot(\n", + " y=[\"current\", \"voltage\", \"temperature\"], subplots=True, xlabel=\"Time (sec)\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, let's investigate the random walk discharge" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Like the step discharge, we need to stitch together the times and concatenate the data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data[35][\"absoluteTime\"] = data[35][\"relativeTime\"]\n", + "for i in range(36, 50):\n", + " data[i][\"absoluteTime\"] = (\n", + " data[i][\"relativeTime\"] + data[i - 1][\"absoluteTime\"].iloc[-1]\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "random_walk_dataset = pd.concat(data[35:50], ignore_index=True)\n", + "print(random_walk_dataset)\n", + "random_walk_dataset.plot(\n", + " y=[\"current\", \"voltage\", \"temperature\"], subplots=True, xlabel=\"Time (sec)\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now the data is ready for this tutorial, let's dive into it." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Using an existing Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The first component of ProgPy are the **Prognostics Models**. Models describe the behavior of the system of interest and how the state of the system evolves with use. ProgPy includes capability for prognostics models to be [physics-based](https://nasa.github.io/progpy/glossary.html#term-physics-based-model) or [data-driven](https://nasa.github.io/progpy/glossary.html#term-data-driven-model).\n", + "\n", + "All prognostics models have the same [format](https://nasa.github.io/progpy/prog_models_guide.html#progpy-prognostic-model-format) within ProgPy. The architecture requires definition of model inputs, states, outputs, and events which come together to create a system model.\n", + "\n", + "ProgPy includes a collection of [included models](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html#included-models) which can be accessed through the `progpy.models` package.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Loading a Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To illustrate how to use a built-in model, let's use the [Battery Electrochemistry model](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html#:~:text=class%20progpy.models.BatteryElectroChemEOD(**kwargs)). 
This model predicts the end-of-discharge of a Lithium-ion battery based on a set of differential equations that describe the electrochemistry of the system [Daigle et al. 2013](https://papers.phmsociety.org/index.php/phmconf/article/view/2252).\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First, import the model from the `progpy.models` package." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import BatteryElectroChemEOD" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's create a new battery using the default parameters:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "batt = BatteryElectroChemEOD()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Model parameters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Model parameters describe the specific system the model will simulate. For the Electrochemistry model, the default model parameters are for 18650-type Li-ion battery cells. All parameters can be accessed through `batt.parameters`. Let's print out all of the parameters, followed by the specific parameter for Ohmic drop, denoted as `Ro` in this model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(batt.parameters)\n", + "print(batt[\"Ro\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Parameter values can be configured in various ways. Parameter values can be passed into the constructor as keyword arguments when the model is first instantiated or can be set afterwards. Let's change two parameters to be more specific to our battery use-case:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "batt[\"Ro\"] = 0.15\n", + "batt[\"qMobile\"] = 7750" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In addition to setting model parameter values by hand, ProgPy includes a [parameter estimation](https://nasa.github.io/progpy/prog_models_guide.html#parameter-estimation:~:text=examples.future_loading-,Parameter%20Estimation,-%23) functionality that tunes the parameters of a general model to match the behavior of a specific system. In ProgPy, the `progpy.PrognosticsModel.estimate_params()` method tunes model parameters so that the model provides a good fit to observed data. In the case of the Electrochemistry model, for example, parameter estimation would take the general battery model and configure it so that it more accurately describes a specific battery. The ProgPy documentation includes a [detailed example](https://nasa.github.io/progpy/prog_models_guide.html#parameter-estimation:~:text=See%20the%20example%20below%20for%20more%20details) on how to do parameter estimation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Simulation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once a model has been created, the next step is to simulate it's evolution throughout time. Simulation is the foundation of prediction, but unlike full prediction, simulation does not include uncertainty in the state and other product (e.g., [output](https://nasa.github.io/progpy/glossary.html#term-output)) representation." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*Future Loading*\n", + "\n", + "Most prognostics models have some sort of [input](https://nasa.github.io/progpy/glossary.html#term-input), i.e. a control or load applied to the system that impacts the system state and outputs. For example, for a battery, the current drawn from the battery is the applied load, or input. In this case, to simulate the system, we must define a `future_loading` function that describes how the system will be loaded, or used, throughout time. (Note that not all systems have applied load, e.g. [ThrownObject](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html?highlight=thrownobject#progpy.models.ThrownObject), and no `future_loading` is required in these cases.)\n", + "\n", + "ProgPy includes pre-defined [loading functions](https://nasa.github.io/progpy/api_ref/progpy/Loading.html?highlight=progpy%20loading) in `progpy.loading`. Here, we'll implement the built-in piecewise loading functionality." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.loading import Piecewise\n", + "\n", + "future_loading = Piecewise(\n", + " InputContainer=batt.InputContainer,\n", + " times=[600, 900, 1800, 3000],\n", + " values={\"i\": [2, 1, 4, 2, 3]},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*Simulate to Threshold*\n", + "\n", + "With this in mind, we're ready to simulate our model forward in time using ProgPy's [simulation functionality](https://nasa.github.io/progpy/prog_models_guide.html#simulation).\n", + "\n", + "Physical systems frequently have one or more failure modes, and there's often a need to predict the progress towards these events and the eventual failure of the system. ProgPy generalizes this concept of predicting Remaining Useful Life (RUL) with [events](https://nasa.github.io/progpy/prog_models_guide.html#events) and their corresponding thresholds at which they occur. \n", + "\n", + "\n", + "Often, there is interest in simulating a system forward in time until a particular event occurs. ProgPy includes this capability with `simulate_to_threshold()`. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First, let's take a look at what events exist for the Electrochemistry model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "batt.events" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The only event in this model is 'EOD' or end-of-discharge. The `progpy.PrognosticsModel.event_state()` method estimates the progress towards the event, with 1 representing no progress towards the event and 0 indicating the event has occurred. The method `progpy.PrognosticsModel.threshold_met()` defines when the event has happened. In the Electrochemistry model, this occurs when the battery voltage drops below some pre-defined value, which is stored in the parameter `VEOD`. Let's see what this threshold value is." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "batt.parameters[\"VEOD\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With these definitions in mind, let's simulate the battery model until threshold for EOD is met. We'll use the same `future_loading` function as above. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = batt.simulate_to_threshold(\n", + " future_loading,\n", + " save_freq=10, # Frequency at which results are saved (s)\n", + " horizon=8000, # Maximum time to simulate (s) - This is a cutoff. The simulation will end at this time, or when a threshold has been met, whichever is first\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's visualize the results. Note that the simulation ends when the voltage value hits the VEOD value of 3.0." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = results.inputs.plot(ylabel=\"Current drawn (amps)\")\n", + "fig = results.event_states.plot(ylabel=\"Battery State of Charge\")\n", + "fig = results.outputs.plot(\n", + " ylabel={\"v\": \"voltage (V)\", \"t\": \"temperature (°C)\"}, compact=False\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In addition to simulating to threshold, ProgPy also includes a simpler capability to simulate until a particular time, using `simulate_to()`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Prognostics with data\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we have a basic simulation of our model, let's make a prediction using the prognostics capabilities within ProgPy. The two basic components of prognostics are [state estimation and prediction](https://nasa.github.io/progpy/prog_algs_guide.html#state-estimation-and-prediction-guide). ProgPy includes functionality to do both.\n", + "\n", + "To implement a prognostics example, we first need data from our system. We'll use the data we've already uploaded and prepped above.\n", + "\n", + "For the battery electrochemistry model, we'll need to use a [state estimator](https://nasa.github.io/progpy/prog_algs_guide.html#state-estimation) because the model state is not directly measureable, i.e. it has hidden states. We'll use a Particle Filter and the `estimate` method. ProgPy also includes a Kalman Filter and an Unscented Kalman Filter.\n", + "\n", + "First, let's load the necessary imports." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "from progpy.state_estimators import ParticleFilter\n", + "from progpy.uncertain_data import MultivariateNormalDist" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "State estimators require an initial state. To define this, we'll first initialize the model and then define the initial state as a distribution of possible states around this using a multi-variate normal distribution. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "initial_state = batt.initialize() # Initialize model\n", + "# Define distribution around initial state\n", + "x_guess = MultivariateNormalDist(\n", + " labels=initial_state.keys(),\n", + " mean=initial_state.values(),\n", + " covar=np.diag([max(1e-9, abs(x)) for x in initial_state.values()]),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With our initial distribution defined, we can now instantiate the state estimator." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pf = ParticleFilter(batt, x_guess)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With this, we're ready to run the State Estimator. To illustrate how state estimation works, let's estimate one step forward in time. First, we'll extract the measurement at this time. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Define time step based on data\n", + "dt = random_walk_dataset[\"absoluteTime\"][1] - random_walk_dataset[\"absoluteTime\"][0]\n", + "\n", + "# Data at time point\n", + "z = {\"t\": random_walk_dataset[\"temperature\"][1], \"v\": random_walk_dataset[\"voltage\"][1]}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we'll estimate the new state by calling the `estimate` method. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Extract input current from data\n", + "i = {\"i\": random_walk_dataset[\"current\"][1]}\n", + "\n", + "# Estimate the new state\n", + "pf.estimate(dt, i, z)\n", + "x_est = pf.x.mean" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, let's look at the difference between the estimated state and the true measurement. In the following plots, blue circles represent the initial distribution and orange circles represent the estimated result. The orange circles are more refined and give a better estimate, highlighting the usefulness of the state estimator." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = x_guess.plot_scatter(label=\"initial\")\n", + "fig = pf.x.plot_scatter(fig=fig, label=\"update\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we know how to do state estimation, the next key component of prognostics is [prediction](https://nasa.github.io/progpy/prog_algs_guide.html#prediction). ProgPy includes multiple predictors, and we'll implement a [Monte Carlo](https://nasa.github.io/progpy/api_ref/progpy/Predictor.html?highlight=monte%20carlo#included-predictors) predictor here. Let's load the necessary imports. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.predictors import MonteCarlo" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, a key factor in modeling any real-world application is noise. See the ProgPy [noise documentation](https://nasa.github.io/progpy/prog_models_guide.html#noise) for a detailed description of different types of noise and how to include it in the ProgPy architecture. Here, let's add some process and measurement noise into our system, to capture any uncertainties. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "PROCESS_NOISE = 2e-4 # Percentage process noise\n", + "MEASUREMENT_NOISE = 1e-4 # Percentage measurement noise\n", + "\n", + "# Apply process noise to state\n", + "batt.parameters[\"process_noise\"] = {\n", + " key: PROCESS_NOISE * value for key, value in initial_state.items()\n", + "}\n", + "\n", + "# Apply measurement noise to output\n", + "z0 = batt.output(initial_state)\n", + "batt.parameters[\"measurement_noise\"] = {\n", + " key: MEASUREMENT_NOISE * value for key, value in z0.items()\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's set up our predictor. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mc = MonteCarlo(batt)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To perform the prediction, we need to specify a few things, including the number of samples we want to use for the prediction, the step size for the prediction, and the prediction horizon (i.e., the time value to predict to)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "NUM_SAMPLES = 100\n", + "STEP_SIZE = 1\n", + "PREDICTION_HORIZON = random_walk_dataset[\"absoluteTime\"].iloc[-1]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We also need to define a future loading function based on the load in the dataset we are using. Let's extract the necessary information and define a function." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Extract time and outputs from data\n", + "times_rw = random_walk_dataset[\"absoluteTime\"]\n", + "outputs_rw = [{\"v\": elem[1][\"voltage\"]} for elem in random_walk_dataset.iterrows()]\n", + "\n", + "# Define function\n", + "import numpy as np\n", + "\n", + "\n", + "def future_load_rw(t, x=None):\n", + " current = np.interp(t, times_rw, random_walk_dataset[\"current\"])\n", + " return {\"i\": current}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Adjust voltage threshold\n", + "batt.parameters[\"VEOD\"] = 3.3" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With this, we are ready to predict. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mc_results = mc.predict(\n", + " initial_state,\n", + " future_loading_eqn=future_load_rw,\n", + " n_samples=NUM_SAMPLES,\n", + " dt=STEP_SIZE,\n", + " save_freq=10,\n", + " horizon=PREDICTION_HORIZON,\n", + " constant_noise=True,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's visualize the results. We'll plot 1) the data (in orange), 2) the predicted mean value (blue), 3) the individual predictions to show uncertainty (grey)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "\n", + "for z in mc_results.outputs:\n", + " plt.plot(z.times, [z_i[\"v\"] for z_i in z], \"grey\", linewidth=0.5)\n", + "plt.plot(\n", + " z.times,\n", + " [z_i[\"v\"] for z_i in mc_results.outputs[-1]],\n", + " \"grey\",\n", + " linewidth=0.5,\n", + " label=\"MC Samples\",\n", + ")\n", + "fig = plt.plot(\n", + " mc_results.times, [z[\"v\"] for z in mc_results.outputs.mean], label=\"mean prediction\"\n", + ")\n", + "fig = plt.plot(\n", + " random_walk_dataset[\"absoluteTime\"],\n", + " random_walk_dataset[\"voltage\"],\n", + " label=\"ground truth\",\n", + ")\n", + "plt.plot(\n", + " [0, PREDICTION_HORIZON],\n", + " [batt[\"VEOD\"], batt[\"VEOD\"]],\n", + " color=\"red\",\n", + " label=\"EOD Threshold\",\n", + ")\n", + "plt.legend()\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mc_results.outputs.mean" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
~~~STOP FOR QUESTIONS~~~
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we understand the basics of state estimation and prediction, as well as how to implement these concepts within ProgPy, we are ready to do a full prognostics example. We'll use the state estimator and predictor we created above." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First, let's set a few values we'll use in the simulation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Constant values\n", + "NUM_SAMPLES = 50\n", + "PREDICTION_UPDATE_FREQ = 50 # Number of steps between prediction updates" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's initialize a data structure for storing the results, using the following built-in class:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.predictors import ToEPredictionProfile\n", + "\n", + "profile = ToEPredictionProfile()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we'll perform the prognostics. We'll loop through time, estimating the state at each time step, and making a prediction at the `PREDICTION_UPDATE_FREQ`.\n", + "\n", + "For the sake of this tutorial and the data we're using, we need to change the default voltage threshold value. By changing this, we'll make the simulation run faster for our in-person demo, and ensure that samples reach EOD before the simulation is over. In practice, this value should be chosen based on the specific use-case you're considering. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Loop through time\n", + "for ind in range(3, random_walk_dataset.shape[0]):\n", + " # Extract data\n", + " t = random_walk_dataset[\"absoluteTime\"][ind]\n", + " i = {\"i\": random_walk_dataset[\"current\"][ind]}\n", + " z = {\n", + " \"t\": random_walk_dataset[\"temperature\"][ind],\n", + " \"v\": random_walk_dataset[\"voltage\"][ind],\n", + " }\n", + "\n", + " # Perform state estimation\n", + " pf.estimate(t, i, z)\n", + " eod = batt.event_state(pf.x.mean)[\"EOD\"]\n", + " print(\" - Event State: \", eod)\n", + "\n", + " # Prediction step (at specified frequency)\n", + " if ind % PREDICTION_UPDATE_FREQ == 0:\n", + " # Perform prediction\n", + " mc_results = mc.predict(\n", + " pf.x,\n", + " future_load_rw,\n", + " t0=t,\n", + " n_samples=NUM_SAMPLES,\n", + " dt=1,\n", + " horizon=PREDICTION_HORIZON,\n", + " const_load=True,\n", + " )\n", + "\n", + " # Calculate metrics and print\n", + " metrics = mc_results.time_of_event.metrics()\n", + " print(\n", + " \" - ToE: {} (sigma: {})\".format(\n", + " metrics[\"EOD\"][\"mean\"], metrics[\"EOD\"][\"std\"]\n", + " )\n", + " )\n", + "\n", + " # Save results\n", + " profile.add_prediction(t, mc_results.time_of_event)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With our prognostics results, we can now calculate some metrics to analyze the accuracy. \n", + "\n", + "First, we need to define what the ground truth value is for end-of-discharge." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "GROUND_TRUTH = {\"EOD\": 1600}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll start by calculating the cumulative relative accuracy given the ground truth value. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cra = profile.cumulative_relative_accuracy(GROUND_TRUTH)\n", + "print(f\"Cumulative Relative Accuracy for 'EOD': {cra['EOD']}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll also generate some plots of the results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ALPHA = 0.05\n", + "playback_plots = profile.plot(GROUND_TRUTH, ALPHA, True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Data-driven Capabilities and Surrogate Models\n", + "\n", + "In addition to the physics-based modeling functionalities described so far, ProgPy also includes a [framework for implementing data-driven models](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html?highlight=surrogate#datamodel). Included methodologies are [Dynamic Mode Decomposition](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html?highlight=surrogate#dmdmodel), [LSTM](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html?highlight=surrogate#lstmstatetransitionmodel), and [Polynomial Chaos Expansion](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html?highlight=surrogate#polynomialchaosexpansion). This data-driven architecture also includes [surrogate models](https://nasa.github.io/progpy/api_ref/progpy/DataModel.html?highlight=surrogate#from-another-prognosticsmodel-i-e-surrogate) which can be used to create models that approximate the original/higher-fidelity models, generally resulting in a less accurate model that is more computationally efficient. \n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Building a new model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The last section described how to use a prognostics model distributed with ProgPy (BatteryElectroChemEOD). However, in many cases a model doesn't yet exist for the system being targeted. In those cases, a new model must be built to describe the behavior and degradation of the system.\n", + "\n", + "In this section we will create a new model from scratch, specifically a new physics-based model. ProgPy also includes tools for training data-driven models (see data-driven tab, here: https://nasa.github.io/progpy/prog_models_guide.html#state-transition-models), but that is not the approach we will be demonstrating today.\n", + "\n", + "Physics-based state transition models that cannot be described linearly are constructed by subclassing [progpy.PrognosticsModel](https://nasa.github.io/progpy/api_ref/prog_models/PrognosticModel.html#prog_models.PrognosticsModel). To demonstrate this, we'll create a new model class that inherits from this class. Once constructed in this way, the analysis and simulation tools for PrognosticsModels will work on the new model.\n", + "https://nasa.github.io/progpy/prog_models_guide.html#state-transition-models\n", + "\n", + "We will again be using the battery as a target, creating an alternative to the battery model introduced in the previous section.\n", + "We will be implementing the simplified battery model introduced by Gina Sierra, et. al. (https://www.sciencedirect.com/science/article/pii/S0951832018301406)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First, we import the PrognosticsModel class. 
This is the parent class for all ProgPy Models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy import PrognosticsModel" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## State Transition\n", + "The first step to creating a physics-based model is implementing state transition. From the paper we see one state (SOC) and one state transition equation:\n", + "\n", + "$SOC(k+1) = SOC(k) - P(k)*\\Delta t * E_{crit}^{-1} + w_2(k)$\n", + "\n", + "where $k$ is discrete time. The $w$ term is process noise. This can be omitted, since it's handled by ProgPy. \n", + "\n", + "In this equation we see one input ($P$, power). Note that the previous battery model uses current, where this uses power.\n", + "\n", + "Armed with this information we can start defining our model. First, we start by declaring our inputs and states:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivilantCircuit(PrognosticsModel):\n", + " inputs = [\"P\"]\n", + " states = [\"SOC\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next we define parameters. In this case the parameters are the initial SOC state (1) and the E_crit (Internal Total Energy). We get the value for $E_{crit}$ from the paper.\n", + "\n", + "**Note: wont actually subclass in practice, but it's used to break apart model definition into chunks**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivilantCircuit(SimplifiedEquivilantCircuit):\n", + " default_parameters = {\n", + " \"E_crit\": 202426.858, # Internal Total Energy\n", + " \"x0\": {\n", + " \"SOC\": 1, # State of Charge\n", + " },\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We know that SOC will always be between 0 and 1, so we can specify that explicitly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivilantCircuit(SimplifiedEquivilantCircuit):\n", + " state_limits = {\n", + " \"SOC\": (0.0, 1.0),\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we define the state transition equation. There are two methods for doing this: *dx* (for continuous) and *next_state* (for discrete). Today we're using the $dx$ function. This was selected because the model is continuous." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivilantCircuit(SimplifiedEquivilantCircuit):\n", + " def dx(self, x, u):\n", + " return self.StateContainer({\"SOC\": -u[\"P\"] / self[\"E_crit\"]})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Outputs\n", + "\n", + "Now that state transition is defined, the next step is to define the outputs of the function. 
From the paper we have the following output equations:\n", + "\n", + "$v(k) = v_{oc}(k) - i(k) * R_{int} + \\eta (k)$\n", + "\n", + "where\n", + "\n", + "$v_{oc}(k) = v_L - \\lambda ^ {\\gamma * SOC(k)} - \\mu * e ^ {-\\beta * \\sqrt{SOC(k)}}$\n", + "\n", + "and\n", + "\n", + "$i(k) = \\frac{v_{oc}(k) - \\sqrt{v_{oc}(k)^2 - 4 * R_{int} * P(k)}}{2 * R_{int}(k)}$" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There is one output here (v, voltage), the same one input (P, Power), and a few lumped parameters: $v_L$, $\\lambda$, $\\gamma$, $\\mu$, $\\beta$, and $R_{int}$. The default parameters are found in the paper.\n", + "\n", + "Note that $\\eta$ is the measurement noise, which progpy handles, so that's omitted from the equation below.\n", + "\n", + "Note 2: There is a typo in the paper where the sign of the second term in the $v_{oc}$ term. It should be negative (like above), but is reported as positive in the paper.\n", + "\n", + "We can update the definition of the model to include this output and parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivilantCircuit(SimplifiedEquivilantCircuit):\n", + " outputs = [\"v\"]\n", + "\n", + " default_parameters = {\n", + " \"E_crit\": 202426.858,\n", + " \"v_L\": 11.148,\n", + " \"lambda\": 0.046,\n", + " \"gamma\": 3.355,\n", + " \"mu\": 2.759,\n", + " \"beta\": 8.482,\n", + " \"R_int\": 0.027,\n", + " \"x0\": {\n", + " \"SOC\": 1,\n", + " },\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that the input ($P(k)$) is also used in the output equation, that means it's part of the state of the system. So we will update the states to include this.\n", + "\n", + "**NOTE: WE CHANGE TO next_state.** Above, we define state transition with ProgPy's `dx` method because the model was continuous. Here, with the addition of power, the model becomes discrete, so we must now use ProgPy's `next_state` method to define state transition. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivilantCircuit(SimplifiedEquivilantCircuit):\n", + " states = [\"SOC\", \"P\"]\n", + "\n", + " def next_state(self, x, u, dt):\n", + " x[\"SOC\"] = x[\"SOC\"] - u[\"P\"] * dt / self[\"E_crit\"]\n", + " x[\"P\"] = u[\"P\"]\n", + "\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Adding a default P state as well:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivilantCircuit(SimplifiedEquivilantCircuit):\n", + " default_parameters = {\n", + " \"E_crit\": 202426.858,\n", + " \"v_L\": 11.148,\n", + " \"lambda\": 0.046,\n", + " \"gamma\": 3.355,\n", + " \"mu\": 2.759,\n", + " \"beta\": 8.482,\n", + " \"R_int\": 0.027,\n", + " \"x0\": {\n", + " \"SOC\": 1,\n", + " \"P\": 0.01, # Added P\n", + " },\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we're ready to define the output equations (defined above)." 
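+    ,"\n",
+    "(As a quick sanity check on the $i(k)$ expression above: it is the physically meaningful root of $P(k) = v(k) i(k) = (v_{oc}(k) - i(k) R_{int}) i(k)$, i.e., of the quadratic $R_{int} i^2 - v_{oc} i + P(k) = 0$ in $i$.)"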
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import math\n", + "\n", + "\n", + "class SimplifiedEquivilantCircuit(SimplifiedEquivilantCircuit):\n", + " def output(self, x):\n", + " v_oc = (\n", + " self[\"v_L\"]\n", + " - self[\"lambda\"] ** (self[\"gamma\"] * x[\"SOC\"])\n", + " - self[\"mu\"] * math.exp(-self[\"beta\"] * math.sqrt(x[\"SOC\"]))\n", + " )\n", + " i = (v_oc - math.sqrt(v_oc**2 - 4 * self[\"R_int\"] * x[\"P\"])) / (\n", + " 2 * self[\"R_int\"]\n", + " )\n", + " v = v_oc - i * self[\"R_int\"]\n", + " return self.OutputContainer({\"v\": v})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Events\n", + "Finally we can define events. This is an easy case because our event state (SOC) is part of the model state. So we will simply define a single event (EOD: End of Discharge), where SOC is progress towards that event." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivilantCircuit(SimplifiedEquivilantCircuit):\n", + " events = [\"EOD\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then for our event state, we simply extract the relevant state" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class SimplifiedEquivilantCircuit(SimplifiedEquivilantCircuit):\n", + " def event_state(self, x):\n", + " return {\"EOD\": x[\"SOC\"]}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The threshold of the event is defined as the state where the event state (EOD) is 0.\n", + "\n", + "That's it. We've now defined a complete model. Now it's ready to be used for state estimation or prognostics, like any model distributed with ProgPy" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using the Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First step to using the model is initializing an instance of it." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m = SimplifiedEquivilantCircuit()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To demonstrate/test this model, we will start by simulating with a constant load" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def future_load(t, x=None):\n", + " return {\"P\": 165}\n", + "\n", + "\n", + "results = m.simulate_to_threshold(future_load, dt=1, save_freq=1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = results.event_states.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = results.outputs.plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Everything seems to be working well here. Now let's test how well it fits the random walk dataset. First let's prepare the data and future load equation. 
Note that this future load uses power instead of current (which the last one used)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "times_rw = random_walk_dataset[\"absoluteTime\"]\n", + "inputs_rw = [\n", + " elem[1][\"voltage\"] * elem[1][\"current\"] for elem in random_walk_dataset.iterrows()\n", + "]\n", + "outputs_rw = [{\"v\": elem[1][\"voltage\"]} for elem in random_walk_dataset.iterrows()]\n", + "\n", + "import numpy as np\n", + "\n", + "\n", + "def future_load_rw(t, x=None):\n", + " power = np.interp(t, times_rw, inputs_rw)\n", + " return {\"P\": power}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can simulate using that future load equation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=100\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's take a look at the result, comparing it to the ground truth" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from matplotlib import pyplot as plt\n", + "\n", + "plt.figure()\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]])\n", + "plt.plot(result.times, [z[\"v\"] for z in result.outputs])\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")\n", + "fig = result.event_states.plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is a terrible fit. Clearly the battery model isn't properly configured for this specific battery. Reading through the paper we see that the default parameters are for a larger battery pouch present in a UAV, much larger than the 18650 battery that produced our dataset\n", + "\n", + "To correct this, we need to estimate the model parameters." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Parameter Estimation\n", + "\n", + "Parameter estimation could be a tutorial on its own. Sometimes it can be considered more of an art than a science.\n", + "\n", + "Parameter Estimation the process of estimating the parameters for a model. This is done using a mixture of data, knowledge (e.g., from system specs), and intuition. For large, complex models, it can be VERY difficult and computationall expensive. Fortunately, in this case we have a relatively simple model.\n", + "\n", + "See: https://nasa.github.io/progpy/prog_models_guide.html#parameter-estimation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First, let's take a look at the parameter space" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m.parameters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is a very simple model. There are really only 7 parameters to set (assuming initial SOC is always 1).\n", + "\n", + "We can start with setting a few parameters we know. We know that $v_L$ is about 4.2 (from the battery), we expect that the battery internal resistance is the same as that in the electrochemistry model, and we know that the capacity of this battery is significantly smaller." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m[\"v_L\"] = 4.2 # We know this\n", + "m[\"R_int\"] = batt[\"Ro\"]\n", + "m[\"E_crit\"] /= 4 # Battery capacity is much smaller" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's take a look at the model fit again and see where that got us." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result_guess = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=5\n", + ")\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]])\n", + "plt.plot(result_guess.times, [z[\"v\"] for z in result_guess.outputs])\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Much better, but not there yet. Next, we need to use the parameter estimation feature to estimate the parameters further. First let's prepare some data. We'll use the trickle, reference, and step datasets for this. These are close enough temporally that we can expect aging effects to be minimal.\n", + "\n", + "**NOTE: It is important to use a different dataset to estimate parameters as to test**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "times_trickle = trickle_dataset[\"relativeTime\"]\n", + "inputs_trickle = [\n", + " {\"P\": elem[1][\"voltage\"] * elem[1][\"current\"]}\n", + " for elem in trickle_dataset.iterrows()\n", + "]\n", + "outputs_trickle = [{\"v\": elem[1][\"voltage\"]} for elem in trickle_dataset.iterrows()]\n", + "\n", + "times_ref = reference_dataset[\"relativeTime\"]\n", + "inputs_ref = [\n", + " {\"P\": elem[1][\"voltage\"] * elem[1][\"current\"]}\n", + " for elem in reference_dataset.iterrows()\n", + "]\n", + "outputs_ref = [{\"v\": elem[1][\"voltage\"]} for elem in reference_dataset.iterrows()]\n", + "\n", + "times_step = step_dataset[\"relativeTime\"]\n", + "inputs_step = [\n", + " {\"P\": elem[1][\"voltage\"] * elem[1][\"current\"]} for elem in step_dataset.iterrows()\n", + "]\n", + "outputs_step = [{\"v\": elem[1][\"voltage\"]} for elem in step_dataset.iterrows()]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's print the keys and the error beforehand for reference. The error here is what is used to estimate parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "inputs_reformatted_rw = [\n", + " {\"P\": elem[1][\"voltage\"] * elem[1][\"current\"]}\n", + " for elem in random_walk_dataset.iterrows()\n", + "]\n", + "all_keys = [\"v_L\", \"R_int\", \"lambda\", \"gamma\", \"mu\", \"beta\", \"E_crit\"]\n", + "print(\"Model configuration\")\n", + "for key in all_keys:\n", + " print(\"-\", key, m[key])\n", + "error_guess = m.calc_error(\n", + " times=times_rw.to_list(), inputs=inputs_reformatted_rw, outputs=outputs_rw\n", + ")\n", + "print(\"Error: \", error_guess)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, lets set the bounds on each of the parameters.\n", + "\n", + "For $v_L$ and $R_{int}$, we're defining some small bounds because we have an idea of what they might be. For the others we are saying it's between 0.1 and 10x the default battery. 
We also are adding a constraint that E_crit must be smaller than the default, since we know it's a smaller battery." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "bounds = {\n", + " \"v_L\": (3.75, 4.5),\n", + " \"R_int\": (batt[\"Ro\"] * 0.5, batt[\"Ro\"] * 2.5),\n", + " \"lambda\": (0.046 / 10, 0.046 * 10),\n", + " \"gamma\": (3.355 / 10, 3.355 * 10),\n", + " \"mu\": (2.759 / 10, 2.759 * 10),\n", + " \"beta\": (8.482 / 10, 8.482 * 10),\n", + " \"E_crit\": (202426.858 / 10, 202426.858), # (smaller than default)\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we'll estimate the parameters. See [Param Estimation](https://nasa.github.io/progpy/prog_models_guide.html#parameter-estimation) for more details.\n", + "\n", + "We can throw all of the data into estimate parameters, but that will take a LONG time to run, and is prone to errors (e.g., getting stuck in local minima). So, for this example we split characterization into parts.\n", + "\n", + "First we try to capture the base voltage ($v_L$). If we look at the equation above, $v_L$ is the only term that is not a function of either SOC or Power. So, for this estimation we use the trickle dataset, where Power draw is the lowest. We only use the first section where SOC can be assumed to be about 1." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "keys = [\"v_L\"]\n", + "m.estimate_params(\n", + " times=trickle_dataset[\"relativeTime\"].iloc[:10].to_list(),\n", + " inputs=inputs_trickle[:10],\n", + " outputs=outputs_trickle[:10],\n", + " keys=keys,\n", + " dt=1,\n", + " bounds=bounds,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's take a look at what that got us:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Model configuration\")\n", + "for key in all_keys:\n", + " print(\"-\", key, m[key])\n", + "error_fit1 = m.calc_error(\n", + " times=times_rw.to_list(), inputs=inputs_reformatted_rw, outputs=outputs_rw\n", + ")\n", + "print(f\"Error: {error_guess}->{error_fit1} ({error_fit1 - error_guess})\")\n", + "\n", + "result_fit1 = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=5\n", + ")\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]], label=\"ground truth\")\n", + "plt.plot(result_guess.times, [z[\"v\"] for z in result_guess.outputs], label=\"guess\")\n", + "plt.plot(result_fit1.times, [z[\"v\"] for z in result_fit1.outputs], label=\"fit1\")\n", + "plt.legend()\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")\n", + "\n", + "plt.figure()\n", + "plt.plot([0, 1], [error_guess, error_fit1])\n", + "plt.xlabel(\"Parameter Estimation Run\")\n", + "plt.ylabel(\"Error\")\n", + "plt.ylim((0, 0.25))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A tiny bit closer, but not significant. Our initial guess (from the packaging) must have been pretty good.\n", + "\n", + "The next step is to estimate the effect of current on the battery. The Parameter $R_{int}$ (internal resistance) effects this. To estimate $R_{int}$ we will use 2 runs where power is not minimal (ref and step runs). Again, we will use only the first couple steps so EOL can be assumed to be 1." 
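+    ,"\n",
+    "(Intuitively, from the output equation $v(k) = v_{oc}(k) - i(k) R_{int}$ above, the effect of $R_{int}$ on the measured voltage grows with the current drawn, so these higher-power runs are the most informative ones for estimating it.)"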
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "keys = [\"R_int\"]\n", + "m.estimate_params(\n", + " times=[times_ref.iloc[:5].to_list(), times_step.iloc[:5].to_list()],\n", + " inputs=[inputs_ref[:5], inputs_step[:5]],\n", + " outputs=[outputs_ref[:5], outputs_step[:5]],\n", + " keys=keys,\n", + " dt=1,\n", + " bounds=bounds,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Again, let's look at what that got us." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Model configuration\")\n", + "for key in all_keys:\n", + " print(\"-\", key, m[key])\n", + "error_fit2 = m.calc_error(\n", + " times=times_rw.to_list(), inputs=inputs_reformatted_rw, outputs=outputs_rw\n", + ")\n", + "print(f\"Error: {error_fit1}->{error_fit2} ({error_fit2 - error_fit1})\")\n", + "\n", + "result_fit2 = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=5\n", + ")\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]], label=\"ground truth\")\n", + "plt.plot(result_guess.times, [z[\"v\"] for z in result_guess.outputs], label=\"guess\")\n", + "plt.plot(result_fit1.times, [z[\"v\"] for z in result_fit1.outputs], label=\"fit1\")\n", + "plt.plot(result_fit2.times, [z[\"v\"] for z in result_fit2.outputs], label=\"fit2\")\n", + "plt.legend()\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")\n", + "\n", + "plt.figure()\n", + "plt.plot([0, 1, 2], [error_guess, error_fit1, error_fit2])\n", + "plt.xlabel(\"Parameter Estimation Run\")\n", + "plt.ylabel(\"Error\")\n", + "plt.ylim((0, 0.25))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Much better, but not there yet! Finally, we need to estimate the effects of SOC on battery performance. This involves all of the remaining parameters. For this we will use the entire reference curve, which captures a full discharge.\n", + "\n", + "Note we're using the error_method MAX_E, instead of the default MSE. This results in parameters that better estimate the end of the discharge curve and is recommended when estimating parameters that are combined with the event state."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "keys = [\"lambda\", \"gamma\", \"mu\", \"beta\", \"E_crit\"]\n", + "m.estimate_params(\n", + " times=times_ref.to_list(),\n", + " inputs=inputs_ref,\n", + " outputs=outputs_ref,\n", + " keys=keys,\n", + " dt=1,\n", + " bounds=bounds,\n", + " error_method=\"MAX_E\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's see what that got us" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Model configuration\")\n", + "for key in all_keys:\n", + " print(\"-\", key, m[key])\n", + "error_fit3 = m.calc_error(\n", + " times=times_rw.to_list(), inputs=inputs_reformatted_rw, outputs=outputs_rw\n", + ")\n", + "print(f\"Error: {error_fit2}->{error_fit3} ({error_fit3 - error_fit2})\")\n", + "\n", + "result_fit3 = m.simulate_to(\n", + " random_walk_dataset[\"absoluteTime\"].iloc[-1], future_load_rw, dt=1, save_freq=5\n", + ")\n", + "plt.plot(times_rw, [z for z in random_walk_dataset[\"voltage\"]], label=\"ground truth\")\n", + "plt.plot(result_guess.times, [z[\"v\"] for z in result_guess.outputs], label=\"guess\")\n", + "plt.plot(result_fit1.times, [z[\"v\"] for z in result_fit1.outputs], label=\"fit1\")\n", + "plt.plot(result_fit2.times, [z[\"v\"] for z in result_fit2.outputs], label=\"fit2\")\n", + "plt.plot(result_fit3.times, [z[\"v\"] for z in result_fit3.outputs], label=\"fit3\")\n", + "plt.legend()\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")\n", + "\n", + "plt.figure()\n", + "plt.plot([0, 1, 2, 3], [error_guess, error_fit1, error_fit2, error_fit3])\n", + "plt.xlabel(\"Parameter Estimation Run\")\n", + "plt.ylabel(\"Error\")\n", + "plt.ylim((0, 0.25))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is even better. Now we have an \"ok\" estimate, ~150 mV (for the sake of a demo). The estimate could be refined further by setting a lower tolerance (tol parameter), or repeating the 4 parameter estimation steps, above. Talk to Chetan Kulkarni (chetan.s.kulkarni@nasa.gov) with questions on this." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prognostics Example\n", + "Let's repeat the above example that uses BatteryElectroChemEOD with the same data, so we can compare the results.\n", + "\n", + "This does require an extension of the SimplifiedEquivilantCircuit model. \n", + "\n", + "In BatteryElectroChemEOD, EOD is defined as when voltage passes below some threshold (VEOD). This is frequently called \"functional EOD\", because after this point the battery can no longer perform its function.\n", + "\n", + "For SimplifiedEquivilantCircuit, EOD is defined as the point where there is no charge, far after functional EOD. To compare the two, we define a new event: \"Low V\", for when voltage hits a specific threshold (VEOD)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "params = m.parameters # Save learned parameters\n", + "\n", + "\n", + "class SimplifiedEquivilantCircuit(SimplifiedEquivilantCircuit):\n", + " events = [\"EOD\", \"Low V\"]\n", + "\n", + " def event_state(self, x):\n", + " return {\n", + " \"EOD\": x[\"SOC\"],\n", + " \"Low V\": (self.output(x)[\"v\"] - self[\"VEOD\"])\n", + " / (self[\"v_L\"] - self[\"VEOD\"]),\n", + " }\n", + "\n", + "\n", + "SimplifiedEquivilantCircuit.default_parameters[\"VEOD\"] = batt[\"VEOD\"]\n", + "m = SimplifiedEquivilantCircuit()\n", + "m.parameters.update(params) # update with saved parameters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can then initialize the state distribution. Here we define a distribution with significant noise around SOC and no noise around the power (which is overwritten each step anyway)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "initial_state = m.initialize()\n", + "x_guess = MultivariateNormalDist(\n", + " initial_state.keys(), initial_state.values(), np.diag([0.1, 1e-99])\n", + ") # Define distribution around initial state" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can construct the Particle Filter with this guess" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pf = ParticleFilter(m, x_guess)\n", + "fig = pf.x.plot_scatter()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally we define the process and measurement noise and initialize the predictor." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m.parameters[\"process_noise\"] = {\"SOC\": 5e-5, \"P\": 5e-3}\n", + "m.parameters[\"measurement_noise\"] = {\"v\": 0.2}\n", + "m.parameters[\"process_noise_dist\"] = \"normal\"\n", + "mc = MonteCarlo(m, constant_noise=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's lake a look at a single prediction using this setup, and plot the results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mc_results = mc.predict(\n", + " initial_state,\n", + " future_loading_eqn=future_load_rw,\n", + " n_samples=NUM_SAMPLES,\n", + " dt=STEP_SIZE,\n", + " save_freq=10,\n", + " horizon=PREDICTION_HORIZON,\n", + " const_load=True,\n", + ")\n", + "\n", + "for z in mc_results.outputs:\n", + " plt.plot(z.times, [z_i[\"v\"] for z_i in z], \"grey\", linewidth=0.5)\n", + "fig = plt.plot(\n", + " mc_results.times, [z[\"v\"] for z in mc_results.outputs.mean], label=\"mean prediction\"\n", + ")\n", + "fig = plt.plot(\n", + " random_walk_dataset[\"absoluteTime\"],\n", + " random_walk_dataset[\"voltage\"],\n", + " label=\"ground truth\",\n", + ")\n", + "plt.legend()\n", + "plt.xlabel(\"Time (sec)\")\n", + "plt.ylabel(\"Voltage (volts)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Pretty good, now let's repeat the example from earlier" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Loop through time\n", + "simplified_profile = ToEPredictionProfile()\n", + "for ind in range(3, random_walk_dataset.shape[0]):\n", + " # Extract data\n", + " t = random_walk_dataset[\"absoluteTime\"][ind]\n", + " i 
= {\"P\": random_walk_dataset[\"current\"][ind] * random_walk_dataset[\"voltage\"][ind]}\n", + " z = {\"v\": random_walk_dataset[\"voltage\"][ind]}\n", + "\n", + " # Perform state estimation\n", + " pf.estimate(t, i, z)\n", + " eod = m.event_state(pf.x.mean)[\"Low V\"]\n", + " print(\" - Event State: \", eod)\n", + "\n", + " # Prediction step (at specified frequency)\n", + " if ind % PREDICTION_UPDATE_FREQ == 0:\n", + " # Perform prediction\n", + " mc_results = mc.predict(\n", + " pf.x,\n", + " future_load_rw,\n", + " t0=t,\n", + " n_samples=NUM_SAMPLES,\n", + " dt=1,\n", + " horizon=PREDICTION_HORIZON,\n", + " events=\"Low V\",\n", + " )\n", + "\n", + " # Save results\n", + " simplified_profile.add_prediction(t, mc_results.time_of_event)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note the runtime.\n", + "\n", + "Finally let's take a look at the results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ALPHA = 0.05\n", + "playback_plots = profile.plot(GROUND_TRUTH, ALPHA, True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final Notes on Building New Models\n", + "Here we built a brand new model, from scratch, using the information from a paper. We estimated the parameters using real data and compared its performance with the include BatteryElectroChemEOD.\n", + "\n", + "This is one example on how to build a new model, for more details see https://nasa.github.io/progpy/prog_models_guide.html#building-new-models and the 04_New Models.ipynb file. Other model-building topics include:\n", + "\n", + "* Advanced Noise Representation: e.g., Other distributions\n", + "* Complex Future Loading Methods: E.g., moving average, loading with uncertainty or functions of state\n", + "* Custom Events: e.g., warning thresholds\n", + "* Data-driven models\n", + "* Derived Parameters: parameters that are functions of other parameters\n", + "* Direct Models: Models of state, future_loading -> Time of Event without state transition\n", + "* Linear Models\n", + "* Optimizations\n", + "\n", + "Note that this model can be extended by changing the parameters ecrit and r to steady states. This will help the model account for the effects of aging, since they will be estimated with each state estimation step." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Other Advanced Capabilities" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* Combination Models: https://nasa.github.io/progpy/prog_models_guide.html#combination-models and 06_Combining_Models\n", + "* Dynamic Step Size\n", + "* Integration Methods\n", + "* Serialization\n", + "* prog_server" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Closing\n", + "\n", + "**[Contributing](https://nasa.github.io/progpy/index.html#contributing-and-partnering)**: Thank you for attending this tutorial. ProgPy is a collaborative effort, including NASA and external collaborators. 
If you're interested in contributing or learning more, reach out at christopher.a.teubert@nasa.gov\n", + "\n", + "**We are looking or interns for this summer- email christopher.a.teubert@nasa.gov for details**" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.12.0 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.0" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "f1062708a37074d70712b695aadee582e0b0b9f95f45576b5521424137d05fec" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/_downloads/d3b015984c77675d160e6a55a07625b8/pce.py b/docs/_downloads/d3b015984c77675d160e6a55a07625b8/pce.py index cc106b97..6281a284 100644 --- a/docs/_downloads/d3b015984c77675d160e6a55a07625b8/pce.py +++ b/docs/_downloads/d3b015984c77675d160e6a55a07625b8/pce.py @@ -1,5 +1,5 @@ # Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. -# This ensures that the directory containing examples is in the python search directories +# This ensures that the directory containing examples is in the python search directories """ This example demonstrates the Polynomial Chaos Expansion (PCE) Surrogate Direct Model functionality. PCE is a method by which the behavior of a model can be approximated by a polynomial. In this case the relationship between future loading and time of event. The result is a direct surrogate model that can be used to estimate time of event given a loading profile, without requiring the original model to be simulated. The resulting estimation is MUCH faster than simulating the model. @@ -16,15 +16,16 @@ from progpy.data_models import PCE import scipy as sp + def run_example(): # First lets define some constants # Time step used in simulation - DT = 0.5 + DT = 0.5 # The number of samples to used in the PCE # Larger gives a better approximation, but takes longer to generate - N_SAMPLES = 100 + N_SAMPLES = 100 # The distribution of the input current # This defines the expected values for the input @@ -32,27 +33,29 @@ def run_example(): # With a uniform distribution (i.e., no value in that range is more likely than any other) INPUT_CURRENT_DIST = cp.Uniform(3, 8) # Note: These discharge rates are VERY high. This is only for demonstration purposes. - # The high discharge rate will accelerate the degradation of the battery, + # The high discharge rate will accelerate the degradation of the battery, # which will cause the example to run faster # Step 1: Define base model # First let's define the base model that we're creating a surrogate for. - m = BatteryElectroChemEOD(process_noise = 0) + m = BatteryElectroChemEOD(process_noise=0) x0 = m.initialize() # Initial State - + # Step 2: Build surrogate # Next we build the surrogate model from the base model # To build the model we pass in the distributions of possible values for each input. # We also provide the max_time. This is the maximum time that the surrogate will be used for. # We dont expect any battery to last more than 4000 seconds given the high discharge curves we're passing in. 
- m_surrogate = PCE.from_model(m, - x0, # Model State - {'i': INPUT_CURRENT_DIST}, # Distribution of inputs - dt=DT, - times = [i*1000 for i in range(5)], - N = N_SAMPLES) + m_surrogate = PCE.from_model( + m, + x0, # Model State + {"i": INPUT_CURRENT_DIST}, # Distribution of inputs + dt=DT, + times=[i * 1000 for i in range(5)], + N=N_SAMPLES, + ) # The result (m_surrogate) is a model that can be used to VERY quickly estimate time_of_event for a new loading profile. - + # Note: this is only valid for the initial state (x0) of the battery. # To train for another state pass in the parameter x (type StateContainer). # e.g. m_surrogate = PCE.from_model(m, SOME_OTHER_STATE, ...) @@ -71,25 +74,30 @@ def run_example(): def future_loading(t, x=None): return m.InputContainer(interpolator(t)[np.newaxis].T) - TEST_SAMPLES = m_surrogate.parameters['J'].sample(size=N_TEST_CASES, rule='latin_hypercube') + TEST_SAMPLES = m_surrogate.parameters["J"].sample( + size=N_TEST_CASES, rule="latin_hypercube" + ) for i in range(N_TEST_CASES): # Generate a new loading profile - interpolator = sp.interpolate.interp1d(m_surrogate.parameters['times'], TEST_SAMPLES[:, i]) - + interpolator = sp.interpolate.interp1d( + m_surrogate.parameters["times"], TEST_SAMPLES[:, i] + ) + # Estimate time of event from ground truth (original model) and surrogate - gt_results[i] = m.time_of_event(x0, future_loading, dt = DT)['EOD'] - surrogate_results[i] = m_surrogate.time_of_event(x0, future_loading)['EOD'] + gt_results[i] = m.time_of_event(x0, future_loading, dt=DT)["EOD"] + surrogate_results[i] = m_surrogate.time_of_event(x0, future_loading)["EOD"] # Plot results # Note here that the approximation is very good, but not perfect # Approximation would be even better with more samples plt.scatter(gt_results, surrogate_results) max_val = max(max(gt_results), max(surrogate_results)) - plt.plot([0, max_val], [0, max_val], 'k--') + plt.plot([0, max_val], [0, max_val], "k--") plt.xlabel("Ground Truth (s)") plt.ylabel("PCE (s)") plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/d570db067c9a858befcf317453b13dc7/sim_powertrain.ipynb b/docs/_downloads/d570db067c9a858befcf317453b13dc7/sim_powertrain.ipynb index 4271fb54..7d27c285 100644 --- a/docs/_downloads/d570db067c9a858befcf317453b13dc7/sim_powertrain.ipynb +++ b/docs/_downloads/d570db067c9a858befcf317453b13dc7/sim_powertrain.ipynb @@ -1,54 +1,77 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample of a powertrain being simulated for a set amount of time. 
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from progpy.models import Powertrain, ESC, DCMotor\n\ndef run_example():\n # Create a model object\n esc = ESC()\n motor = DCMotor()\n powertrain = Powertrain(esc, motor)\n\n # Define future loading function - 100% duty all the time\n def future_loading(t, x=None):\n return powertrain.InputContainer({\n 'duty': 1,\n 'v': 23\n })\n \n # Simulate to threshold\n print('\\n\\n------------------------------------------------')\n print('Simulating to threshold\\n\\n')\n simulated_results = powertrain.simulate_to(2, future_loading, dt=2e-5, save_freq=0.1, print=True)\n\n# This allows the module to be executed directly \nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample of a powertrain being simulated for a set amount of time. \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from progpy.models import Powertrain, ESC, DCMotor\n", + "\n", + "\n", + "def run_example():\n", + " # Create a model object\n", + " esc = ESC()\n", + " motor = DCMotor()\n", + " powertrain = Powertrain(esc, motor)\n", + "\n", + " # Define future loading function - 100% duty all the time\n", + " def future_loading(t, x=None):\n", + " return powertrain.InputContainer({\"duty\": 1, \"v\": 23})\n", + "\n", + " # Simulate to threshold\n", + " print(\"\\n\\n------------------------------------------------\")\n", + " print(\"Simulating to threshold\\n\\n\")\n", + " simulated_results = powertrain.simulate_to(\n", + " 2, future_loading, dt=2e-5, save_freq=0.1, print=True\n", + " )\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/d693501cc396088cca1fcc5921e9a3ff/sim_dcmotor_singlephase.py b/docs/_downloads/d693501cc396088cca1fcc5921e9a3ff/sim_dcmotor_singlephase.py index 1ec964b5..2172c1a9 100644 --- a/docs/_downloads/d693501cc396088cca1fcc5921e9a3ff/sim_dcmotor_singlephase.py +++ b/docs/_downloads/d693501cc396088cca1fcc5921e9a3ff/sim_dcmotor_singlephase.py @@ -2,34 +2,44 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a DC motor being simulated for a set amount of time, using the single-phase dcmotor model. 
+Example of a DC motor being simulated for a set amount of time, using the single-phase dcmotor model. """ import math from progpy.models import dcmotor_singlephase + def run_example(): motor = dcmotor_singlephase.DCMotorSP() - + def future_loading(t, x=None): f = 0.5 - - # Simple load proportional to rotor speed. + + # Simple load proportional to rotor speed. # This is a typical, hyper-simplified model of a fixed-pitch propeller directly attached to the motor shaft such that the resistant torque # becomes: Cq * omega^2, where Cq is a (assumed to be) constant depending on the propeller profile and omega is the rotor speed. # Since there's no transmission, omega is exactly the speed of the motor shaft. if x is None: # First load (before state is initialized) t_l = 0.0 else: - t_l = 1e-5 * x['v_rot']**2.0 - return motor.InputContainer({ - 'v': 10.0 + 2.0 * math.sin(math.tau * f * t), # voltage input assumed sinusoidal just to show variations in the input. No physical meaning. - 't_l': t_l # assuming constant load (simple) - }) + t_l = 1e-5 * x["v_rot"] ** 2.0 + return motor.InputContainer( + { + "v": 10.0 + + 2.0 + * math.sin( + math.tau * f * t + ), # voltage input assumed sinusoidal just to show variations in the input. No physical meaning. + "t_l": t_l, # assuming constant load (simple) + } + ) - simulated_results = motor.simulate_to(2.0, future_loading, dt=1e-3, save_freq=0.1, print=True) + simulated_results = motor.simulate_to( + 2.0, future_loading, dt=1e-3, save_freq=0.1, print=True + ) simulated_results.states.plot(compact=False) -if __name__ == '__main__': + +if __name__ == "__main__": print("Simulation of DC single-phase motor") run_example() diff --git a/docs/_downloads/d8bf57631d1133f1784a65d1b777032f/eol_event.py b/docs/_downloads/d8bf57631d1133f1784a65d1b777032f/eol_event.py new file mode 100644 index 00000000..bb51b129 --- /dev/null +++ b/docs/_downloads/d8bf57631d1133f1784a65d1b777032f/eol_event.py @@ -0,0 +1,63 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. +""" +This example demonstrates a use case where someone wants to predict the first event (i.e., End Of Life (EOL)) of a system. Many system models have multiple events that can occur. In some prognostics applications, users are not interested in predicting a specific event, and are instead interested in when the first event occurs, regardless of the event. This example demonstrates how to predict the first event of a system. + +Method: An instance of ThrownObject is used for this example. In this case it is trivial because the event 'falling' will always occur before 'impact', but for some other models that might not be true. The ThrownObject class is subclassed to add a new event 'EOL' which occurs if any other event occurs. The new model is then instantiated and used for prognostics like in basic_example. Prediction specifically specifies EOL as the event to be predicted. 
+ +Results: + + i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction + ii) Time the event 'EOL' is predicted to occur (with uncertainty) + iii) Histogram of the event 'EOL' +""" + +import matplotlib.pyplot as plt +from progpy.models import ThrownObject +from progpy.predictors import MonteCarlo +from progpy.uncertain_data import ScalarData + + +def run_example(): + # Step 1: Define subclass with EOL event + # Similar to the progpy 'events' example, but with an EOL event + class ThrownObjectWithEOL(ThrownObject): + events = ThrownObject.events + ["EOL"] + + def event_state(self, x): + es = super().event_state(x) + # Add EOL Event (minimum event state) + es["EOL"] = min(list(es.values())) + return es + + def threshold_met(self, x): + t_met = super().threshold_met(x) + # Add EOL Event (if any events have occured) + t_met["EOL"] = any(list(t_met.values())) + return t_met + + # Step 2: Create instance of subclass + m = ThrownObjectWithEOL(process_noise=1) + + # Step 3: Setup for prediction + pred = MonteCarlo(m) + + def future_loading(t=None, x=None): + return {} # No future loading for ThrownObject + + state = ScalarData(m.initialize()) + + # Step 4: Predict to EOL event + pred_results = pred.predict( + state, future_loading, events=["EOL"], dt=0.01, n_samples=50 + ) + # In this case EOL is when the object starts falling + # But for some models where events aren't sequential, there might be a mixture of events in the EOL + + # Step 5: Plot results + pred_results.time_of_event.plot_hist() + plt.show() + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/d99199ebb4f8a642ab802d2a3ad2b35c/future_loading.ipynb b/docs/_downloads/d99199ebb4f8a642ab802d2a3ad2b35c/future_loading.ipynb index 856f7980..05c22e39 100644 --- a/docs/_downloads/d99199ebb4f8a642ab802d2a3ad2b35c/future_loading.ipynb +++ b/docs/_downloads/d99199ebb4f8a642ab802d2a3ad2b35c/future_loading.ipynb @@ -1,54 +1,273 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample demonstrating ways to use future loading. 
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from progpy.models import BatteryCircuit\nfrom statistics import mean\nfrom numpy.random import normal\n\ndef run_example(): \n m = BatteryCircuit()\n\n ## Example 1: Variable loading \n def future_loading(t, x=None):\n # Variable (piece-wise) future loading scheme \n if (t < 600):\n i = 2\n elif (t < 900):\n i = 1\n elif (t < 1800):\n i = 4\n elif (t < 3000):\n i = 2 \n else:\n i = 3\n return m.InputContainer({'i': i})\n \n # Simulate to threshold\n options = {\n 'save_freq': 100, # Frequency at which results are saved\n 'dt': 2 # Timestep\n }\n simulated_results = m.simulate_to_threshold(future_loading, **options)\n\n # Now lets plot the inputs and event_states\n simulated_results.inputs.plot(ylabel = 'Variable Load Current (amps)')\n simulated_results.event_states.plot(ylabel = 'Variable Load Event State')\n\n ## Example 2: Moving Average loading \n # This is useful in cases where you are running reoccuring simulations, and are measuring the actual load on the system, \n # but dont have a good way of predicting it, and you expect loading to be steady\n\n def future_loading(t, x=None):\n return future_loading.load\n future_loading.load = m.InputContainer({key : 0 for key in m.inputs})\n\n # Lets define another function to handle the moving average logic\n window = 10 # Number of elements in window\n def moving_avg(i):\n for key in m.inputs:\n moving_avg.loads[key].append(i[key])\n if len(moving_avg.loads[key]) > window:\n del moving_avg.loads[key][0] # Remove first item\n\n # Update future loading eqn\n future_loading.load = {key : mean(moving_avg.loads[key]) for key in m.inputs} \n moving_avg.loads = {key : [] for key in m.inputs} \n\n # OK, we've setup the logic of the moving average. \n # Now lets say you have some measured loads to add\n measured_loads = [10, 11.5, 12.0, 8, 2.1, 1.8, 1.99, 2.0, 2.01, 1.89, 1.92, 2.01, 2.1, 2.2]\n \n # We're going to feed these into the future loading eqn\n for load in measured_loads:\n moving_avg({'i': load})\n \n # Now the future_loading eqn is setup to use the moving average of whats been seen\n # Simulate to threshold\n simulated_results = m.simulate_to_threshold(future_loading, **options)\n\n # Now lets plot the inputs and event_states\n simulated_results.inputs.plot(ylabel = 'Moving Average Current (amps)')\n simulated_results.event_states.plot(ylabel = 'Moving Average Event State')\n\n # In this case, this estimate is wrong because loading will not be steady, but at least it would give you an approximation.\n\n # If more measurements are received, the user could estimate the moving average here and then run a new simulation. \n\n ## Example 3: Gaussian Distribution \n # In this example we will still be doing a variable loading like the first option, but we are going to use a \n # gaussian distribution for each input. 
\n\n def future_loading(t, x=None):\n # Variable (piece-wise) future loading scheme \n if (t < 600):\n i = 2\n elif (t < 900):\n i = 1\n elif (t < 1800):\n i = 4\n elif (t < 3000):\n i = 2 \n else:\n i = 3\n return m.InputContainer({'i': normal(i, future_loading.std)})\n future_loading.std = 0.2\n\n # Simulate to threshold\n simulated_results = m.simulate_to_threshold(future_loading, **options)\n\n # Now lets plot the inputs and event_states\n simulated_results.inputs.plot(ylabel = 'Variable Gaussian Current (amps)')\n simulated_results.event_states.plot(ylabel = 'Variable Gaussian Event State')\n\n # Example 4: Gaussian- increasing with time\n # For this we're using moving average. This is realistic because the further out from current time you get, \n # the more uncertainty there is in your prediction. \n\n def future_loading(t, x=None):\n std = future_loading.base_std + future_loading.std_slope * (t - future_loading.t)\n return {key : normal(future_loading.load[key], std) for key in future_loading.load.keys()}\n future_loading.load = {key : 0 for key in m.inputs} \n future_loading.base_std = 0.001\n future_loading.std_slope = 1e-4\n future_loading.t = 0\n\n # Lets define another function to handle the moving average logic\n window = 10 # Number of elements in window\n def moving_avg(i):\n for key in m.inputs:\n moving_avg.loads[key].append(i[key])\n if len(moving_avg.loads[key]) > window:\n del moving_avg.loads[key][0] # Remove first item\n\n # Update future loading eqn\n future_loading.load = {key : mean(moving_avg.loads[key]) for key in m.inputs} \n moving_avg.loads = {key : [] for key in m.inputs} \n\n # OK, we've setup the logic of the moving average. \n # Now lets say you have some measured loads to add\n measured_loads = [10, 11.5, 12.0, 8, 2.1, 1.8, 1.99, 2.0, 2.01, 1.89, 1.92, 2.01, 2.1, 2.2]\n \n # We're going to feed these into the future loading eqn\n for load in measured_loads:\n moving_avg({'i': load})\n\n # Simulate to threshold\n simulated_results = m.simulate_to_threshold(future_loading, **options)\n\n # Now lets plot the inputs and event_states\n simulated_results.inputs.plot(ylabel = 'Moving Average Current (amps)')\n simulated_results.event_states.plot(ylabel = 'Moving Average Event State')\n \n # In this example future_loading.t has to be updated with current time before each prediction.\n \n # Example 5 Function of state\n # here we're pretending that input is a function of SOC. It increases as we approach SOC\n\n def future_loading(t, x=None):\n if x is not None:\n event_state = future_loading.event_state(x)\n return m.InputContainer({'i': future_loading.start + (1-event_state['EOD']) * future_loading.slope}) # default\n return m.InputContainer({'i': future_loading.start})\n future_loading.t = 0\n future_loading.event_state = m.event_state\n future_loading.slope = 2 # difference between input with EOD = 1 and 0. 
\n future_loading.start = 0.5\n\n # Simulate to threshold\n simulated_results = m.simulate_to_threshold(future_loading, **options)\n\n # Now lets plot the inputs and event_states\n simulated_results.inputs.plot(ylabel = 'Moving Average Current (amps)')\n simulated_results.event_states.plot(ylabel = 'Moving Average Event State')\n\n # In this example future_loading.t has to be updated with current time before each prediction.\n\n # Show plots\n import matplotlib.pyplot as plt\n plt.show()\n\n# This allows the module to be executed directly \nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample demonstrating ways to use future loading. \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from progpy.models import BatteryCircuit\n", + "from statistics import mean\n", + "from numpy.random import normal\n", + "\n", + "\n", + "def run_example():\n", + " m = BatteryCircuit()\n", + "\n", + " ## Example 1: Variable loading\n", + " def future_loading(t, x=None):\n", + " # Variable (piece-wise) future loading scheme\n", + " if t < 600:\n", + " i = 2\n", + " elif t < 900:\n", + " i = 1\n", + " elif t < 1800:\n", + " i = 4\n", + " elif t < 3000:\n", + " i = 2\n", + " else:\n", + " i = 3\n", + " return m.InputContainer({\"i\": i})\n", + "\n", + " # Simulate to threshold\n", + " options = {\n", + " \"save_freq\": 100, # Frequency at which results are saved\n", + " \"dt\": 2, # Timestep\n", + " }\n", + " simulated_results = m.simulate_to_threshold(future_loading, **options)\n", + "\n", + " # Now lets plot the inputs and event_states\n", + " simulated_results.inputs.plot(ylabel=\"Variable Load Current (amps)\")\n", + " simulated_results.event_states.plot(ylabel=\"Variable Load Event State\")\n", + "\n", + " ## Example 2: Moving Average loading\n", + " # This is useful in cases where you are running reoccuring simulations, and are measuring the actual load on the system,\n", + " # but dont have a good way of predicting it, and you expect loading to be steady\n", + "\n", + " def future_loading(t, x=None):\n", + " return future_loading.load\n", + "\n", + " future_loading.load = m.InputContainer({key: 0 for key in m.inputs})\n", + "\n", + " # Lets define another function to handle the moving average logic\n", + " window = 10 # Number of elements in window\n", + "\n", + " def moving_avg(i):\n", + " for key in m.inputs:\n", + " moving_avg.loads[key].append(i[key])\n", + " if len(moving_avg.loads[key]) > window:\n", + " del moving_avg.loads[key][0] # Remove first item\n", + "\n", + " # Update future loading eqn\n", + " future_loading.load = {key: mean(moving_avg.loads[key]) for key in m.inputs}\n", + "\n", + " moving_avg.loads = {key: [] for key in m.inputs}\n", + "\n", + " # OK, we've setup the logic of the moving average.\n", + " # Now lets say you have some measured loads to 
add\n", + " measured_loads = [\n", + " 10,\n", + " 11.5,\n", + " 12.0,\n", + " 8,\n", + " 2.1,\n", + " 1.8,\n", + " 1.99,\n", + " 2.0,\n", + " 2.01,\n", + " 1.89,\n", + " 1.92,\n", + " 2.01,\n", + " 2.1,\n", + " 2.2,\n", + " ]\n", + "\n", + " # We're going to feed these into the future loading eqn\n", + " for load in measured_loads:\n", + " moving_avg({\"i\": load})\n", + "\n", + " # Now the future_loading eqn is setup to use the moving average of whats been seen\n", + " # Simulate to threshold\n", + " simulated_results = m.simulate_to_threshold(future_loading, **options)\n", + "\n", + " # Now lets plot the inputs and event_states\n", + " simulated_results.inputs.plot(ylabel=\"Moving Average Current (amps)\")\n", + " simulated_results.event_states.plot(ylabel=\"Moving Average Event State\")\n", + "\n", + " # In this case, this estimate is wrong because loading will not be steady, but at least it would give you an approximation.\n", + "\n", + " # If more measurements are received, the user could estimate the moving average here and then run a new simulation.\n", + "\n", + " ## Example 3: Gaussian Distribution\n", + " # In this example we will still be doing a variable loading like the first option, but we are going to use a\n", + " # gaussian distribution for each input.\n", + "\n", + " def future_loading(t, x=None):\n", + " # Variable (piece-wise) future loading scheme\n", + " if t < 600:\n", + " i = 2\n", + " elif t < 900:\n", + " i = 1\n", + " elif t < 1800:\n", + " i = 4\n", + " elif t < 3000:\n", + " i = 2\n", + " else:\n", + " i = 3\n", + " return m.InputContainer({\"i\": normal(i, future_loading.std)})\n", + "\n", + " future_loading.std = 0.2\n", + "\n", + " # Simulate to threshold\n", + " simulated_results = m.simulate_to_threshold(future_loading, **options)\n", + "\n", + " # Now lets plot the inputs and event_states\n", + " simulated_results.inputs.plot(ylabel=\"Variable Gaussian Current (amps)\")\n", + " simulated_results.event_states.plot(ylabel=\"Variable Gaussian Event State\")\n", + "\n", + " # Example 4: Gaussian- increasing with time\n", + " # For this we're using moving average. 
This is realistic because the further out from current time you get,\n", + " # the more uncertainty there is in your prediction.\n", + "\n", + " def future_loading(t, x=None):\n", + " std = future_loading.base_std + future_loading.std_slope * (\n", + " t - future_loading.t\n", + " )\n", + " return {\n", + " key: normal(future_loading.load[key], std)\n", + " for key in future_loading.load.keys()\n", + " }\n", + "\n", + " future_loading.load = {key: 0 for key in m.inputs}\n", + " future_loading.base_std = 0.001\n", + " future_loading.std_slope = 1e-4\n", + " future_loading.t = 0\n", + "\n", + " # Lets define another function to handle the moving average logic\n", + " window = 10 # Number of elements in window\n", + "\n", + " def moving_avg(i):\n", + " for key in m.inputs:\n", + " moving_avg.loads[key].append(i[key])\n", + " if len(moving_avg.loads[key]) > window:\n", + " del moving_avg.loads[key][0] # Remove first item\n", + "\n", + " # Update future loading eqn\n", + " future_loading.load = {key: mean(moving_avg.loads[key]) for key in m.inputs}\n", + "\n", + " moving_avg.loads = {key: [] for key in m.inputs}\n", + "\n", + " # OK, we've setup the logic of the moving average.\n", + " # Now lets say you have some measured loads to add\n", + " measured_loads = [\n", + " 10,\n", + " 11.5,\n", + " 12.0,\n", + " 8,\n", + " 2.1,\n", + " 1.8,\n", + " 1.99,\n", + " 2.0,\n", + " 2.01,\n", + " 1.89,\n", + " 1.92,\n", + " 2.01,\n", + " 2.1,\n", + " 2.2,\n", + " ]\n", + "\n", + " # We're going to feed these into the future loading eqn\n", + " for load in measured_loads:\n", + " moving_avg({\"i\": load})\n", + "\n", + " # Simulate to threshold\n", + " simulated_results = m.simulate_to_threshold(future_loading, **options)\n", + "\n", + " # Now lets plot the inputs and event_states\n", + " simulated_results.inputs.plot(ylabel=\"Moving Average Current (amps)\")\n", + " simulated_results.event_states.plot(ylabel=\"Moving Average Event State\")\n", + "\n", + " # In this example future_loading.t has to be updated with current time before each prediction.\n", + "\n", + " # Example 5 Function of state\n", + " # here we're pretending that input is a function of SOC. 
It increases as we approach SOC\n", + "\n", + " def future_loading(t, x=None):\n", + " if x is not None:\n", + " event_state = future_loading.event_state(x)\n", + " return m.InputContainer(\n", + " {\n", + " \"i\": future_loading.start\n", + " + (1 - event_state[\"EOD\"]) * future_loading.slope\n", + " }\n", + " ) # default\n", + " return m.InputContainer({\"i\": future_loading.start})\n", + "\n", + " future_loading.t = 0\n", + " future_loading.event_state = m.event_state\n", + " future_loading.slope = 2 # difference between input with EOD = 1 and 0.\n", + " future_loading.start = 0.5\n", + "\n", + " # Simulate to threshold\n", + " simulated_results = m.simulate_to_threshold(future_loading, **options)\n", + "\n", + " # Now lets plot the inputs and event_states\n", + " simulated_results.inputs.plot(ylabel=\"Moving Average Current (amps)\")\n", + " simulated_results.event_states.plot(ylabel=\"Moving Average Event State\")\n", + "\n", + " # In this example future_loading.t has to be updated with current time before each prediction.\n", + "\n", + " # Show plots\n", + " import matplotlib.pyplot as plt\n", + "\n", + " plt.show()\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/da191f26e484dddead89fb142ee68a12/sim.py b/docs/_downloads/da191f26e484dddead89fb142ee68a12/sim.py index 4ffb2be8..68eb289d 100644 --- a/docs/_downloads/da191f26e484dddead89fb142ee68a12/sim.py +++ b/docs/_downloads/da191f26e484dddead89fb142ee68a12/sim.py @@ -9,51 +9,58 @@ # VVV Uncomment this to use Electro Chemistry Model VVV # from progpy.models import BatteryElectroChem as Battery -def run_example(): + +def run_example(): # Step 1: Create a model object batt = Battery() - # Step 2: Define future loading function + # Step 2: Define future loading function def future_loading(t, x=None): - # Variable (piece-wise) future loading scheme - if (t < 600): + # Variable (piece-wise) future loading scheme + if t < 600: i = 2 - elif (t < 900): + elif t < 900: i = 1 - elif (t < 1800): + elif t < 1800: i = 4 - elif (t < 3000): - i = 2 + elif t < 3000: + i = 2 else: i = 3 - return batt.InputContainer({'i': i}) + return batt.InputContainer({"i": i}) + # simulate for 200 seconds - print('\n\n------------------------------------------------') - print('Simulating for 200 seconds\n\n') - simulated_results = batt.simulate_to(200, future_loading, print = True, progress = True) + print("\n\n------------------------------------------------") + print("Simulating for 200 seconds\n\n") + simulated_results = batt.simulate_to(200, future_loading, print=True, progress=True) # Simulate to threshold - print('\n\n------------------------------------------------') - print('Simulating to threshold\n\n') + print("\n\n------------------------------------------------") + print("Simulating to threshold\n\n") options = { - 'save_freq': 100, # Frequency at which results are saved - 'dt': 2, # Timestep - 'print': True, - 'progress': True + "save_freq": 100, # Frequency at which results are saved + "dt": 2, # 
Timestep + "print": True, + "progress": True, } simulated_results = batt.simulate_to_threshold(future_loading, **options) # Alternately, you can set a max step size and allow step size to be adjusted automatically - options['dt'] = ('auto', 2) # set step size automatically, with a max of 2 seconds - options['save_freq'] = 201 # Save every 201 seconds - options['save_pts'] = [250, 772, 1023] # Special points we sould like to see reported + options["dt"] = ("auto", 2) # set step size automatically, with a max of 2 seconds + options["save_freq"] = 201 # Save every 201 seconds + options["save_pts"] = [ + 250, + 772, + 1023, + ] # Special points we sould like to see reported simulated_results = batt.simulate_to_threshold(future_loading, **options) # Note that even though the step size is 2, the odd points in the save frequency are met perfectly, dt is adjusted automatically to capture the save points # You can also change the integration method. For example: - options['integration_method'] = 'rk4' # Using Runge-Kutta 4th order + options["integration_method"] = "rk4" # Using Runge-Kutta 4th order simulated_results_rk4 = batt.simulate_to_threshold(future_loading, **options) -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/daafe64ff08a2ab2ed2ab052cf9985bf/sim_pump.py b/docs/_downloads/daafe64ff08a2ab2ed2ab052cf9985bf/sim_pump.py index f895f749..446bec20 100644 --- a/docs/_downloads/daafe64ff08a2ab2ed2ab052cf9985bf/sim_pump.py +++ b/docs/_downloads/daafe64ff08a2ab2ed2ab052cf9985bf/sim_pump.py @@ -2,61 +2,68 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a centrifugal pump being simulated until threshold is met. +Example of a centrifugal pump being simulated until threshold is met. 
""" from progpy.models import CentrifugalPump -import matplotlib.pyplot as plt +import matplotlib.pyplot as plt from progpy.sim_result import SimResult -def run_example(): + + +def run_example(): # Step 1: Setup Pump - pump = CentrifugalPump(process_noise= 0) - pump.parameters['x0']['wA'] = 0.01 # Set Wear Rate + pump = CentrifugalPump(process_noise=0) + pump.parameters["x0"]["wA"] = 0.01 # Set Wear Rate # Step 2: Setup Future Loading cycle_time = 3600 + def future_loading(t, x=None): t = t % cycle_time - if t < cycle_time/2.0: + if t < cycle_time / 2.0: V = 471.2389 - elif t < cycle_time/2 + 100: - V = 471.2389 + (t-cycle_time/2) + elif t < cycle_time / 2 + 100: + V = 471.2389 + (t - cycle_time / 2) elif t < cycle_time - 100: V = 571.2389 else: - V = 471.2398 - (t-cycle_time) + V = 471.2398 - (t - cycle_time) - return pump.InputContainer({ - 'Tamb': 290, - 'V': V, - 'pdisch': 928654, - 'psuc': 239179, - 'wsync': V * 0.8 - }) + return pump.InputContainer( + {"Tamb": 290, "V": V, "pdisch": 928654, "psuc": 239179, "wsync": V * 0.8} + ) # Step 3: Sim - first_output = pump.output(pump.initialize(future_loading(0),{})) - config = { - 'horizon': 1e5, - 'save_freq': 1e3, - 'print': True - } - simulated_results = pump.simulate_to_threshold(future_loading, first_output, **config) + first_output = pump.output(pump.initialize(future_loading(0), {})) + config = {"horizon": 1e5, "save_freq": 1e3, "print": True} + simulated_results = pump.simulate_to_threshold( + future_loading, first_output, **config + ) # Step 4: Plot Results - simulated_results.inputs.plot(compact = False, title = 'Inputs', xlabel = 'time', ylabel = {lbl: lbl for lbl in pump.inputs}) - simulated_results.outputs.plot(compact = False, title = 'Outputs', xlabel = 'time', ylabel = '') - simulated_results.states.plot(compact = False, title = 'States', xlabel = 'time', ylabel = '') - simulated_results.event_states.plot(compact = False, title = 'Events', xlabel = 'time', ylabel = '') + simulated_results.inputs.plot( + compact=False, + title="Inputs", + xlabel="time", + ylabel={lbl: lbl for lbl in pump.inputs}, + ) + simulated_results.outputs.plot( + compact=False, title="Outputs", xlabel="time", ylabel="" + ) + simulated_results.states.plot( + compact=False, title="States", xlabel="time", ylabel="" + ) + simulated_results.event_states.plot( + compact=False, title="Events", xlabel="time", ylabel="" + ) thresholds_met = [pump.threshold_met(x) for x in simulated_results.states] thresholds_met = SimResult(simulated_results.times, thresholds_met) - thresholds_met.plot(compact = False, title = 'Threshold Met', xlabel = 'time', ylabel = '') - - + thresholds_met.plot(compact=False, title="Threshold Met", xlabel="time", ylabel="") plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/daee0732ebc9103bad35edc2271b7f8d/measurement_eqn_example.py b/docs/_downloads/daee0732ebc9103bad35edc2271b7f8d/measurement_eqn_example.py new file mode 100644 index 00000000..7c261ba2 --- /dev/null +++ b/docs/_downloads/daee0732ebc9103bad35edc2271b7f8d/measurement_eqn_example.py @@ -0,0 +1,116 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. + +""" +This example performs a state estimation with uncertainty given a Prognostics Model for a system in which not all output values are measured. 
+ +Method: An instance of the BatteryCircuit model in progpy is created. We assume that we are only measuring one of the output values, and we define a subclass to remove the other output value. + Estimation of the current state is performed at various time steps, using the defined state_estimator. + +Results: + i) Estimate of the current state given various times + ii) Display of results, such as prior and posterior state estimate values and SOC +""" + +from progpy.models import BatteryCircuit as Battery +# VVV Uncomment this to use Electro Chemistry Model VVV +# from progpy.models import BatteryElectroChem as Battery + +from progpy import * + + +def run_example(): + # Step 1: Subclass model with measurement equation + # In this case we're only measuring 'v' (i.e., removing temperature) + # To do this we're creating a new class that's subclassed from the complete model. + # To change the outputs we just have to override outputs (the list of keys) + class MyBattery(Battery): + outputs = ["v"] + + # Step 2: Setup model & future loading + batt = MyBattery() + loads = [ # Define loads here to accelerate prediction + batt.InputContainer({"i": 2}), + batt.InputContainer({"i": 1}), + batt.InputContainer({"i": 4}), + batt.InputContainer({"i": 2}), + batt.InputContainer({"i": 3}), + ] + + def future_loading(t, x=None): + # Variable (piece-wise) future loading scheme + if t < 600: + return loads[0] + elif t < 900: + return loads[1] + elif t < 1800: + return loads[2] + elif t < 3000: + return loads[3] + return loads[-1] + + x0 = batt.parameters["x0"] + + # Step 3: Use the updated model + filt = state_estimators.ParticleFilter(batt, x0) + + # Step 4: Run step and print results + print("Running state estimation step with only one of 2 outputs measured") + + # Print Prior + print("\nPrior State:", filt.x.mean) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) + + # Estimate Step + # Note, only voltage was needed in the measurement step, since that is the only output we're measuring + t = 0.1 + load = future_loading(t) + filt.estimate(t, load, {"v": 3.915}) + + # Print Posterior + print("\nPosterior State:", filt.x.mean) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) + + # Another Estimate Step + t = 0.2 + load = future_loading(t) + filt.estimate(t, load, {"v": 3.91}) + + # Print Posterior Again + print("\nPosterior State (t={}):".format(t), filt.x.mean) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) + + # Note that the particle filter was still able to perform state estimation. + # The updated outputs can be used for any case where the measurement doesn't match the model outputs + # For example, when units are different, or when the measurement is some combination of the outputs + # These are a little more complicated, since they require an instance of the parent class. 
For example: + + parent = Battery() + + class MyBattery(Battery): + outputs = ["tv"] # output is temperature * voltage (for some reason) + + def output(self, x): + parent.parameters = ( + self.parameters + ) # only needed if you expect to change parameters + z = parent.output(x) + return self.OutputContainer({"tv": z["v"] * z["t"]}) + + batt = MyBattery() + filt = state_estimators.ParticleFilter(batt, x0) + + print("-----------------\n\nExample 2") + print("\nPrior State:", filt.x.mean) + print("\toutput: ", batt.output(filt.x.mean)) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) + t = 0.1 + load = future_loading(t) + filt.estimate(t, load, {"tv": 80}) + print("\nPosterior State:", filt.x.mean) + print("\toutput: ", batt.output(filt.x.mean)) + print("\tSOC: ", batt.event_state(filt.x.mean)["EOD"]) + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/dc9f2bf22220701a3b515335cb4d8038/full_lstm_model.py b/docs/_downloads/dc9f2bf22220701a3b515335cb4d8038/full_lstm_model.py index b8f36b45..6dd6f918 100644 --- a/docs/_downloads/dc9f2bf22220701a3b515335cb4d8038/full_lstm_model.py +++ b/docs/_downloads/dc9f2bf22220701a3b515335cb4d8038/full_lstm_model.py @@ -2,11 +2,11 @@ # This ensures that the directory containing examples is in the python search directories """ -Example building a full model with events and thresholds using LSTMStateTransitionModel. +Example building a full model with events and thresholds using LSTMStateTransitionModel. .. dropdown:: More details - In this example, we generate fake data using the ThrownObject model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. + In this example, we generate fake data using the ThrownObject model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. We then create a subclass of the LSTMStateTransitionModel, defining the event_state and threshold equations as a function of output. We use the generated model and compare to the original model. """ @@ -16,50 +16,68 @@ from progpy.data_models import LSTMStateTransitionModel from progpy.models import ThrownObject + def run_example(): # ----------------------------------------------------- # Method 1 - manual definition - # In this example we complete the models by manually defining event_state + # In this example we complete the models by manually defining event_state # and thresholds_met as function of output. 
# ----------------------------------------------------- TIMESTEP = 0.01 m = ThrownObject() + def future_loading(t, x=None): - return m.InputContainer({}) # No input for thrown object + return m.InputContainer({}) # No input for thrown object # Step 1: Generate additional data - # We will use data generated above, but we also want data at additional timesteps - print('Generating data...') - data = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP, dt=TIMESTEP) - data_half = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2) - data_quarter = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4) - data_twice = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2) - data_four = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP*4, dt=TIMESTEP*4) + # We will use data generated above, but we also want data at additional timesteps + print("Generating data...") + data = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP, dt=TIMESTEP + ) + data_half = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP / 2, dt=TIMESTEP / 2 + ) + data_quarter = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP / 4, dt=TIMESTEP / 4 + ) + data_twice = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP * 2, dt=TIMESTEP * 2 + ) + data_four = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP * 4, dt=TIMESTEP * 4 + ) # Step 2: Data Prep # We need to add the timestep as a input u = np.array([[TIMESTEP] for _ in data.inputs]) - u_half = np.array([[TIMESTEP/2] for _ in data_half.inputs]) - u_quarter = np.array([[TIMESTEP/4] for _ in data_quarter.inputs]) - u_twice = np.array([[TIMESTEP*2] for _ in data_twice.inputs]) - u_four = np.array([[TIMESTEP*4] for _ in data_four.inputs]) + u_half = np.array([[TIMESTEP / 2] for _ in data_half.inputs]) + u_quarter = np.array([[TIMESTEP / 4] for _ in data_quarter.inputs]) + u_twice = np.array([[TIMESTEP * 2] for _ in data_twice.inputs]) + u_four = np.array([[TIMESTEP * 4] for _ in data_four.inputs]) - # In this case we are saying that velocity is directly measurable, + # In this case we are saying that velocity is directly measurable, # unlike the original model. This is necessary to calculate the events. 
# Since the outputs will then match the states, we pass in the states below u_data = [u, u_half, u_quarter, u_twice, u_four] - z_data = [data.states, data_half.states, data_quarter.states, data_twice.states, data_four.states] + z_data = [ + data.states, + data_half.states, + data_quarter.states, + data_twice.states, + data_four.states, + ] # Step 3: Create model - print('Creating model...') + print("Creating model...") - # Create a subclass of LSTMStateTransitionModel, + # Create a subclass of LSTMStateTransitionModel, # overriding event-related methods and members class LSTMThrownObject(LSTMStateTransitionModel): events = [ - 'falling', # Event- object is falling - 'impact' # Event- object has impacted ground + "falling", # Event- object is falling + "impact", # Event- object has impacted ground ] def initialize(self, u=None, z=None): @@ -71,55 +89,65 @@ def event_state(self, x): # Using class name instead of self allows the class to be subclassed z = LSTMThrownObject.output(self, x) # Logic from ThrownObject.event_state, using output instead of state - self.max_x = max(self.max_x, z['x']) # Maximum altitude + self.max_x = max(self.max_x, z["x"]) # Maximum altitude return { - 'falling': max(z['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed - 'impact': max(z['x']/self.max_x,0) # 1 until falling begins, then it's fraction of height + "falling": max( + z["v"] / self.parameters["throwing_speed"], 0 + ), # Throwing speed is max speed + "impact": max( + z["x"] / self.max_x, 0 + ), # 1 until falling begins, then it's fraction of height } def threshold_met(self, x): z = LSTMThrownObject.output(self, x) # Logic from ThrownObject.threshold_met, using output instead of state - return { - 'falling': z['v'] < 0, - 'impact': z['x'] <= 0 - } - + return {"falling": z["v"] < 0, "impact": z["x"] <= 0} + # Step 4: Generate Model - print('Building model...') + print("Building model...") m2 = LSTMThrownObject.from_data( - inputs=u_data, + inputs=u_data, outputs=z_data, - window=4, - epochs=30, - input_keys = ['dt'], - output_keys = m.states) + window=4, + epochs=30, + input_keys=["dt"], + output_keys=m.states, + ) m2.plot_history() # Step 5: Simulate with model t_counter = 0 x_counter = m.initialize() - def future_loading3(t, x = None): + + def future_loading3(t, x=None): nonlocal t_counter, x_counter - z = m2.InputContainer({'x_t-1': x_counter['x'], 'v_t-1': x_counter['v'], 'dt': t - t_counter}) + z = m2.InputContainer( + {"x_t-1": x_counter["x"], "v_t-1": x_counter["v"], "dt": t - t_counter} + ) x_counter = m.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z # Use new dt, not used in training - # Using a dt not used in training will demonstrate the model's + # Using a dt not used in training will demonstrate the model's # ability to handle different timesteps not part of training set - data = m.simulate_to_threshold(future_loading, events='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3) - results3 = m2.simulate_to_threshold(future_loading3, events='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3) + data = m.simulate_to_threshold( + future_loading, events="impact", dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) + results3 = m2.simulate_to_threshold( + future_loading3, events="impact", dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) # Step 6: Compare Results - print('Comparing results...') - print('Predicted impact time:') - print('\tOriginal: ', data.times[-1]) - print('\tLSTM: ', results3.times[-1]) - data.outputs.plot(title='original model') - 
results3.outputs.plot(title='generated model') + print("Comparing results...") + print("Predicted impact time:") + print("\tOriginal: ", data.times[-1]) + print("\tLSTM: ", results3.times[-1]) + data.outputs.plot(title="original model") + results3.outputs.plot(title="generated model") plt.show() -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/dd6ad40a2453a30b26fc052906232b15/uav_dynamics_model.py b/docs/_downloads/dd6ad40a2453a30b26fc052906232b15/uav_dynamics_model.py new file mode 100644 index 00000000..ed14def9 --- /dev/null +++ b/docs/_downloads/dd6ad40a2453a30b26fc052906232b15/uav_dynamics_model.py @@ -0,0 +1,236 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the +# National Aeronautics and Space Administration. All Rights Reserved. + +""" +Example of generating a trajectory for a small rotorcraft through a set of coarse waypoints, and simulate the rotorcraft flight using a 6-dof model. +""" + +import matplotlib.pyplot as plt +import numpy as np + +from progpy.utils.traj_gen import Trajectory +from progpy.models.aircraft_model import SmallRotorcraft +from progpy.loading.controllers import LQR_I, LQR + + +def run_example(): + # Initialize vehicle + vehicle = SmallRotorcraft( + dt=0.05, vehicle_model="tarot18", process_noise=0, measurement_noise=0 + ) + + # EXAMPLE 1: + # Define coarse waypoints: latitudes, longitudes, and altitudes are + # required, ETAs are optional + # Latitudes and longitudes must be defined as numpy arrays of size n x 1 + # and with unit radians + # Altitudes must be defined as numpy arrays of size n x 1 with unit meters + # ETAs (if included) must be defined as a list of datetime objects + # If ETAs are not included, speeds must be defined (see Example 2) + + # Here, we specify waypoints in a dictionary and then pass + # lat/lon/alt/ETAs into the trajectory class + lat_deg = np.array( + [ + 37.09776, + 37.09776, + 37.09776, + 37.09798, + 37.09748, + 37.09665, + 37.09703, + 37.09719, + 37.09719, + 37.09719, + 37.09719, + 37.09748, + 37.09798, + 37.09776, + 37.09776, + ] + ) + lon_deg = np.array( + [ + -76.38631, + -76.38629, + -76.38629, + -76.38589, + -76.3848, + -76.38569, + -76.38658, + -76.38628, + -76.38628, + -76.38628, + -76.38628, + -76.3848, + -76.38589, + -76.38629, + -76.38629, + ] + ) + alt_ft = np.array( + [ + -1.9682394, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 0.0, + 0.0, + 164.01995, + 164.01995, + 164.01995, + 164.01995, + 0.0, + ] + ) + time_unix = [ + 1544188336, + 1544188358, + 1544188360, + 1544188377, + 1544188394, + 1544188411, + 1544188428, + 1544188496, + 1544188539, + 1544188584, + 1544188601, + 1544188635, + 1544188652, + 1544188672, + 1544188692, + ] + + # Generate trajectory + # ===================== + # Generate trajectory object and pass the route (waypoints, ETA) to it + traj = Trajectory(lat=lat_deg, lon=lon_deg, alt=alt_ft * 0.3048, etas=time_unix) + + ref_traj = traj.generate(dt=vehicle.parameters["dt"]) + + # Define controller and build scheduled control. The controller acts as a + # future_loading function when simulating + # We use a linear quadratic regulator (LQR), which tries to minimize the + # cost function defined by: + # J = \int{ x^T Q x + u^T R u \mathrm{d}t } + # Where x is the state vector, u is the input vector, t is time, Q is the + # state error penalty matrix, and R is the input generation penalty matrix. 
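    # For intuition only: the gain matrix K referenced in the next comment can be
    # computed from A, B, Q, R by solving the continuous algebraic Riccati equation,
    # with K = R^-1 B^T P. The standalone sketch below uses a toy double-integrator
    # A, B and hand-picked Q, R; these are illustrative assumptions, not the
    # rotorcraft model or the internals of progpy's LQR class.
    import numpy as np
    from scipy.linalg import solve_continuous_are

    A_toy = np.array([[0.0, 1.0], [0.0, 0.0]])  # toy double integrator: position, velocity
    B_toy = np.array([[0.0], [1.0]])            # acceleration input
    Q_toy = np.diag([10.0, 1.0])                # state error penalty
    R_toy = np.array([[0.1]])                   # input penalty

    # Solve A'P + PA - P B R^-1 B' P + Q = 0 for P, then form the feedback gain
    P_care = solve_continuous_are(A_toy, B_toy, Q_toy, R_toy)
    K_gain = np.linalg.solve(R_toy, B_toy.T @ P_care)  # optimal state feedback, u = -K x
    print("LQR gain K:", K_gain)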
+ # The LQR uses a linearized version of the dynamic system + # (i.e., dxdt = A x + Bu) to find the gain matrix K that minimizes the cost J. + ctrl = LQR(ref_traj, vehicle) + + # Simulate vehicle to fly trajectory + traj_results = vehicle.simulate_to_threshold( + ctrl, dt=vehicle.parameters["dt"], save_freq=vehicle.parameters["dt"] + ) + + # Visualize Results + vehicle.visualize_traj(pred=traj_results, ref=ref_traj) + + # EXAMPLE 2: + # In this example, we define another trajectory through the same + # waypoints but with speeds defined instead of ETAs + + # Generate trajectory object and pass the route (lat/lon/alt, no ETAs) + # and speed information to it + traj_speed = Trajectory( + lat=lat_deg, + lon=lon_deg, + alt=alt_ft * 0.3048, + cruise_speed=8.0, + ascent_speed=2.0, + descent_speed=3.0, + landing_speed=2.0, + ) + ref_traj_speeds = traj_speed.generate(dt=vehicle.parameters["dt"]) + + # Define controller and build scheduled control. This time we'll use LQR_I, + # which is a linear quadratic regulator with integral action. + # The integral action has the same purpose of "I" in PI or PID controllers, + # which is to minimize offset errors in the variable of interest. + # This version of LQR_I compensates for integral errors in the position of + # the vehicle, i.e., x, y, z variables of the state vector. + ctrl_speeds = LQR_I(ref_traj_speeds, vehicle) + + # Set simulation options + options = {"dt": vehicle.parameters["dt"], "save_freq": vehicle.parameters["dt"]} + + # Simulate vehicle to fly trajectory + traj_results_speeds = vehicle.simulate_to_threshold(ctrl_speeds, **options) + + # Visualize results - notice these results are slightly different, since + # the speeds through the waypoints (and therefore the resulting trajectory) + # are different than Example 1 + vehicle.visualize_traj(pred=traj_results_speeds, ref=ref_traj_speeds) + + # EXAMPLE 3: + # In this example, we just want to simulate a specific portion of the + # reference trajectory + # We will simulate the second cruise interval in Example 1, + # i.e. waypoints 10 - 13 (where the first waypoint is index 0). 
+ # We will use the reference trajectory (ref_traj) and controller (ctrl) + # already generated in Example 1 + + # First, we'll re-define the ETAs in the waypoints dictionary + # (since we deleted them from the waypoints in Example 2) + time_unix = np.array( + [ + 1544188336, + 1544188358, + 1544188360, + 1544188377, + 1544188394, + 1544188411, + 1544188428, + 1544188496, + 1544188539, + 1544188584, + 1544188601, + 1544188635, + 1544188652, + 1544188672, + 1544188692, + ] + ) + + # Extract time information for desired interval, starting at waypoint 10 + # and ending at waypoint 13 + start_time = time_unix[10] - time_unix[0] + end_time = time_unix[13] - time_unix[0] + sim_time = end_time - start_time + + # Define initial state, x0, based on reference trajectory at start_time + ind = np.where(ref_traj["t"] == start_time) + x0 = {key: ref_traj[key][ind][0] for key in ref_traj.keys()} + vehicle.parameters["x0"] = x0 + + # Define simulation parameters - note that we must define t0 as start_time + # since we are not starting at the default of t0 = 0 + options = { + "dt": vehicle.parameters["dt"], + "save_freq": vehicle.parameters["dt"], + "t0": start_time, + } + + # Simulate starting from this initial state from start_time to end_time + traj_results_interval = vehicle.simulate_to(sim_time, ctrl, **options) + + # Plot results with Example 1 results to show equivalence on this interval + z_1 = [output["z"] for output in traj_results.outputs] + z_4 = [output["z"] for output in traj_results_interval.outputs] + + fig, ax = plt.subplots() + ax.plot(traj_results.times, z_1, "-b", label="Example 1") + ax.plot(traj_results_interval.times, z_4, "--r", label="Example 3") + ax.set_xlabel("time, s", fontsize=14) + ax.set_ylabel("altitude, m", fontsize=14) + ax.legend() + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/dd90523c294730d38024b15ef5fe8bf6/custom_model.py b/docs/_downloads/dd90523c294730d38024b15ef5fe8bf6/custom_model.py index 8f528f2d..2f441a13 100644 --- a/docs/_downloads/dd90523c294730d38024b15ef5fe8bf6/custom_model.py +++ b/docs/_downloads/dd90523c294730d38024b15ef5fe8bf6/custom_model.py @@ -6,7 +6,7 @@ For most cases, you will be able to use the standard LSTMStateTransitionModel.from_data class with configuration (see the LSTMStateTransitionModel class for more details). However, sometimes you might want to add custom layers, or other complex components. In that case, you will build a custom model and pass it into LSTMStateTransitionModel. -In this example, we generate fake data using the BatteryElectroChemEOD model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. +In this example, we generate fake data using the BatteryElectroChemEOD model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. We build and fit a custom model using keras.layers. Finally, we compare performance to the standard format and the original model. 
""" @@ -19,20 +19,26 @@ from progpy.data_models import LSTMStateTransitionModel from progpy.models import BatteryElectroChemEOD + def run_example(): - print('Generating data...') + print("Generating data...") batt = BatteryElectroChemEOD() - future_loading_eqns = [lambda t, x=None: batt.InputContainer({'i': 1+1.5*load}) for load in range(6)] + future_loading_eqns = [ + lambda t, x=None: batt.InputContainer({"i": 1 + 1.5 * load}) + for load in range(6) + ] # Generate data with different loading and step sizes # Adding the step size as an element of the output training_data = [] input_data = [] output_data = [] for i in range(9): - dt = i/3+0.25 + dt = i / 3 + 0.25 for loading_eqn in future_loading_eqns: - d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) - u = np.array([np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float) + d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) + u = np.array( + [np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float + ) z = d.outputs training_data.append((u, z)) input_data.append(u) @@ -41,35 +47,36 @@ def run_example(): # Step 2: Build standard model print("Building standard model...") m_batt = LSTMStateTransitionModel.from_data( - inputs = input_data, - outputs = output_data, - window=12, - epochs=30, + inputs=input_data, + outputs=output_data, + window=12, + epochs=30, units=64, # Additional units given the increased complexity of the system - input_keys = ['i', 'dt'], - output_keys = ['t', 'v']) + input_keys=["i", "dt"], + output_keys=["t", "v"], + ) # Step 3: Build custom model - print('Building custom model...') + print("Building custom model...") (u_all, z_all) = LSTMStateTransitionModel.pre_process_data(training_data, window=12) - + # Normalize n_inputs = len(training_data[0][0][0]) - u_mean = np.mean(u_all[:,0,:n_inputs], axis=0) - u_std = np.std(u_all[:,0,:n_inputs], axis=0) - # If there's no variation- dont normalize + u_mean = np.mean(u_all[:, 0, :n_inputs], axis=0) + u_std = np.std(u_all[:, 0, :n_inputs], axis=0) + # If there's no variation- dont normalize u_std[u_std == 0] = 1 z_mean = np.mean(z_all, axis=0) z_std = np.std(z_all, axis=0) - # If there's no variation- dont normalize + # If there's no variation- dont normalize z_std[z_std == 0] = 1 # Add output (since z_t-1 is last input) u_mean = np.hstack((u_mean, z_mean)) u_std = np.hstack((u_std, z_std)) - u_all = (u_all - u_mean)/u_std - z_all = (z_all - z_mean)/z_std + u_all = (u_all - u_mean) / u_std + z_all = (z_all - z_mean) / z_std # u_mean and u_std act on the column vector form (from inputcontainer) # so we need to transpose them to a column vector @@ -84,39 +91,47 @@ def run_example(): x = layers.Dense(z_all.shape[1] if z_all.ndim == 2 else 1)(x) model = keras.Model(inputs, x) model.compile(optimizer="rmsprop", loss="mse", metrics=["mae"]) - model.fit(u_all, z_all, epochs=30, callbacks = callbacks, validation_split = 0.1) + model.fit(u_all, z_all, epochs=30, callbacks=callbacks, validation_split=0.1) # Step 4: Build LSTMStateTransitionModel - m_custom = LSTMStateTransitionModel(model, - normalization=normalization, - input_keys = ['i', 'dt'], - output_keys = ['t', 'v'] + m_custom = LSTMStateTransitionModel( + model, + normalization=normalization, + input_keys=["i", "dt"], + output_keys=["t", "v"], ) # Step 5: Simulate - print('Simulating...') + print("Simulating...") t_counter = 0 x_counter = batt.initialize() + def future_loading(t, x=None): - return batt.InputContainer({'i': 3}) + return batt.InputContainer({"i": 3}) - 
def future_loading2(t, x = None): + def future_loading2(t, x=None): nonlocal t_counter, x_counter z = batt.output(x_counter) - z = m_batt.InputContainer({'i': 3, 't_t-1': z['t'], 'v_t-1': z['v'], 'dt': t - t_counter}) + z = m_batt.InputContainer( + {"i": 3, "t_t-1": z["t"], "v_t-1": z["v"], "dt": t - t_counter} + ) x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z + data = batt.simulate_to_threshold(future_loading, dt=1, save_freq=1) results = m_batt.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1) - results_custom = m_custom.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1) + results_custom = m_custom.simulate_to( + data.times[-1], future_loading2, dt=1, save_freq=1 + ) # Step 6: Compare performance - print('Comparing performance...') - data.outputs.plot(title='original model', compact=False) - results.outputs.plot(title='generated model', compact=False) - results_custom.outputs.plot(title='custom model', compact=False) + print("Comparing performance...") + data.outputs.plot(title="original model", compact=False) + results.outputs.plot(title="generated model", compact=False) + results_custom.outputs.plot(title="custom model", compact=False) plt.show() -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/dde96c6cfd6ff506f5048a5c797653e4/kalman_filter.py b/docs/_downloads/dde96c6cfd6ff506f5048a5c797653e4/kalman_filter.py index 2c6ff9cf..39a544e0 100644 --- a/docs/_downloads/dde96c6cfd6ff506f5048a5c797653e4/kalman_filter.py +++ b/docs/_downloads/dde96c6cfd6ff506f5048a5c797653e4/kalman_filter.py @@ -32,8 +32,8 @@ class ThrownObject(LinearModel): Keyword Args ------------ process_noise : Optional, float or Dict[Srt, float] - Process noise (applied at dx/next_state). - Can be number (e.g., .2) applied to every state, a dictionary of values for each + Process noise (applied at dx/next_state). + Can be number (e.g., .2) applied to every state, a dictionary of values for each state (e.g., {'x1': 0.2, 'x2': 0.3}), or a function (x) -> x process_noise_dist : Optional, String distribution for process noise (e.g., normal, uniform, triangular) @@ -53,87 +53,94 @@ class ThrownObject(LinearModel): inputs = [] # no inputs, no way to control states = [ - 'x', # Position (m) - 'v' # Velocity (m/s) - ] + "x", # Position (m) + "v", # Velocity (m/s) + ] outputs = [ - 'x' # Position (m) + "x" # Position (m) ] events = [ - 'impact' # Event- object has impacted ground + "impact" # Event- object has impacted ground ] A = np.array([[0, 1], [0, 0]]) E = np.array([[0], [-9.81]]) C = np.array([[1, 0]]) - F = None # Will override method + F = None # Will override method - # The Default parameters. + # The Default parameters. 
# Overwritten by passing parameters dictionary into constructor default_parameters = { - 'thrower_height': 1.83, # m - 'throwing_speed': 40, # m/s - 'g': -9.81 # Acceleration due to gravity in m/s^2 + "thrower_height": 1.83, # m + "throwing_speed": 40, # m/s + "g": -9.81, # Acceleration due to gravity in m/s^2 } def initialize(self, u=None, z=None): - return self.StateContainer({ - 'x': self.parameters['thrower_height'], - # Thrown, so initial altitude is height of thrower - 'v': self.parameters['throwing_speed'] - # Velocity at which the ball is thrown - this guy is a professional baseball pitcher - }) - + return self.StateContainer( + { + "x": self.parameters["thrower_height"], + # Thrown, so initial altitude is height of thrower + "v": self.parameters["throwing_speed"], + # Velocity at which the ball is thrown - this guy is a professional baseball pitcher + } + ) + # This is actually optional. Leaving thresholds_met empty will use the event state to define thresholds. - # Threshold is met when Event State == 0. + # Threshold is met when Event State == 0. # However, this implementation is more efficient, so we included it def threshold_met(self, x): - return { - 'falling': x['v'] < 0, - 'impact': x['x'] <= 0 - } + return {"falling": x["v"] < 0, "impact": x["x"] <= 0} - def event_state(self, x): - x_max = x['x'] + np.square(x['v'])/(-self.parameters['g']*2) # Use speed and position to estimate maximum height + def event_state(self, x): + x_max = x["x"] + np.square(x["v"]) / ( + -self.parameters["g"] * 2 + ) # Use speed and position to estimate maximum height return { - 'falling': np.maximum(x['v']/self.parameters['throwing_speed'],0), # Throwing speed is max speed - 'impact': np.maximum(x['x']/x_max,0) if x['v'] < 0 else 1 # 1 until falling begins, then it's fraction of height + "falling": np.maximum( + x["v"] / self.parameters["throwing_speed"], 0 + ), # Throwing speed is max speed + "impact": np.maximum(x["x"] / x_max, 0) + if x["v"] < 0 + else 1, # 1 until falling begins, then it's fraction of height } + def run_example(): # Step 1: Instantiate the model - m = ThrownObject(process_noise = 0, measurement_noise = 0) + m = ThrownObject(process_noise=0, measurement_noise=0) # Step 2: Instantiate the Kalman Filter State Estimator # Define the initial state to be slightly off of actual - x_guess = m.StateContainer({'x': 1.75, 'v': 35}) # Guess of initial state + x_guess = m.StateContainer({"x": 1.75, "v": 35}) # Guess of initial state # Note: actual is {'x': 1.83, 'v': 40} kf = KalmanFilter(m, x_guess) # Step 3: Run the Kalman Filter State Estimator - # Here we're using simulated data from the thrown_object. + # Here we're using simulated data from the thrown_object. 
# In a real application you would be using sensor data from the system dt = 0.01 # Time step (s) print_freq = 50 # Print every print_freq'th iteration x = m.initialize() u = m.InputContainer({}) # No input for this model - + for i in range(500): # Get simulated output (would be measured in a real application) z = m.output(x) # Estimate New State - kf.estimate(i*dt, u, z) + kf.estimate(i * dt, u, z) x_est = kf.x.mean # Print Results - if i%print_freq == 0: # Print every print_freq'th iteration - print(f"t: {i*dt:.2f}\n\tEstimate: {x_est}\n\tTruth: {x}") + if i % print_freq == 0: # Print every print_freq'th iteration + print(f"t: {i * dt:.2f}\n\tEstimate: {x_est}\n\tTruth: {x}") diff = {key: x_est[key] - x[key] for key in x.keys()} print(f"\t Diff: {diff}") # Update Real state for next step x = m.next_state(x, u, dt) -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/dff76589941453c29eb744884d63b0e8/03_Existing Models.ipynb b/docs/_downloads/dff76589941453c29eb744884d63b0e8/03_Existing Models.ipynb new file mode 100644 index 00000000..9dbe7380 --- /dev/null +++ b/docs/_downloads/dff76589941453c29eb744884d63b0e8/03_Existing Models.ipynb @@ -0,0 +1,732 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 3. Using Included ProgPy Models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "ProgPy is distributed with a few pre-constructed models that can be used in simulation or prognostics. These models for batteries, pumps, valves, among others, are included in the `progpy.models` package.\n", + "\n", + "In this notebook, we will be exploring a generalized overview of each included model. For more in-depth descriptions of the included models, please refer to the [Included Models](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html) documentation." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "## Table of Contents\n", + "* [Battery Models](#Battery-Models)\n", + " * [Battery Circuit](#BatteryCircuit)\n", + " * [BatteryElectroChemEOD](#BatteryElectroChemEOD)\n", + " * [BatteryElectroChemEOL](#BatteryElectroChemEOL)\n", + " * [Combined BatteryElectroChem (BatteryElectroChemEODEOL)](#Combined-BatteryElectroChem-(BatteryElectroChemEODEOL))\n", + " * [Simplified Battery](#Simplified-Battery)\n", + "* [Centrifugal Pump Model](#Centrifugal-Pump-Model)\n", + "* [Electric Powertrain Models](#Electric-Powertrain-Models)\n", + "* [Pneumatic Valve Model](#Pneumatic-Valve-Model)\n", + "* [Aircraft Flight Model](#Aircraft-Flight-Model)\n", + "* [Discrete State Model](#Discrete-State-Model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Battery Models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will start by introducing the battery models: `BatteryCircuit`, `BatteryElectroChemEOD`, `BatteryElectroChemEOL`, combined `BatteryElectroChem` (`BatteryElectroChemEODEOL`), and `SimplifiedBattery`.\n", + "\n", + "In the following battery models, with the exception of `SimplifiedBattery`, the default model parameters included are for Li-ion batteries, specifically 18650-type cells. Experimental discharge curves for these cells can be downloaded from the Prognostics Center of Excellence [Data Repository](https://www.nasa.gov/intelligent-systems-division/discovery-and-systems-health/pcoe/pcoe-data-set-repository/)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### BatteryCircuit" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this first example, we will demonstrate how to set up, configure, and use the `BatteryCiruit` model. The `BatteryCircuit` model is a vectorized prognostics model for a battery, represented by an equivalent circuit model as described in [[Daigle Sankararaman 2013]](https://papers.phmsociety.org/index.php/phmconf/article/view/2253).\n", + "\n", + "We will start by importing the model and initializing a battery instance with default settings." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import BatteryCircuit\n", + "\n", + "batt = BatteryCircuit()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Information is passed to and from the model using containers that function like dictionaries. The keys of the containers are specific to the model. Let's look at the inputs (loading) and outputs (measurements) for the `BatteryCircuit` model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"inputs:\", batt.inputs)\n", + "print(\"outputs:\", batt.outputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If we refer to the `Circuit` tab under the battery models section in the [documentation](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html), we can see that the input `i` refers to the current draw on the battery. The outputs `t` refers to the temperature in units Kelvin and `v` refers to voltage.\n", + "\n", + "We can also print out what events we're predicting and the internal states the model uses to represent the system." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"event(s): \", batt.events)\n", + "print(\"states: \", batt.states)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can see that this particular model only predicts one event, called `EOD` (End of Discharge). The states listed include `tb`, the battery temperature in K; `qb`, the charge stored in Capacitor Cb of the equivalent circuit model; `qcp`, the charge stored in Capacitor Ccp of the equivalent circuit model; and `qcs`, the charge stored in Capacitor Ccs of the equivalent circuit model.\n", + "\n", + "Let's now look at the model's configuration parameters, which describe the specific system (in this case, the battery) that the model is simulating." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from pprint import pprint\n", + "\n", + "print(\"Model configuration:\")\n", + "pprint(batt.parameters)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's now use the model to do a simulation. To do this, we will first need to set a configuration and define a future load. For more details on future loading, refer to the related section in __[01 Simulation](01_Simulation.ipynb#Future-Loading)__." 
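The future-loading function defined in the next cell steps the current draw at fixed times. The same step profile can also be written declaratively with progpy.loading.Piecewise, which is used elsewhere in this changeset (for example in future_loading.py); a minimal equivalent sketch, assuming the same breakpoints and loads:

from progpy.loading import Piecewise
from progpy.models import BatteryCircuit

batt = BatteryCircuit()
# Same profile as the hand-written future_loading in the next cell:
# 2 A until t = 600 s, then 1 A, 4 A, 2 A, and 3 A thereafter
future_loading_piecewise = Piecewise(
    batt.InputContainer, [600, 900, 1800, 3000, float("inf")], {"i": [2, 1, 4, 2, 3]}
)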
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\"save_freq\": 100, \"dt\": 2, \"t0\": 700}\n", + "\n", + "\n", + "def future_loading(t, x=None):\n", + " if t < 600:\n", + " i = 2\n", + " elif t < 900:\n", + " i = 1\n", + " elif t < 1800:\n", + " i = 4\n", + " elif t < 3000:\n", + " i = 2\n", + " else:\n", + " i = 3\n", + " return batt.InputContainer({\"i\": i})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's run the simulation and plot the inputs and outputs. We can do this using the built-in [plot method](https://nasa.github.io/progpy/api_ref/progpy/SimResult.html#progpy.sim_result.SimResult.plot) based on matplotlib or with other imported plotting libraries." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "simulated_results = batt.simulate_to_threshold(future_loading, **config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the input plot, we can see the current drawn change based on the logic we defined in the future loading function." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.inputs.plot(\n", + " xlabel=\"time (s)\", ylabel=\"current draw (amps)\", title=\"BatteryCircuit Input\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the output plots, we can observe how different input current draws affect the temperature and voltage curves. Generally, the graphs indicate that drawing a higher current leads to higher temperatures and lower voltage." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.outputs.plot(\n", + " keys=[\"t\"],\n", + " xlabel=\"time (s)\",\n", + " ylabel=\"temperature (K)\",\n", + " figsize=(10, 4),\n", + " title=\"BatteryCircuit Outputs\",\n", + ")\n", + "fig2 = simulated_results.outputs.plot(\n", + " keys=[\"v\"], xlabel=\"time (s)\", ylabel=\"voltage (V)\", figsize=(10, 4)\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### BatteryElectroChemEOD" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`BatteryElectroChemEOD` is a vectorized prognostics model for a battery, represented by an electrochemical equations as described in [[Daigle 2013]](https://papers.phmsociety.org/index.php/phmconf/article/view/2252). This model predicts the end of discharge event. Let's start by examining the model inputs, outputs, event(s), and states. We can refer to the `ElectroChem (EOD)` tab under the battery models section in the [documentation](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html) for more details." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import BatteryElectroChemEOD\n", + "\n", + "batt = BatteryElectroChemEOD()\n", + "\n", + "print(\"inputs:\", batt.inputs)\n", + "print(\"outputs:\", batt.outputs)\n", + "print(\"event(s): \", batt.events)\n", + "print(\"states:\", batt.states)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's now run a simulation until `EOD`, or end of discharge. We wil use the same future loading function as the previous example and specify the configuration threshold event as `EOD`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\"save_freq\": 100, \"dt\": 2, \"events\": [\"EOD\"]}\n", + "\n", + "simulated_results = batt.simulate_to_threshold(future_loading, **config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the input plot, we can see the current draw change based on the future loading function we defined." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.inputs.plot(\n", + " xlabel=\"time (s)\", ylabel=\"current draw (amps)\", title=\"BatteryElectroChemEOD Input\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the output plots, we can see changes in voltage and temperature. We can also print parameters like `VEOD`, or the end of discharge voltage threshold. This value is the voltage at which a battery is considered fully discharged." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.outputs.plot(\n", + " keys=[\"v\"],\n", + " xlabel=\"time (s)\",\n", + " ylabel=\"voltage (V)\",\n", + " figsize=(10, 4),\n", + " title=\"BatteryElectroChemEOD Outputs\",\n", + ")\n", + "print(\"End of discharge voltage threshold:\", batt.parameters[\"VEOD\"])\n", + "\n", + "fig2 = simulated_results.outputs.plot(\n", + " keys=[\"t\"], xlabel=\"time (s)\", ylabel=\"temperature (°C)\", figsize=(10, 4)\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the event state plot, we can see `EOD` decline until it reaches 0, or when the end of discharge event has occurred. This event occurence is when the simulation reached threshold and ended." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.event_states.plot(\n", + " xlabel=\"time (s)\",\n", + " ylabel=\"event state\",\n", + " labels={\"EOD\"},\n", + " title=\"BatteryElectroChemEOD Event State\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### BatteryElectroChemEOL" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`BatteryElectroChemEOL` is a vectorized prognostics model for battery degradation, represented by an electrochemical model as described in [[Daigle 2016]](https://arc.aiaa.org/doi/pdf/10.2514/6.2016-2132). Let's go ahead and import the model, initialize a battery instance, and take a closer look at the details. We can also refer to the `ElectroChem (EOL)` tab under the battery model section in the [documentation](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html). Note that the model has no outputs. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import BatteryElectroChemEOL\n", + "\n", + "batt = BatteryElectroChemEOL()\n", + "\n", + "print(\"inputs:\", batt.inputs)\n", + "print(\"outputs:\", batt.outputs)\n", + "print(\"event(s): \", batt.events)\n", + "print(\"states:\", batt.states)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's now run a simulation to predict when we will reach insufficient battery capacity. We will use the same future loading function as the previous examples and specify the configuration threshold event as `InsufficientCapacity`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\"save_freq\": 100, \"dt\": 2, \"events\": [\"InsufficientCapacity\"]}\n", + "\n", + "simulated_results = batt.simulate_to_threshold(future_loading, **config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the input plot, we can once again see the current draw change based on the future loading function we defined." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.inputs.plot(\n", + " xlabel=\"time (s)\", ylabel=\"current draw (amps)\", title=\"BatteryElectroChemEOL Input\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the event state plot, we can see `InsufficientCapacity` linearly decrease until it reaches 0, or when the event has occurred." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.event_states.plot(\n", + " xlabel=\"time (s)\",\n", + " ylabel=\"event state\",\n", + " labels={\"InsufficientCapacity\"},\n", + " title=\"BatteryElectroChemEOL Event State\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Combined BatteryElectroChem (BatteryElectroChemEODEOL)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`BatteryElectroChemEODEOL` is a prognostics model for battery degradation and discharge, represented by an electrochemical model as described in [[Daigle 2013]](https://papers.phmsociety.org/index.php/phmconf/article/view/2252) and [[Daigle 2016]](https://arc.aiaa.org/doi/pdf/10.2514/6.2016-2132). This model combines both the `BatteryElectroChemEOL` and `BatteryElectroChemEOD` models.\n", + "\n", + "We will start by importing the model, initializing a battery instance, and examining the model details. We can refer to the `ElectroChem (Combo)` tab under the battery model section in the [documentation](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html) for more details." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import BatteryElectroChem\n", + "\n", + "batt = BatteryElectroChem()\n", + "\n", + "print(\"inputs:\", batt.inputs)\n", + "print(\"outputs:\", batt.outputs)\n", + "print(\"event(s): \", batt.events)\n", + "print(\"states:\", batt.states)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, we will simulate a battery until `EOL` (End of Life). As battery capacity decreases with use, `EOL` is reached when the battery capacity falls below some acceptable threshold (i.e., what we define as useful capacity).\n", + "\n", + "We will now set the configuration and define a future loading function. As we want to simulate until `EOL`, we will set the configuration event to `InsufficientCapacity`. The future loading function is designed to charge the battery until `EOD` is 0.95 and then discharge until `EOD` is 0.05. Note that states represent the progress towards the event occurring. An event state of 0 indicates the event has occurred and 1 indicates no progress towards the event." 
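Since event states always run from 1 (no progress toward the event) down to 0 (event has occurred), a quick sanity check on a freshly initialized battery should report values near 1 for both events. A minimal sketch (a separate instance is used so the check does not disturb the notebook's `batt`; the exact numbers depend on the default initial state):

from progpy.models import BatteryElectroChem

batt_check = BatteryElectroChem()
x0 = batt_check.initialize()
# Expect values close to 1 for both 'EOD' and 'InsufficientCapacity'
print(batt_check.event_state(x0))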
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"save_freq\": 1000,\n", + " \"dt\": 2,\n", + " \"events\": \"InsufficientCapacity\",\n", + "}\n", + "\n", + "\n", + "def future_loading(t, x=None):\n", + " load = 1\n", + "\n", + " if x is not None:\n", + " event_state = batt.event_state(x)\n", + " if event_state[\"EOD\"] > 0.95:\n", + " load = 1 # Discharge\n", + " elif event_state[\"EOD\"] < 0.05:\n", + " load = -1 # Charge\n", + "\n", + " return batt.InputContainer({\"i\": load})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now simulate to the threshold and print the results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "simulated_results = batt.simulate_to_threshold(future_loading, **config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now plot the inputs, outputs, and event states. In the input plot, we can see the current drawn fluctuates between -1 and 1 based on the current load we defined in the future loading function." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.inputs.plot(\n", + " xlabel=\"time (s)\", ylabel=\"current drawn (amps)\", title=\"BatteryElectroChem Input\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the output plots, we can see changes in the voltage and temperature." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.outputs.plot(\n", + " keys=[\"v\"],\n", + " xlabel=\"time (s)\",\n", + " ylabel=\"voltage (V)\",\n", + " figsize=(10, 4),\n", + " title=\"BatteryElectroChem Outputs\",\n", + ")\n", + "fig2 = simulated_results.outputs.plot(\n", + " keys=[\"t\"], xlabel=\"time (s)\", ylabel=\"temperature (°C)\", figsize=(10, 4)\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the event states plot, we can see `EOD` incrementally spiking and `InsufficientCapacity` linearly declining until it reaches 0, or when the event has occurred." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "fig = simulated_results.event_states.plot(\n", + " xlabel=\"time (s)\",\n", + " ylabel=\"event states\",\n", + " labels={\"EOD\", \"InsufficientCapacity\"},\n", + " title=\"BatteryElectroChem Event States\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Simplified Battery" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`SimplifiedBattery` is a model from [[Sierra 2019]](https://www.sciencedirect.com/science/article/abs/pii/S0951832018301406). It was initially introduced in the __[2024 PHM Tutorial](2024PHMTutorial.ipynb)__. Unlike the previous models, the default parameters are for a Tattu battery. We can refer to the `Simplified` tab under the battery model section in the [documentation](https://nasa.github.io/progpy/api_ref/progpy/IncludedModels.html) for more details.\n", + "\n", + "Let's start by importing the model, initializing an instance, and examining it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from progpy.models import SimplifiedBattery\n", + "from progpy.loading import Piecewise\n", + "\n", + "batt = SimplifiedBattery()\n", + "\n", + "print(\"inputs:\", batt.inputs)\n", + "print(\"outputs:\", batt.outputs)\n", + "print(\"event(s): \", batt.events)\n", + "print(\"states:\", batt.states)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now define future loading based on a piecewise function and simulate to a set time." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "future_loading = Piecewise(\n", + " dict, [600, 900, 1800, 3000, float(\"inf\")], {\"P\": [25, 12, 50, 25, 33]}\n", + ")\n", + "\n", + "simulated_results = batt.simulate_to(200, future_loading, {\"v\": 4.183})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's look at the event states plot, where we can see `EOD` and `Low V`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = simulated_results.event_states.plot(\n", + " xlabel=\"time (s)\", ylabel=\"event state\", title=\"SimplifiedBattery Event States\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Centrifugal Pump Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this section will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Electric Powertrain Models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Pneumatic Valve Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this section will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Aircraft Flight Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this section will be added in release v1.9**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Discrete State Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**A version of this section will be added in release v1.9**" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.10.7 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.10.7" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "610c699f0cd8c4f129acd9140687fff6866bed0eb8e82f249fc8848b827b628c" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/_downloads/e2095f421d8ca12b927634f3fbbba6c4/future_loading.py b/docs/_downloads/e2095f421d8ca12b927634f3fbbba6c4/future_loading.py index 78f72ebb..6519f1de 100644 --- a/docs/_downloads/e2095f421d8ca12b927634f3fbbba6c4/future_loading.py +++ b/docs/_downloads/e2095f421d8ca12b927634f3fbbba6c4/future_loading.py @@ -2,7 +2,7 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating ways to use future loading. +Example demonstrating ways to use future loading. 
""" import matplotlib.pyplot as plt @@ -11,28 +11,32 @@ from progpy.models import BatteryCircuit from statistics import mean -def run_example(): + +def run_example(): m = BatteryCircuit() ## Example 1: Variable (piecewise) loading future_loading = Piecewise( - m.InputContainer, - [600, 900, 1800, 3000, float('inf')], - {'i': [2, 1, 4, 2, 3]}) - + m.InputContainer, [600, 900, 1800, 3000, float("inf")], {"i": [2, 1, 4, 2, 3]} + ) + # Simulate to threshold options = { - 'save_freq': 100, # Frequency at which results are saved - 'dt': 2 # Timestep + "save_freq": 100, # Frequency at which results are saved + "dt": 2, # Timestep } simulated_results = m.simulate_to_threshold(future_loading, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'Variable Load Current (amps)', xlabel='time (s)') - simulated_results.event_states.plot(ylabel = 'Variable Load Event State', xlabel='time (s)') - - ## Example 2: Moving Average loading - # This is useful in cases where you are running reoccuring simulations, and are measuring the actual load on the system, + simulated_results.inputs.plot( + ylabel="Variable Load Current (amps)", xlabel="time (s)" + ) + simulated_results.event_states.plot( + ylabel="Variable Load Event State", xlabel="time (s)" + ) + + ## Example 2: Moving Average loading + # This is useful in cases where you are running reoccuring simulations, and are measuring the actual load on the system, # but don't have a good way of predicting it, and you expect loading to be steady from progpy.loading import MovingAverage @@ -40,54 +44,83 @@ def run_example(): future_loading = MovingAverage(m.InputContainer) # Now lets say you have some measured loads to add - measured_loads = [10, 11.5, 12.0, 8, 2.1, 1.8, 1.99, 2.0, 2.01, 1.89, 1.92, 2.01, 2.1, 2.2] - + measured_loads = [ + 10, + 11.5, + 12.0, + 8, + 2.1, + 1.8, + 1.99, + 2.0, + 2.01, + 1.89, + 1.92, + 2.01, + 2.1, + 2.2, + ] + # We're going to feed these into the future loading eqn for load in measured_loads: - future_loading.add_load({'i': load}) - + future_loading.add_load({"i": load}) + # Now the future_loading eqn is setup to use the moving average of whats been seen # Simulate to threshold simulated_results = m.simulate_to_threshold(future_loading, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'Moving Average Current (amps)', xlabel='time (s)') - simulated_results.event_states.plot(ylabel = 'Moving Average Event State', xlabel='time (s)') + simulated_results.inputs.plot( + ylabel="Moving Average Current (amps)", xlabel="time (s)" + ) + simulated_results.event_states.plot( + ylabel="Moving Average Event State", xlabel="time (s)" + ) # In this case, this estimate is wrong because loading will not be steady, but at least it would give you an approximation. - # If more measurements are received, the user could estimate the moving average here and then run a new simulation. + # If more measurements are received, the user could estimate the moving average here and then run a new simulation. - ## Example 3: Gaussian Distribution - # In this example we will still be doing a variable loading like the first option, but we are going to use a + ## Example 3: Gaussian Distribution + # In this example we will still be doing a variable loading like the first option, but we are going to use a # gaussian distribution for each input. 
future_loading = Piecewise( - m.InputContainer, - [600, 900, 1800, 3000, float('inf')], - {'i': [2, 1, 4, 2, 3]}) + m.InputContainer, [600, 900, 1800, 3000, float("inf")], {"i": [2, 1, 4, 2, 3]} + ) future_loading_with_noise = GaussianNoiseLoadWrapper(future_loading, 0.2) # Simulate to threshold simulated_results = m.simulate_to_threshold(future_loading_with_noise, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'Variable Gaussian Current (amps)', xlabel='time (s)') - simulated_results.event_states.plot(ylabel = 'Variable Gaussian Event State', xlabel='time (s)') + simulated_results.inputs.plot( + ylabel="Variable Gaussian Current (amps)", xlabel="time (s)" + ) + simulated_results.event_states.plot( + ylabel="Variable Gaussian Event State", xlabel="time (s)" + ) # Example 4: Gaussian- increasing with time - # For this we're using moving average. This is realistic because the further out from current time you get, - # the more uncertainty there is in your prediction. + # For this we're using moving average. This is realistic because the further out from current time you get, + # the more uncertainty there is in your prediction. def future_loading(t, x=None): - std = future_loading.base_std + future_loading.std_slope * (t - future_loading.t) - return {key : normal(future_loading.load[key], std) for key in future_loading.load.keys()} - future_loading.load = {key : 0 for key in m.inputs} + std = future_loading.base_std + future_loading.std_slope * ( + t - future_loading.t + ) + return { + key: normal(future_loading.load[key], std) + for key in future_loading.load.keys() + } + + future_loading.load = {key: 0 for key in m.inputs} future_loading.base_std = 0.001 future_loading.std_slope = 1e-4 future_loading.t = 0 # Lets define another function to handle the moving average logic window = 10 # Number of elements in window + def moving_avg(i): for key in m.inputs: moving_avg.loads[key].append(i[key]) @@ -95,51 +128,78 @@ def moving_avg(i): del moving_avg.loads[key][0] # Remove first item # Update future loading eqn - future_loading.load = {key : mean(moving_avg.loads[key]) for key in m.inputs} - moving_avg.loads = {key : [] for key in m.inputs} + future_loading.load = {key: mean(moving_avg.loads[key]) for key in m.inputs} - # OK, we've setup the logic of the moving average. + moving_avg.loads = {key: [] for key in m.inputs} + + # OK, we've setup the logic of the moving average. # Now lets say you have some measured loads to add - measured_loads = [10, 11.5, 12.0, 8, 2.1, 1.8, 1.99, 2.0, 2.01, 1.89, 1.92, 2.01, 2.1, 2.2] - + measured_loads = [ + 10, + 11.5, + 12.0, + 8, + 2.1, + 1.8, + 1.99, + 2.0, + 2.01, + 1.89, + 1.92, + 2.01, + 2.1, + 2.2, + ] + # We're going to feed these into the future loading eqn for load in measured_loads: - moving_avg({'i': load}) + moving_avg({"i": load}) # Simulate to threshold simulated_results = m.simulate_to_threshold(future_loading, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'Moving Average Current (amps)', xlabel='time (s)') - simulated_results.event_states.plot(ylabel = 'Moving Average Event State', xlabel='time (s)') - + simulated_results.inputs.plot( + ylabel="Moving Average Current (amps)", xlabel="time (s)" + ) + simulated_results.event_states.plot( + ylabel="Moving Average Event State", xlabel="time (s)" + ) + # In this example future_loading.t has to be updated with current time before each prediction. 
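    # The update mentioned above is just an attribute assignment made before the next
    # simulation call, paired with starting that simulation at the same time. The
    # 500 s used here is an illustrative value, not taken from the measurements above,
    # and t0 is assumed to be accepted here as it is in the UAV example elsewhere in
    # this changeset.
    future_loading.t = 500
    updated_results = m.simulate_to_threshold(future_loading, t0=500, **options)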
- + # Example 5 Function of state # here we're pretending that input is a function of SOC. It increases as we approach SOC def future_loading(t, x=None): if x is not None: event_state = future_loading.event_state(x) - return m.InputContainer({'i': future_loading.start + (1-event_state['EOD']) * future_loading.slope}) # default - return m.InputContainer({'i': future_loading.start}) + return m.InputContainer( + { + "i": future_loading.start + + (1 - event_state["EOD"]) * future_loading.slope + } + ) # default + return m.InputContainer({"i": future_loading.start}) + future_loading.t = 0 future_loading.event_state = m.event_state - future_loading.slope = 2 # difference between input with EOD = 1 and 0. + future_loading.slope = 2 # difference between input with EOD = 1 and 0. future_loading.start = 0.5 # Simulate to threshold simulated_results = m.simulate_to_threshold(future_loading, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'f(x) Current (amps)', xlabel='time (s)') - simulated_results.event_states.plot(ylabel = 'f(x) Event State', xlabel='time (s)') + simulated_results.inputs.plot(ylabel="f(x) Current (amps)", xlabel="time (s)") + simulated_results.event_states.plot(ylabel="f(x) Event State", xlabel="time (s)") # In this example future_loading.t has to be updated with current time before each prediction. # Show plots plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/e2cdb944ece759faff6875cf69f016c7/derived_params.ipynb b/docs/_downloads/e2cdb944ece759faff6875cf69f016c7/derived_params.ipynb index 5ff89fac..c784710e 100644 --- a/docs/_downloads/e2cdb944ece759faff6875cf69f016c7/derived_params.ipynb +++ b/docs/_downloads/e2cdb944ece759faff6875cf69f016c7/derived_params.ipynb @@ -1,54 +1,102 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample demonstrating ways to use the derived parameters feature for model building. 
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from progpy.models.thrown_object import ThrownObject\n\ndef run_example():\n # For this example we will use the ThrownObject model from the new_model example.\n # We will extend that model to include a derived parameter\n # Let's assume that the throwing_speed was actually a function of thrower_height \n # (i.e., a taller thrower would throw the ball faster).\n # Here's how we would implement that\n\n # Step 1: Define a function for the relationship between thrower_height and throwing_speed.\n def update_thrown_speed(params):\n return {\n 'throwing_speed': params['thrower_height'] * 21.85\n } # Assumes thrown_speed is linear function of height\n # Note: one or more parameters can be changed in these functions, whatever parameters are changed are returned in the dictionary\n\n # Step 2: Define the param callbacks\n ThrownObject.param_callbacks.update({\n 'thrower_height': [update_thrown_speed]\n }) # Tell the derived callbacks feature to call this function when thrower_height changes.\n # Note: Usually we would define this method within the class\n # for this example, we're doing it separately to improve readability\n # Note2: You can also have more than one function be called when a single parameter is changed.\n # Do this by adding the additional callbacks to the list (e.g., 'thrower_height': [update_thrown_speed, other_fcn])\n\n # Step 3: Use!\n obj = ThrownObject()\n print(\"Default Settings:\\n\\tthrower_height: {}\\n\\tthowing_speed: {}\".format(obj.parameters['thrower_height'], obj.parameters['throwing_speed']))\n \n # Now let's change the thrower_height\n print(\"changing height...\")\n obj.parameters['thrower_height'] = 1.75 # Our thrower is 1.75 m tall\n print(\"\\nUpdated Settings:\\n\\tthrower_height: {}\\n\\tthowing_speed: {}\".format(obj.parameters['thrower_height'], obj.parameters['throwing_speed']))\n print(\"Notice how speed changed automatically with height\")\n\n\n# This allows the module to be executed directly \nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample demonstrating ways to use the derived parameters feature for model building. 
\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from progpy.models.thrown_object import ThrownObject\n", + "\n", + "\n", + "def run_example():\n", + " # For this example we will use the ThrownObject model from the new_model example.\n", + " # We will extend that model to include a derived parameter\n", + " # Let's assume that the throwing_speed was actually a function of thrower_height\n", + " # (i.e., a taller thrower would throw the ball faster).\n", + " # Here's how we would implement that\n", + "\n", + " # Step 1: Define a function for the relationship between thrower_height and throwing_speed.\n", + " def update_thrown_speed(params):\n", + " return {\n", + " \"throwing_speed\": params[\"thrower_height\"] * 21.85\n", + " } # Assumes thrown_speed is linear function of height\n", + "\n", + " # Note: one or more parameters can be changed in these functions, whatever parameters are changed are returned in the dictionary\n", + "\n", + " # Step 2: Define the param callbacks\n", + " ThrownObject.param_callbacks.update(\n", + " {\"thrower_height\": [update_thrown_speed]}\n", + " ) # Tell the derived callbacks feature to call this function when thrower_height changes.\n", + " # Note: Usually we would define this method within the class\n", + " # for this example, we're doing it separately to improve readability\n", + " # Note2: You can also have more than one function be called when a single parameter is changed.\n", + " # Do this by adding the additional callbacks to the list (e.g., 'thrower_height': [update_thrown_speed, other_fcn])\n", + "\n", + " # Step 3: Use!\n", + " obj = ThrownObject()\n", + " print(\n", + " \"Default Settings:\\n\\tthrower_height: {}\\n\\tthowing_speed: {}\".format(\n", + " obj.parameters[\"thrower_height\"], obj.parameters[\"throwing_speed\"]\n", + " )\n", + " )\n", + "\n", + " # Now let's change the thrower_height\n", + " print(\"changing height...\")\n", + " obj.parameters[\"thrower_height\"] = 1.75 # Our thrower is 1.75 m tall\n", + " print(\n", + " \"\\nUpdated Settings:\\n\\tthrower_height: {}\\n\\tthowing_speed: {}\".format(\n", + " obj.parameters[\"thrower_height\"], obj.parameters[\"throwing_speed\"]\n", + " )\n", + " )\n", + " print(\"Notice how speed changed automatically with height\")\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/e2fe425b405c324c9ddabb88b4675b84/sim_pump.py b/docs/_downloads/e2fe425b405c324c9ddabb88b4675b84/sim_pump.py index f895f749..446bec20 100644 --- a/docs/_downloads/e2fe425b405c324c9ddabb88b4675b84/sim_pump.py +++ b/docs/_downloads/e2fe425b405c324c9ddabb88b4675b84/sim_pump.py @@ -2,61 +2,68 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example of a centrifugal pump being simulated until threshold is met. +Example of a centrifugal pump being simulated until threshold is met. 
""" from progpy.models import CentrifugalPump -import matplotlib.pyplot as plt +import matplotlib.pyplot as plt from progpy.sim_result import SimResult -def run_example(): + + +def run_example(): # Step 1: Setup Pump - pump = CentrifugalPump(process_noise= 0) - pump.parameters['x0']['wA'] = 0.01 # Set Wear Rate + pump = CentrifugalPump(process_noise=0) + pump.parameters["x0"]["wA"] = 0.01 # Set Wear Rate # Step 2: Setup Future Loading cycle_time = 3600 + def future_loading(t, x=None): t = t % cycle_time - if t < cycle_time/2.0: + if t < cycle_time / 2.0: V = 471.2389 - elif t < cycle_time/2 + 100: - V = 471.2389 + (t-cycle_time/2) + elif t < cycle_time / 2 + 100: + V = 471.2389 + (t - cycle_time / 2) elif t < cycle_time - 100: V = 571.2389 else: - V = 471.2398 - (t-cycle_time) + V = 471.2398 - (t - cycle_time) - return pump.InputContainer({ - 'Tamb': 290, - 'V': V, - 'pdisch': 928654, - 'psuc': 239179, - 'wsync': V * 0.8 - }) + return pump.InputContainer( + {"Tamb": 290, "V": V, "pdisch": 928654, "psuc": 239179, "wsync": V * 0.8} + ) # Step 3: Sim - first_output = pump.output(pump.initialize(future_loading(0),{})) - config = { - 'horizon': 1e5, - 'save_freq': 1e3, - 'print': True - } - simulated_results = pump.simulate_to_threshold(future_loading, first_output, **config) + first_output = pump.output(pump.initialize(future_loading(0), {})) + config = {"horizon": 1e5, "save_freq": 1e3, "print": True} + simulated_results = pump.simulate_to_threshold( + future_loading, first_output, **config + ) # Step 4: Plot Results - simulated_results.inputs.plot(compact = False, title = 'Inputs', xlabel = 'time', ylabel = {lbl: lbl for lbl in pump.inputs}) - simulated_results.outputs.plot(compact = False, title = 'Outputs', xlabel = 'time', ylabel = '') - simulated_results.states.plot(compact = False, title = 'States', xlabel = 'time', ylabel = '') - simulated_results.event_states.plot(compact = False, title = 'Events', xlabel = 'time', ylabel = '') + simulated_results.inputs.plot( + compact=False, + title="Inputs", + xlabel="time", + ylabel={lbl: lbl for lbl in pump.inputs}, + ) + simulated_results.outputs.plot( + compact=False, title="Outputs", xlabel="time", ylabel="" + ) + simulated_results.states.plot( + compact=False, title="States", xlabel="time", ylabel="" + ) + simulated_results.event_states.plot( + compact=False, title="Events", xlabel="time", ylabel="" + ) thresholds_met = [pump.threshold_met(x) for x in simulated_results.states] thresholds_met = SimResult(simulated_results.times, thresholds_met) - thresholds_met.plot(compact = False, title = 'Threshold Met', xlabel = 'time', ylabel = '') - - + thresholds_met.plot(compact=False, title="Threshold Met", xlabel="time", ylabel="") plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/e38d3e121ad5b2b31d88a0fdb681fd3b/ensemble.py b/docs/_downloads/e38d3e121ad5b2b31d88a0fdb681fd3b/ensemble.py index 79e1b57b..1bad28f9 100644 --- a/docs/_downloads/e38d3e121ad5b2b31d88a0fdb681fd3b/ensemble.py +++ b/docs/_downloads/e38d3e121ad5b2b31d88a0fdb681fd3b/ensemble.py @@ -6,13 +6,13 @@ .. dropdown:: More details - Ensemble model is an approach to modeling where one or more different models are simulated together and then aggregated into a single prediction. 
This is generally done to improve the accuracy of prediction when you have multiple models that each represent part of the behavior or represent a distribution of different behaviors.
+    An ensemble model is an approach to modeling where one or more different models are simulated together and their results are then aggregated into a single prediction. This is generally done to improve the accuracy of prediction when you have multiple models that each represent part of the behavior or represent a distribution of different behaviors.
 
-    In this example, 4 different equivalent circuit models are setup with different configuration parameters. They are each simulated individually. Then an ensemble model is created for the 4 models, and that is simulated individually. The results are plotted. 
+    In this example, 4 different equivalent circuit models are set up with different configuration parameters. They are each simulated individually. Then an ensemble model is created for the 4 models, and that is simulated individually. The results are plotted.
     The results are partially skewed by a poorly configured model, so we change the aggregation method to account for that. and resimulate, showing the results
 
-    Finally, an ensemble model is created for two different models with different states. That model is simulated with time and the results are plotted. 
+    Finally, an ensemble model is created for two different models with different states. That model is simulated with time and the results are plotted.
 """
 
 from matplotlib import pyplot as plt
@@ -21,48 +21,65 @@
 from progpy.datasets import nasa_battery
 from progpy.models import BatteryElectroChemEOD, BatteryCircuit
 
+
 def run_example():
     # Example 1: Different model configurations
     # Download data
-    print('downloading data (this may take a while)...')
+    print("downloading data (this may take a while)...")
     data = nasa_battery.load_data(8)[1]
 
     # Prepare data
     RUN_ID = 0
-    test_input = [{'i': i} for i in data[RUN_ID]['current']]
-    test_time = data[RUN_ID]['relativeTime']
+    test_input = [{"i": i} for i in data[RUN_ID]["current"]]
+    test_time = data[RUN_ID]["relativeTime"]
 
     # Setup models
-    # In this case, we have some uncertainty on the parameters of the model, 
     # so we're setting up a few versions of the circuit model with different parameters. 
- print('Setting up models...') - m_circuit = BatteryCircuit(process_noise = 0, measurement_noise = 0) - m_circuit_2 = BatteryCircuit(process_noise = 0, measurement_noise = 0, qMax = 7860) - m_circuit_3 = BatteryCircuit(process_noise = 0, measurement_noise = 0, qMax = 6700, Rs = 0.055) - m_ensemble = EnsembleModel((m_circuit, m_circuit_2, m_circuit_3), process_noise = 0, measurement_noise = 0) + print("Setting up models...") + m_circuit = BatteryCircuit(process_noise=0, measurement_noise=0) + m_circuit_2 = BatteryCircuit(process_noise=0, measurement_noise=0, qMax=7860) + m_circuit_3 = BatteryCircuit( + process_noise=0, measurement_noise=0, qMax=6700, Rs=0.055 + ) + m_ensemble = EnsembleModel( + (m_circuit, m_circuit_2, m_circuit_3), process_noise=0, measurement_noise=0 + ) # Evaluate models - print('Evaluating models...') + print("Evaluating models...") + def future_loading(t, x=None): for i, mission_time in enumerate(test_time): if mission_time > t: return m_circuit.InputContainer(test_input[i]) + results_ensemble = m_ensemble.simulate_to(test_time.iloc[-1], future_loading) # Plot results - print('Producing figures...') - plt.plot(test_time, data[RUN_ID]['voltage'], color='green', label='ground truth') - plt.plot(results_ensemble.times, [z['v'] for z in results_ensemble.outputs], color='red', label='ensemble') + print("Producing figures...") + plt.plot(test_time, data[RUN_ID]["voltage"], color="green", label="ground truth") + plt.plot( + results_ensemble.times, + [z["v"] for z in results_ensemble.outputs], + color="red", + label="ensemble", + ) plt.legend() # Note: This is a very poor performing model # there was an outlier model (m_circuit_3), which effected the quality of the model prediction # This can be resolved by using a different aggregation_method. 
For example, median # In a real scenario, you would likely remove this model, this is just to illustrate outlier elimination - print('Updating with Median ') - m_ensemble.parameters['aggregation_method'] = np.median + print("Updating with Median ") + m_ensemble.parameters["aggregation_method"] = np.median results_ensemble = m_ensemble.simulate_to(test_time.iloc[-1], future_loading) - plt.plot(results_ensemble.times, [z['v'] for z in results_ensemble.outputs], color='orange', label='ensemble - median') + plt.plot( + results_ensemble.times, + [z["v"] for z in results_ensemble.outputs], + color="orange", + label="ensemble - median", + ) plt.legend() # Example 2: Different Models @@ -71,31 +88,49 @@ def future_loading(t, x=None): # These two models share one state, but besides that they have different states # Setup Model - print('Setting up models...') - m_electro = BatteryElectroChemEOD(process_noise = 0, measurement_noise = 0) - m_ensemble = EnsembleModel((m_circuit, m_electro), process_noise = 0, measurement_noise=0) + print("Setting up models...") + m_electro = BatteryElectroChemEOD(process_noise=0, measurement_noise=0) + m_ensemble = EnsembleModel( + (m_circuit, m_electro), process_noise=0, measurement_noise=0 + ) # Evaluate models - print('Evaluating models...') - print('\tEnsemble') + print("Evaluating models...") + print("\tEnsemble") results_ensemble = m_ensemble.simulate_to(test_time.iloc[-1], future_loading) - print('\tCircuit 1') + print("\tCircuit 1") results_circuit1 = m_circuit.simulate_to(test_time.iloc[-1], future_loading) - print('\tElectroChem') + print("\tElectroChem") results_electro = m_electro.simulate_to(test_time.iloc[-1], future_loading) # Plot results - print('Producing figures...') + print("Producing figures...") plt.figure() - plt.plot(test_time, data[RUN_ID]['voltage'], color='green', label='ground truth') - plt.plot(results_circuit1.times, [z['v'] for z in results_circuit1.outputs], color='blue', label='circuit') - plt.plot(results_electro.times, [z['v'] for z in results_electro.outputs], color='red', label='electro chemistry') - plt.plot(results_ensemble.times, [z['v'] for z in results_ensemble.outputs], color='yellow', label='ensemble') + plt.plot(test_time, data[RUN_ID]["voltage"], color="green", label="ground truth") + plt.plot( + results_circuit1.times, + [z["v"] for z in results_circuit1.outputs], + color="blue", + label="circuit", + ) + plt.plot( + results_electro.times, + [z["v"] for z in results_electro.outputs], + color="red", + label="electro chemistry", + ) + plt.plot( + results_ensemble.times, + [z["v"] for z in results_ensemble.outputs], + color="yellow", + label="ensemble", + ) plt.legend() - - # Note that the result may not be exactly between the other two models. + + # Note that the result may not be exactly between the other two models. 
# This is because of aggregation is done in 2 steps: at state transition and then at output calculation -# This allows the module to be executed directly -if __name__=='__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/e544b7b61c508397101940603daddf04/direct_model.py b/docs/_downloads/e544b7b61c508397101940603daddf04/direct_model.py index 07d009a2..80bda7ff 100644 --- a/docs/_downloads/e544b7b61c508397101940603daddf04/direct_model.py +++ b/docs/_downloads/e544b7b61c508397101940603daddf04/direct_model.py @@ -11,31 +11,34 @@ import numpy as np from progpy.models import ThrownObject + def run_example(): # Here is how estimating time of event works for a timeseries model m = ThrownObject() x = m.initialize() - print(m.__class__.__name__, "(Direct Model)" if m.is_direct else "(Timeseries Model)") + print( + m.__class__.__name__, "(Direct Model)" if m.is_direct else "(Timeseries Model)" + ) tic = time.perf_counter() - print('Time of event: ', m.time_of_event(x, dt = 0.05)) + print("Time of event: ", m.time_of_event(x, dt=0.05)) toc = time.perf_counter() - print(f'execution: {(toc-tic)*1000:0.4f} milliseconds') + print(f"execution: {(toc - tic) * 1000:0.4f} milliseconds") # Step 1: Define DirectModel # In this case we're extending the ThrownObject model to include the time_to_event method, defined in DirectModel - # In the case of thrown objects, we can solve the differential equation + # In the case of thrown objects, we can solve the differential equation # to estimate the time at which the events occur. class DirectThrownObject(ThrownObject): def time_of_event(self, x, *args, **kwargs): # calculate time when object hits ground given x['x'] and x['v'] # 0 = x0 + v0*t - 0.5*g*t^2 - g = self.parameters['g'] - t_impact = -(x['v'] + np.sqrt(x['v']*x['v'] - 2*g*x['x']))/g + g = self.parameters["g"] + t_impact = -(x["v"] + np.sqrt(x["v"] * x["v"] - 2 * g * x["x"])) / g # 0 = v0 - g*t - t_falling = -x['v']/g - - return {'falling': t_falling, 'impact': t_impact} + t_falling = -x["v"] / g + + return {"falling": t_falling, "impact": t_impact} # Note that adding *args and **kwargs is optional. # Having these arguments makes the function interchangeable with other models @@ -45,18 +48,23 @@ def time_of_event(self, x, *args, **kwargs): m = DirectThrownObject() x = m.initialize() # Using Initial state # Now instead of simulating to threshold, we can estimate it directly from the state, like so - print('\n', m.__class__.__name__, "(Direct Model)" if m.is_direct else "(Timeseries Model)") + print( + "\n", + m.__class__.__name__, + "(Direct Model)" if m.is_direct else "(Timeseries Model)", + ) tic = time.perf_counter() - print('Time of event: ', m.time_of_event(x)) + print("Time of event: ", m.time_of_event(x)) toc = time.perf_counter() - print(f'execution: {(toc-tic)*1000:0.4f} milliseconds') + print(f"execution: {(toc - tic) * 1000:0.4f} milliseconds") - # Notice that execution is MUCH faster for the direct model. + # Notice that execution is MUCH faster for the direct model. # This is even more pronounced for events that occur later in the simulation. 
- # In this case, the DirectThrownObject has a defined next_state and output equation, + # In this case, the DirectThrownObject has a defined next_state and output equation, # allowing it to be used with a state estimator (e..g, Particle Filter) -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/e658975d558d3b0da66ab1df4fbe0f9f/visualize.py b/docs/_downloads/e658975d558d3b0da66ab1df4fbe0f9f/visualize.py index f43f8c48..e279ce62 100644 --- a/docs/_downloads/e658975d558d3b0da66ab1df4fbe0f9f/visualize.py +++ b/docs/_downloads/e658975d558d3b0da66ab1df4fbe0f9f/visualize.py @@ -2,38 +2,57 @@ # National Aeronautics and Space Administration. All Rights Reserved. """ -Example demonstrating the Visualization Module. +Example demonstrating the Visualization Module. """ import matplotlib.pyplot as plt from progpy.visualize import plot_timeseries from progpy.models.thrown_object import ThrownObject + def run_example(): - print('Visualize Module Example') + print("Visualize Module Example") m = ThrownObject() - # Step 2: Setup for simulation + # Step 2: Setup for simulation def future_load(t, x=None): return {} # Step 3: Simulate to impact - event = 'impact' - options={'dt':0.005, 'save_freq':1} - simulated_results = m.simulate_to_threshold(future_load, - threshold_keys=[event], - **options) - + event = "impact" + options = {"dt": 0.005, "save_freq": 1} + simulated_results = m.simulate_to_threshold( + future_load, threshold_keys=[event], **options + ) # Display states # ============== - plot_timeseries(simulated_results.times, simulated_results.states, - options = {'compact': False, 'suptitle': 'state evolution', 'title': True, - 'xlabel': 'time', 'ylabel': {'x': 'position', 'v': 'velocity'}, 'display_labels': 'minimal'}, - legend = {'display': True, 'display_at_subplot': 'all'} ) - plot_timeseries(simulated_results.times, simulated_results.states, options = {'compact': True, 'suptitle': 'state evolution', 'title': 'example title', - 'xlabel': 'time', 'ylabel':'position'}) + plot_timeseries( + simulated_results.times, + simulated_results.states, + options={ + "compact": False, + "suptitle": "state evolution", + "title": True, + "xlabel": "time", + "ylabel": {"x": "position", "v": "velocity"}, + "display_labels": "minimal", + }, + legend={"display": True, "display_at_subplot": "all"}, + ) + plot_timeseries( + simulated_results.times, + simulated_results.states, + options={ + "compact": True, + "suptitle": "state evolution", + "title": "example title", + "xlabel": "time", + "ylabel": "position", + }, + ) plt.show() -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/e7ee62f41f59a26f3cd93b4b0497cc90/lstm_model.py b/docs/_downloads/e7ee62f41f59a26f3cd93b4b0497cc90/lstm_model.py index 7968caf7..a34cf88e 100644 --- a/docs/_downloads/e7ee62f41f59a26f3cd93b4b0497cc90/lstm_model.py +++ b/docs/_downloads/e7ee62f41f59a26f3cd93b4b0497cc90/lstm_model.py @@ -1,5 +1,5 @@ # Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. -# This ensures that the directory containing examples is in the python search directories +# This ensures that the directory containing examples is in the python search directories """ Example building a LSTMStateTransitionModel from data. 
This is a simple example of how to use the LSTMStateTransitionModel class. @@ -16,11 +16,12 @@ from progpy.data_models import LSTMStateTransitionModel from progpy.models import ThrownObject, BatteryElectroChemEOD + def run_example(): # ----------------------------------------------------- # Example 1- set timestep # Here we will create a model for a specific timestep. - # The model will only work with that timestep + # The model will only work with that timestep # This is useful if you know the timestep you would like to use # ----------------------------------------------------- TIMESTEP = 0.1 @@ -30,24 +31,27 @@ def run_example(): # For cases where you're generating a model from data # (e.g., collected from a testbed or a real-world environment), # you'll replace that generated data with your own. - print('Generating data') + print("Generating data") m = ThrownObject() def future_loading(t, x=None): - return m.InputContainer({}) # No input for thrown object + return m.InputContainer({}) # No input for thrown object - data = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP, dt=TIMESTEP) + data = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP, dt=TIMESTEP + ) # Step 2: Generate model # We'll use the LSTMStateTransitionModel class to generate a model # from the data. - print('Building model...') + print("Building model...") m2 = LSTMStateTransitionModel.from_data( inputs=[data.inputs], outputs=[data.outputs], window=4, epochs=30, # Maximum number of epochs, may stop earlier if early stopping enabled - output_keys=['x']) + output_keys=["x"], + ) # We can see the training history # Should show the model progressively getting better @@ -55,16 +59,16 @@ def future_loading(t, x=None): # If val_loss starts going up again, then we may be overtraining m2.plot_history() plt.show() - + # Step 3: Use model to simulate_to time of threshold - print('Simulating with generated model...') + print("Simulating with generated model...") t_counter = 0 x_counter = m.initialize() def future_loading2(t, x=None): - # Future Loading is a bit complicated here - # Loading for the resulting model includes the data inputs, + # Future Loading is a bit complicated here + # Loading for the resulting model includes the data inputs, # and the output from the last timestep nonlocal t_counter, x_counter z = m.output(x_counter) @@ -72,50 +76,67 @@ def future_loading2(t, x=None): x_counter = m.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z - - results2 = m2.simulate_to(data.times[-1], future_loading2, dt=TIMESTEP, save_freq=TIMESTEP) + + results2 = m2.simulate_to( + data.times[-1], future_loading2, dt=TIMESTEP, save_freq=TIMESTEP + ) # Step 4: Compare model to original model - print('Comparing results...') - data.outputs.plot(title='original model') - results2.outputs.plot(title='generated model') + print("Comparing results...") + data.outputs.plot(title="original model") + results2.outputs.plot(title="generated model") plt.show() # ----------------------------------------------------- - # Example 2- variable timestep + # Example 2- variable timestep # Here we will create a model to work with any timestep # We do this by adding timestep as a variable in the model # ----------------------------------------------------- # Step 1: Generate additional data - # We will use data generated above, but we also want data at additional timesteps - print('\n------------------------------------------\nExample 2...') - print('Generating additional 
data...') - data_half = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2) - data_quarter = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4) - data_twice = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2) - data_four = m.simulate_to_threshold(future_loading, events='impact', save_freq=TIMESTEP*4, dt=TIMESTEP*4) + # We will use data generated above, but we also want data at additional timesteps + print("\n------------------------------------------\nExample 2...") + print("Generating additional data...") + data_half = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP / 2, dt=TIMESTEP / 2 + ) + data_quarter = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP / 4, dt=TIMESTEP / 4 + ) + data_twice = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP * 2, dt=TIMESTEP * 2 + ) + data_four = m.simulate_to_threshold( + future_loading, events="impact", save_freq=TIMESTEP * 4, dt=TIMESTEP * 4 + ) # Step 2: Data Prep # We need to add the timestep as a input u = np.array([[TIMESTEP] for _ in data.inputs]) - u_half = np.array([[TIMESTEP/2] for _ in data_half.inputs]) - u_quarter = np.array([[TIMESTEP/4] for _ in data_quarter.inputs]) - u_twice = np.array([[TIMESTEP*2] for _ in data_twice.inputs]) - u_four = np.array([[TIMESTEP*4] for _ in data_four.inputs]) + u_half = np.array([[TIMESTEP / 2] for _ in data_half.inputs]) + u_quarter = np.array([[TIMESTEP / 4] for _ in data_quarter.inputs]) + u_twice = np.array([[TIMESTEP * 2] for _ in data_twice.inputs]) + u_four = np.array([[TIMESTEP * 4] for _ in data_four.inputs]) input_data = [u, u_half, u_quarter, u_twice, u_four] - output_data = [data.outputs, data_half.outputs, data_quarter.outputs, data_twice.outputs, data_four.outputs] + output_data = [ + data.outputs, + data_half.outputs, + data_quarter.outputs, + data_twice.outputs, + data_four.outputs, + ] # Step 3: Generate Model - print('Building model...') + print("Building model...") m3 = LSTMStateTransitionModel.from_data( inputs=input_data, outputs=output_data, window=4, epochs=30, - input_keys=['dt'], - output_keys=['x']) + input_keys=["dt"], + output_keys=["x"], + ) # Note, since we're generating from a model, we could also have done this: # m3 = LSTMStateTransitionModel.from_model( # m, @@ -134,21 +155,25 @@ def future_loading2(t, x=None): def future_loading3(t, x=None): nonlocal t_counter, x_counter - z = m3.InputContainer({'x_t-1': x_counter['x'], 'dt': t - t_counter}) + z = m3.InputContainer({"x_t-1": x_counter["x"], "dt": t - t_counter}) x_counter = m.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z # Use new dt, not used in training - # Using a dt not used in training will demonstrate the model's + # Using a dt not used in training will demonstrate the model's # ability to handle different timesteps not part of training set - data = m.simulate_to(data.times[-1], future_loading, dt=TIMESTEP*3, save_freq=TIMESTEP*3) - results3 = m3.simulate_to(data.times[-1], future_loading3, dt=TIMESTEP*3, save_freq=TIMESTEP*3) + data = m.simulate_to( + data.times[-1], future_loading, dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) + results3 = m3.simulate_to( + data.times[-1], future_loading3, dt=TIMESTEP * 3, save_freq=TIMESTEP * 3 + ) # Step 5: Compare Results - print('Comparing results...') - data.outputs.plot(title='original model') - 
results3.outputs.plot(title='generated model') + print("Comparing results...") + data.outputs.plot(title="original model") + results3.outputs.plot(title="generated model") plt.show() # ----------------------------------------------------- @@ -157,10 +182,13 @@ def future_loading3(t, x=None): # For this example we will use the BatteryElectroChemEOD model # We also include the event state (SOC) # ----------------------------------------------------- - print('\n------------------------------------------\nExample 3...') - print('Generating data...') + print("\n------------------------------------------\nExample 3...") + print("Generating data...") batt = BatteryElectroChemEOD(process_noise=0, measurement_noise=0) - future_loading_eqns = [lambda t, x=None, load=load: batt.InputContainer({'i': 1+1.5*load}) for load in range(6)] + future_loading_eqns = [ + lambda t, x=None, load=load: batt.InputContainer({"i": 1 + 1.5 * load}) + for load in range(6) + ] # Generate data with different loading and step sizes # Adding the step size as an element of the output input_data = [] @@ -168,18 +196,23 @@ def future_loading3(t, x=None): es_data = [] t_met_data = [] for i in range(9): - dt = i/3+0.25 + dt = i / 3 + 0.25 for loading_eqn in future_loading_eqns: d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt) - input_data.append(np.array([np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float)) + input_data.append( + np.array( + [np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], + dtype=float, + ) + ) output_data.append(d.outputs) es_data.append(d.event_states) - t_met = [[False]for _ in d.times] + t_met = [[False] for _ in d.times] t_met[-1][0] = True # Threshold has been met at the last timestep t_met_data.append(t_met) # Step 2: Generate Model - print('Building model...') + print("Building model...") m_batt = LSTMStateTransitionModel.from_data( inputs=input_data, outputs=output_data, @@ -188,9 +221,10 @@ def future_loading3(t, x=None): window=12, epochs=10, units=64, # Additional units given the increased complexity of the system - input_keys=['i', 'dt'], - output_keys=['t', 'v'], - event_keys=['EOD']) + input_keys=["i", "dt"], + output_keys=["t", "v"], + event_keys=["EOD"], + ) # Take a look at the training history. m_batt.plot_history() @@ -201,32 +235,35 @@ def future_loading3(t, x=None): x_counter = batt.initialize() def future_loading(t, x=None): - return batt.InputContainer({'i': 3}) + return batt.InputContainer({"i": 3}) - def future_loading2(t, x = None): + def future_loading2(t, x=None): nonlocal t_counter, x_counter z = batt.output(x_counter) - z = m_batt.InputContainer({'i': 3, 't_t-1': z['t'], 'v_t-1': z['v'], 'dt': t - t_counter}) + z = m_batt.InputContainer( + {"i": 3, "t_t-1": z["t"], "v_t-1": z["v"], "dt": t - t_counter} + ) x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter) t_counter = t return z - # Use a new dt, not used in training. - # Using a dt not used in training will demonstrate the model's + # Use a new dt, not used in training. 
+ # Using a dt not used in training will demonstrate the model's # ability to handle different timesteps not part of training set data = batt.simulate_to_threshold(future_loading, dt=1, save_freq=1) results = m_batt.simulate_to_threshold(future_loading2, dt=1, save_freq=1) # Step 5: Compare Results - print('Comparing results...') - data.outputs.plot(title='original model', compact=False) - results.outputs.plot(title='generated model', compact=False) - data.event_states.plot(title='original model', compact=False) - results.event_states.plot(title='generated model', compact=False) + print("Comparing results...") + data.outputs.plot(title="original model", compact=False) + results.outputs.plot(title="generated model", compact=False) + data.event_states.plot(title="original model", compact=False) + results.event_states.plot(title="generated model", compact=False) plt.show() # This last example isn't a perfect fit, but it matches the behavior # well, especially the voltage curve -if __name__ == '__main__': + +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/e94f95102fd9281aa7355e7b9ceeb60a/benchmarking.py b/docs/_downloads/e94f95102fd9281aa7355e7b9ceeb60a/benchmarking.py index 8756c210..a57ac19c 100644 --- a/docs/_downloads/e94f95102fd9281aa7355e7b9ceeb60a/benchmarking.py +++ b/docs/_downloads/e94f95102fd9281aa7355e7b9ceeb60a/benchmarking.py @@ -8,25 +8,30 @@ from progpy.models import BatteryCircuit from timeit import timeit + def run_example(): # Step 1: Create a model object batt = BatteryCircuit() - - # Step 2: Define future loading function - loading = batt.InputContainer({'i': 2}) # Constant loading + + # Step 2: Define future loading function + loading = batt.InputContainer({"i": 2}) # Constant loading + def future_loading(t, x=None): # Constant Loading return loading # Step 3: Benchmark simulation of 600 seconds - print('Benchmarking...') - def sim(): + print("Benchmarking...") + + def sim(): batt.simulate_to(600, future_loading) + time = timeit(sim, number=500) # Print results - print('Simulation Time: {} ms/sim'.format(time)) + print("Simulation Time: {} ms/sim".format(time)) + -# This allows the module to be executed directly -if __name__=='__main__': +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/ea97230ef09e27d10dd5f70f57cc729a/predict_specific_event.py b/docs/_downloads/ea97230ef09e27d10dd5f70f57cc729a/predict_specific_event.py new file mode 100644 index 00000000..72c01a91 --- /dev/null +++ b/docs/_downloads/ea97230ef09e27d10dd5f70f57cc729a/predict_specific_event.py @@ -0,0 +1,42 @@ +# Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. + +""" +In this example we are using the UTPredictor to predict a specific event, in this case impact. This will then ignore the other events which are not of interest. 
+""" + +from progpy import state_estimators, predictors +from progpy.models.thrown_object import ThrownObject + + +def run_example(): + ## Setup + m = ThrownObject() + initial_state = m.initialize() + load = m.InputContainer({}) # Optimization - create once + + ## State Estimation - perform a single ukf state estimate step + filt = state_estimators.UnscentedKalmanFilter(m, initial_state) + filt.estimate(0.1, {}, m.output(initial_state)) + + ## Prediction - Predict EOD given current state + # Setup prediction + pred = predictors.UnscentedTransformPredictor(m) + + # Predict with a step size of 0.1 + mc_results = pred.predict(filt.x, dt=0.1, save_freq=1, events=["impact"]) + + # Print Results + for i, time in enumerate(mc_results.times): + print("\nt = {}".format(time)) + print("\tu = {}".format(mc_results.inputs.snapshot(i).mean)) + print("\tx = {}".format(mc_results.states.snapshot(i).mean)) + print("\tz = {}".format(mc_results.outputs.snapshot(i).mean)) + print("\tevent state = {}".format(mc_results.states.snapshot(i).mean)) + + # Note only impact event is shown here + print("\nToE:", mc_results.time_of_event.mean) + + +# This allows the module to be executed directly +if __name__ == "__main__": + run_example() diff --git a/docs/_downloads/ecc8413bae90e86db76ac158699502d4/growth.ipynb b/docs/_downloads/ecc8413bae90e86db76ac158699502d4/growth.ipynb index fabe2d29..12b445ca 100644 --- a/docs/_downloads/ecc8413bae90e86db76ac158699502d4/growth.ipynb +++ b/docs/_downloads/ecc8413bae90e86db76ac158699502d4/growth.ipynb @@ -1,54 +1,144 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample demonstrating the Paris Law Crack Growth Equation\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from progpy.models.paris_law import ParisLawCrackGrowth \nimport matplotlib.pyplot as plt\nimport csv\nimport os\n\ndef run_example(): \n # Step 1: Create a model object\n m = ParisLawCrackGrowth(process_noise = 0)\n \n # Step 2: Define future loading function \n def future_loading(t, x=None):\n #variable (piece-wise) future loading scheme \n #inputs are ['k_min', 'k_max']\n if (t < 500):\n k_min = 12\n k_max = 24\n elif (t < 750):\n k_min = 8\n k_max = 32\n else:\n k_min = 0\n k_max = 28\n return m.InputContainer({'k_min': k_min, 'k_max': k_max})\n\n # Step 3: Estimate parameters\n # We do not know the model parameters for this system, \n # so we need to estimate it using data collected from the system\n # First we have to import some data from the real system\n # This is what we use to estimate parameters\n times = []\n inputs = []\n outputs = []\n\n #Finds file path\n csv_dir = os.path.join(os.path.dirname(__file__), 'growth.csv')\n\n #Reads csv file\n try:\n with open(csv_dir, newline='') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|' , quoting=csv.QUOTE_NONNUMERIC)\n for row in data:\n times.append(row[0])\n inputs.append({'k_min': row[1], 'k_max': row[2]})\n outputs.append({'c_l': row[3]})\n except FileNotFoundError:\n print(\"No data file found\")\n\n # Estimates the model parameters\n keys = ['c', 'm']\n\n print('Model configuration before')\n for key in keys:\n print(\"-\", key, m.parameters[key])\n print(' Error: ', m.calc_error(times, inputs, outputs, dt=10))\n\n m.estimate_params([(times, 
inputs, outputs)], keys, dt=10)\n\n print('\\nOptimized configuration')\n for key in keys:\n print(\"-\", key, m.parameters[key])\n print(' Error: ', m.calc_error(times, inputs, outputs, dt=10))\n\n # Step 4: Simulate to threshold\n print('\\n\\n------------------------------------------------')\n print('Simulating to threshold\\n\\n')\n options = {\n 'save_freq': 10, # Frequency at which results are saved\n 'dt': 10, # Timestep\n 'print': True,\n 'horizon': 1e5, # Horizon\n }\n\n (times, inputs, _, outputs, event_states) = m.simulate_to_threshold(future_loading, **options)\n\n # Step 5: Plot Results\n # crack length\n # plot event state\n\n inputs.plot(ylabel='Stress Intensity')\n event_states.plot(ylabel= 'CGF')\n outputs.plot(ylabel= {'c_l': \"Crack Length\"}, compact= False)\n plt.show()\n\nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample demonstrating the Paris Law Crack Growth Equation\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from progpy.models.paris_law import ParisLawCrackGrowth\n", + "import matplotlib.pyplot as plt\n", + "import csv\n", + "import os\n", + "\n", + "\n", + "def run_example():\n", + " # Step 1: Create a model object\n", + " m = ParisLawCrackGrowth(process_noise=0)\n", + "\n", + " # Step 2: Define future loading function\n", + " def future_loading(t, x=None):\n", + " # variable (piece-wise) future loading scheme\n", + " # inputs are ['k_min', 'k_max']\n", + " if t < 500:\n", + " k_min = 12\n", + " k_max = 24\n", + " elif t < 750:\n", + " k_min = 8\n", + " k_max = 32\n", + " else:\n", + " k_min = 0\n", + " k_max = 28\n", + " return m.InputContainer({\"k_min\": k_min, \"k_max\": k_max})\n", + "\n", + " # Step 3: Estimate parameters\n", + " # We do not know the model parameters for this system,\n", + " # so we need to estimate it using data collected from the system\n", + " # First we have to import some data from the real system\n", + " # This is what we use to estimate parameters\n", + " times = []\n", + " inputs = []\n", + " outputs = []\n", + "\n", + " # Finds file path\n", + " csv_dir = os.path.join(os.path.dirname(__file__), \"growth.csv\")\n", + "\n", + " # Reads csv file\n", + " try:\n", + " with open(csv_dir, newline=\"\") as csvfile:\n", + " data = csv.reader(\n", + " csvfile, delimiter=\",\", quotechar=\"|\", quoting=csv.QUOTE_NONNUMERIC\n", + " )\n", + " for row in data:\n", + " times.append(row[0])\n", + " inputs.append({\"k_min\": row[1], \"k_max\": row[2]})\n", + " outputs.append({\"c_l\": row[3]})\n", + " except FileNotFoundError:\n", + " print(\"No data file found\")\n", + "\n", + " # Estimates the model parameters\n", + " keys = [\"c\", \"m\"]\n", + "\n", + " print(\"Model configuration before\")\n", + " for key in keys:\n", + " print(\"-\", key, m.parameters[key])\n", + " print(\" Error: \", 
m.calc_error(times, inputs, outputs, dt=10))\n", + "\n", + " m.estimate_params([(times, inputs, outputs)], keys, dt=10)\n", + "\n", + " print(\"\\nOptimized configuration\")\n", + " for key in keys:\n", + " print(\"-\", key, m.parameters[key])\n", + " print(\" Error: \", m.calc_error(times, inputs, outputs, dt=10))\n", + "\n", + " # Step 4: Simulate to threshold\n", + " print(\"\\n\\n------------------------------------------------\")\n", + " print(\"Simulating to threshold\\n\\n\")\n", + " options = {\n", + " \"save_freq\": 10, # Frequency at which results are saved\n", + " \"dt\": 10, # Timestep\n", + " \"print\": True,\n", + " \"horizon\": 1e5, # Horizon\n", + " }\n", + "\n", + " (times, inputs, _, outputs, event_states) = m.simulate_to_threshold(\n", + " future_loading, **options\n", + " )\n", + "\n", + " # Step 5: Plot Results\n", + " # crack length\n", + " # plot event state\n", + "\n", + " inputs.plot(ylabel=\"Stress Intensity\")\n", + " event_states.plot(ylabel=\"CGF\")\n", + " outputs.plot(ylabel={\"c_l\": \"Crack Length\"}, compact=False)\n", + " plt.show()\n", + "\n", + "\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/ecd38bb3b1339cc33c6c2b46ac42acab/dynamic_step_size.ipynb b/docs/_downloads/ecd38bb3b1339cc33c6c2b46ac42acab/dynamic_step_size.ipynb index 2dac8abd..29c3359e 100644 --- a/docs/_downloads/ecd38bb3b1339cc33c6c2b46ac42acab/dynamic_step_size.ipynb +++ b/docs/_downloads/ecd38bb3b1339cc33c6c2b46ac42acab/dynamic_step_size.ipynb @@ -1,54 +1,117 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample demonstrating ways to use the dynamic step size feature. This feature allows users to define a time-step that changes with time or state. \n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import prog_models\nfrom progpy.models.thrown_object import ThrownObject\n\ndef run_example():\n print(\"EXAMPLE 1: dt of 1 until 8 sec, then 0.5\\n\\nSetting up...\\n\")\n # Step 1: Create instance of model\n m = ThrownObject()\n\n # Step 2: Setup for simulation \n def future_load(t, x=None):\n return {}\n\n # Step 3: Define dynamic step size function\n # This `next_time` function will specify what the next step of the simulation should be at any state and time. \n # f(x, t) -> (t, dt)\n def next_time(t, x):\n # In this example dt is a function of time. 
We will use a dt of 1 for the first 8 seconds, then 0.5 \n if t < 8:\n return 1\n return 0.5\n\n # Step 4: Simulate to impact\n # Here we're printing every time step so we can see the step size change\n print('\\n\\n------------------------------------------------')\n print('Simulating to threshold\\n\\n')\n (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, save_freq=1e-99, print=True, dt=next_time, threshold_keys=['impact'])\n\n # Example 2\n print(\"EXAMPLE 2: dt of 1 until impact event state 0.5, then 0.25 \\n\\nSetting up...\\n\")\n\n # Step 3: Define dynamic step size function\n # This `next_time` function will specify what the next step of the simulation should be at any state and time. \n # f(x, t) -> (t, dt)\n def next_time(t, x):\n # In this example dt is a function of state. Uses a dt of 1 until impact event state 0.5, then 0.25\n event_state = m.event_state(x)\n if event_state['impact'] < 0.5:\n return 0.25\n return 1\n\n # Step 4: Simulate to impact\n # Here we're printing every time step so we can see the step size change\n print('\\n\\n------------------------------------------------')\n print('Simulating to threshold\\n\\n')\n (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, save_freq=1e-99, print=True, dt=next_time, threshold_keys=['impact'])\n\n# This allows the module to be executed directly \nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample demonstrating ways to use the dynamic step size feature. This feature allows users to define a time-step that changes with time or state. \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from progpy.models.thrown_object import ThrownObject\n", + "\n", + "\n", + "def run_example():\n", + " print(\"EXAMPLE 1: dt of 1 until 8 sec, then 0.5\\n\\nSetting up...\\n\")\n", + " # Step 1: Create instance of model\n", + " m = ThrownObject()\n", + "\n", + " # Step 2: Setup for simulation\n", + " def future_load(t, x=None):\n", + " return {}\n", + "\n", + " # Step 3: Define dynamic step size function\n", + " # This `next_time` function will specify what the next step of the simulation should be at any state and time.\n", + " # f(x, t) -> (t, dt)\n", + " def next_time(t, x):\n", + " # In this example dt is a function of time. 
We will use a dt of 1 for the first 8 seconds, then 0.5\n", + " if t < 8:\n", + " return 1\n", + " return 0.5\n", + "\n", + " # Step 4: Simulate to impact\n", + " # Here we're printing every time step so we can see the step size change\n", + " print(\"\\n\\n------------------------------------------------\")\n", + " print(\"Simulating to threshold\\n\\n\")\n", + " (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(\n", + " future_load,\n", + " save_freq=1e-99,\n", + " print=True,\n", + " dt=next_time,\n", + " threshold_keys=[\"impact\"],\n", + " )\n", + "\n", + " # Example 2\n", + " print(\n", + " \"EXAMPLE 2: dt of 1 until impact event state 0.5, then 0.25 \\n\\nSetting up...\\n\"\n", + " )\n", + "\n", + " # Step 3: Define dynamic step size function\n", + " # This `next_time` function will specify what the next step of the simulation should be at any state and time.\n", + " # f(x, t) -> (t, dt)\n", + " def next_time(t, x):\n", + " # In this example dt is a function of state. Uses a dt of 1 until impact event state 0.5, then 0.25\n", + " event_state = m.event_state(x)\n", + " if event_state[\"impact\"] < 0.5:\n", + " return 0.25\n", + " return 1\n", + "\n", + " # Step 4: Simulate to impact\n", + " # Here we're printing every time step so we can see the step size change\n", + " print(\"\\n\\n------------------------------------------------\")\n", + " print(\"Simulating to threshold\\n\\n\")\n", + " (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(\n", + " future_load,\n", + " save_freq=1e-99,\n", + " print=True,\n", + " dt=next_time,\n", + " threshold_keys=[\"impact\"],\n", + " )\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/f10ab087990da5ce32635c5803a5e4a5/state_limits.ipynb b/docs/_downloads/f10ab087990da5ce32635c5803a5e4a5/state_limits.ipynb index b30a1766..4dd76432 100644 --- a/docs/_downloads/f10ab087990da5ce32635c5803a5e4a5/state_limits.ipynb +++ b/docs/_downloads/f10ab087990da5ce32635c5803a5e4a5/state_limits.ipynb @@ -1,54 +1,128 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample demonstrating when and how to identify model state limits. 
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from progpy.models.thrown_object import ThrownObject\nfrom math import inf\n\ndef run_example():\n # Demo model\n # Step 1: Create instance of model (without drag)\n m = ThrownObject( cd = 0 )\n\n # Step 2: Setup for simulation \n def future_load(t, x=None):\n return {}\n\n # add state limits\n m.state_limits = {\n # object may not go below ground height\n 'x': (0, inf),\n\n # object may not exceed the speed of light\n 'v': (-299792458, 299792458)\n }\n\n # Step 3: Simulate to impact\n event = 'impact'\n simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1)\n \n # Print states\n print('Example 1')\n for i, state in enumerate(simulated_results.states):\n print(f'State {i}: {state}')\n print()\n\n # Let's try setting x to a number outside of its bounds\n x0 = m.initialize(u = {}, z = {})\n x0['x'] = -1\n\n simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1, x = x0)\n\n # Print states\n print('Example 2')\n for i, state in enumerate(simulated_results.states):\n print('State ', i, ': ', state)\n print()\n\n # Let's see what happens when the objects speed aproaches its limit\n x0 = m.initialize(u = {}, z = {})\n x0['x'] = 1000000000\n x0['v'] = 0\n m.parameters['g'] = -50000000\n \n print('Example 3')\n simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=0.3, x = x0, print = True, progress = False)\n\n # Note that the limits can also be applied manually using the apply_limits function\n print('limiting states')\n x = {'x': -5, 'v': 3e8} # Too fast and below the ground\n print('\\t Pre-limit: {}'.format(x))\n x = m.apply_limits(x)\n print('\\t Post-limit: {}'.format(x))\n\n# This allows the module to be executed directly \nif __name__=='__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample demonstrating when and how to identify model state limits. 
\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from progpy.models.thrown_object import ThrownObject\n", + "from math import inf\n", + "\n", + "\n", + "def run_example():\n", + " # Demo model\n", + " # Step 1: Create instance of model (without drag)\n", + " m = ThrownObject(cd=0)\n", + "\n", + " # Step 2: Setup for simulation\n", + " def future_load(t, x=None):\n", + " return {}\n", + "\n", + " # add state limits\n", + " m.state_limits = {\n", + " # object may not go below ground height\n", + " \"x\": (0, inf),\n", + " # object may not exceed the speed of light\n", + " \"v\": (-299792458, 299792458),\n", + " }\n", + "\n", + " # Step 3: Simulate to impact\n", + " event = \"impact\"\n", + " simulated_results = m.simulate_to_threshold(\n", + " future_load, threshold_keys=[event], dt=0.005, save_freq=1\n", + " )\n", + "\n", + " # Print states\n", + " print(\"Example 1\")\n", + " for i, state in enumerate(simulated_results.states):\n", + " print(f\"State {i}: {state}\")\n", + " print()\n", + "\n", + " # Let's try setting x to a number outside of its bounds\n", + " x0 = m.initialize(u={}, z={})\n", + " x0[\"x\"] = -1\n", + "\n", + " simulated_results = m.simulate_to_threshold(\n", + " future_load, threshold_keys=[event], dt=0.005, save_freq=1, x=x0\n", + " )\n", + "\n", + " # Print states\n", + " print(\"Example 2\")\n", + " for i, state in enumerate(simulated_results.states):\n", + " print(\"State \", i, \": \", state)\n", + " print()\n", + "\n", + " # Let's see what happens when the objects speed aproaches its limit\n", + " x0 = m.initialize(u={}, z={})\n", + " x0[\"x\"] = 1000000000\n", + " x0[\"v\"] = 0\n", + " m.parameters[\"g\"] = -50000000\n", + "\n", + " print(\"Example 3\")\n", + " simulated_results = m.simulate_to_threshold(\n", + " future_load,\n", + " threshold_keys=[event],\n", + " dt=0.005,\n", + " save_freq=0.3,\n", + " x=x0,\n", + " print=True,\n", + " progress=False,\n", + " )\n", + "\n", + " # Note that the limits can also be applied manually using the apply_limits function\n", + " print(\"limiting states\")\n", + " x = {\"x\": -5, \"v\": 3e8} # Too fast and below the ground\n", + " print(\"\\t Pre-limit: {}\".format(x))\n", + " x = m.apply_limits(x)\n", + " print(\"\\t Post-limit: {}\".format(x))\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/f138ed85c70cb3f7616eb0b5ba690cff/composite_model.py b/docs/_downloads/f138ed85c70cb3f7616eb0b5ba690cff/composite_model.py index 10fa6ca0..8ff7ce62 100644 --- a/docs/_downloads/f138ed85c70cb3f7616eb0b5ba690cff/composite_model.py +++ b/docs/_downloads/f138ed85c70cb3f7616eb0b5ba690cff/composite_model.py @@ -4,12 +4,13 @@ """ Example illustrating how to use the CompositeModel class to create a composite model from multiple models. -This example creates a composite model of a DC motor with an Electronic Speed Controller and a propeller load. The three composite models are interrelated. 
The created composite model describes the nature of these interconnections. The resulting powertrain model is then simulated forward with time and the results are plotted.
+This example creates a composite model of a DC motor with an Electronic Speed Controller and a propeller load. The three component models are interrelated. The created composite model describes the nature of these interconnections. The resulting powertrain model is then simulated forward in time and the results are plotted.
 """
 
 from progpy.models import DCMotor, ESC, PropellerLoad
 from progpy import CompositeModel
 
+
 def run_example():
     # First, lets define the composite models
     m_motor = DCMotor()
@@ -19,35 +20,37 @@
     # Now let's combine them into a single composite model describing the behavior of a powertrain
     # This model will then behave as a single model
     m_powertrain = CompositeModel(
-        (m_esc, m_load, m_motor), 
-        connections = [
-            ('DCMotor.theta', 'ESC.theta'),
-            ('ESC.v_a', 'DCMotor.v_a'),
-            ('ESC.v_b', 'DCMotor.v_b'),
-            ('ESC.v_c', 'DCMotor.v_c'),
-            ('PropellerLoad.t_l', 'DCMotor.t_l'),
-            ('DCMotor.v_rot', 'PropellerLoad.v_rot')],
-        outputs = {'DCMotor.v_rot', 'DCMotor.theta'})
-    
+        (m_esc, m_load, m_motor),
+        connections=[
+            ("DCMotor.theta", "ESC.theta"),
+            ("ESC.v_a", "DCMotor.v_a"),
+            ("ESC.v_b", "DCMotor.v_b"),
+            ("ESC.v_c", "DCMotor.v_c"),
+            ("PropellerLoad.t_l", "DCMotor.t_l"),
+            ("DCMotor.v_rot", "PropellerLoad.v_rot"),
+        ],
+        outputs={"DCMotor.v_rot", "DCMotor.theta"},
+    )
+
     # Print out the inputs, states, and outputs of the composite model
-    print('Composite model of DCMotor, ESC, and Propeller load')
-    print('inputs: ', m_powertrain.inputs)
-    print('states: ', m_powertrain.states)
-    print('outputs: ', m_powertrain.outputs)
+    print("Composite model of DCMotor, ESC, and Propeller load")
+    print("inputs: ", m_powertrain.inputs)
+    print("states: ", m_powertrain.states)
+    print("outputs: ", m_powertrain.outputs)
 
     # Define future loading function - 100% duty all the time
     def future_loading(t, x=None):
-        return m_powertrain.InputContainer({
-            'ESC.duty': 1,
-            'ESC.v': 23
-        })
-    
+        return m_powertrain.InputContainer({"ESC.duty": 1, "ESC.v": 23})
+
     # Simulate to threshold
-    print('\n\n------------------------------------------------')
-    print('Simulating to threshold\n\n')
-    simulated_results = m_powertrain.simulate_to(2, future_loading, dt=2e-5, save_freq=0.1, print=True)
+    print("\n\n------------------------------------------------")
+    print("Simulating to threshold\n\n")
+    simulated_results = m_powertrain.simulate_to(
+        2, future_loading, dt=2e-5, save_freq=0.1, print=True
+    )
 
     simulated_results.outputs.plot()
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     run_example()
diff --git a/docs/_downloads/f18535a152570e60fa41f5fba932422a/future_loading.py b/docs/_downloads/f18535a152570e60fa41f5fba932422a/future_loading.py
index 78f72ebb..6519f1de 100644
--- a/docs/_downloads/f18535a152570e60fa41f5fba932422a/future_loading.py
+++ b/docs/_downloads/f18535a152570e60fa41f5fba932422a/future_loading.py
@@ -2,7 +2,7 @@
 # National Aeronautics and Space Administration. All Rights Reserved.
 
 """
-Example demonstrating ways to use future loading. 
+Example demonstrating ways to use future loading.
""" import matplotlib.pyplot as plt @@ -11,28 +11,32 @@ from progpy.models import BatteryCircuit from statistics import mean -def run_example(): + +def run_example(): m = BatteryCircuit() ## Example 1: Variable (piecewise) loading future_loading = Piecewise( - m.InputContainer, - [600, 900, 1800, 3000, float('inf')], - {'i': [2, 1, 4, 2, 3]}) - + m.InputContainer, [600, 900, 1800, 3000, float("inf")], {"i": [2, 1, 4, 2, 3]} + ) + # Simulate to threshold options = { - 'save_freq': 100, # Frequency at which results are saved - 'dt': 2 # Timestep + "save_freq": 100, # Frequency at which results are saved + "dt": 2, # Timestep } simulated_results = m.simulate_to_threshold(future_loading, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'Variable Load Current (amps)', xlabel='time (s)') - simulated_results.event_states.plot(ylabel = 'Variable Load Event State', xlabel='time (s)') - - ## Example 2: Moving Average loading - # This is useful in cases where you are running reoccuring simulations, and are measuring the actual load on the system, + simulated_results.inputs.plot( + ylabel="Variable Load Current (amps)", xlabel="time (s)" + ) + simulated_results.event_states.plot( + ylabel="Variable Load Event State", xlabel="time (s)" + ) + + ## Example 2: Moving Average loading + # This is useful in cases where you are running reoccuring simulations, and are measuring the actual load on the system, # but don't have a good way of predicting it, and you expect loading to be steady from progpy.loading import MovingAverage @@ -40,54 +44,83 @@ def run_example(): future_loading = MovingAverage(m.InputContainer) # Now lets say you have some measured loads to add - measured_loads = [10, 11.5, 12.0, 8, 2.1, 1.8, 1.99, 2.0, 2.01, 1.89, 1.92, 2.01, 2.1, 2.2] - + measured_loads = [ + 10, + 11.5, + 12.0, + 8, + 2.1, + 1.8, + 1.99, + 2.0, + 2.01, + 1.89, + 1.92, + 2.01, + 2.1, + 2.2, + ] + # We're going to feed these into the future loading eqn for load in measured_loads: - future_loading.add_load({'i': load}) - + future_loading.add_load({"i": load}) + # Now the future_loading eqn is setup to use the moving average of whats been seen # Simulate to threshold simulated_results = m.simulate_to_threshold(future_loading, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'Moving Average Current (amps)', xlabel='time (s)') - simulated_results.event_states.plot(ylabel = 'Moving Average Event State', xlabel='time (s)') + simulated_results.inputs.plot( + ylabel="Moving Average Current (amps)", xlabel="time (s)" + ) + simulated_results.event_states.plot( + ylabel="Moving Average Event State", xlabel="time (s)" + ) # In this case, this estimate is wrong because loading will not be steady, but at least it would give you an approximation. - # If more measurements are received, the user could estimate the moving average here and then run a new simulation. + # If more measurements are received, the user could estimate the moving average here and then run a new simulation. - ## Example 3: Gaussian Distribution - # In this example we will still be doing a variable loading like the first option, but we are going to use a + ## Example 3: Gaussian Distribution + # In this example we will still be doing a variable loading like the first option, but we are going to use a # gaussian distribution for each input. 
future_loading = Piecewise( - m.InputContainer, - [600, 900, 1800, 3000, float('inf')], - {'i': [2, 1, 4, 2, 3]}) + m.InputContainer, [600, 900, 1800, 3000, float("inf")], {"i": [2, 1, 4, 2, 3]} + ) future_loading_with_noise = GaussianNoiseLoadWrapper(future_loading, 0.2) # Simulate to threshold simulated_results = m.simulate_to_threshold(future_loading_with_noise, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'Variable Gaussian Current (amps)', xlabel='time (s)') - simulated_results.event_states.plot(ylabel = 'Variable Gaussian Event State', xlabel='time (s)') + simulated_results.inputs.plot( + ylabel="Variable Gaussian Current (amps)", xlabel="time (s)" + ) + simulated_results.event_states.plot( + ylabel="Variable Gaussian Event State", xlabel="time (s)" + ) # Example 4: Gaussian- increasing with time - # For this we're using moving average. This is realistic because the further out from current time you get, - # the more uncertainty there is in your prediction. + # For this we're using moving average. This is realistic because the further out from current time you get, + # the more uncertainty there is in your prediction. def future_loading(t, x=None): - std = future_loading.base_std + future_loading.std_slope * (t - future_loading.t) - return {key : normal(future_loading.load[key], std) for key in future_loading.load.keys()} - future_loading.load = {key : 0 for key in m.inputs} + std = future_loading.base_std + future_loading.std_slope * ( + t - future_loading.t + ) + return { + key: normal(future_loading.load[key], std) + for key in future_loading.load.keys() + } + + future_loading.load = {key: 0 for key in m.inputs} future_loading.base_std = 0.001 future_loading.std_slope = 1e-4 future_loading.t = 0 # Lets define another function to handle the moving average logic window = 10 # Number of elements in window + def moving_avg(i): for key in m.inputs: moving_avg.loads[key].append(i[key]) @@ -95,51 +128,78 @@ def moving_avg(i): del moving_avg.loads[key][0] # Remove first item # Update future loading eqn - future_loading.load = {key : mean(moving_avg.loads[key]) for key in m.inputs} - moving_avg.loads = {key : [] for key in m.inputs} + future_loading.load = {key: mean(moving_avg.loads[key]) for key in m.inputs} - # OK, we've setup the logic of the moving average. + moving_avg.loads = {key: [] for key in m.inputs} + + # OK, we've setup the logic of the moving average. # Now lets say you have some measured loads to add - measured_loads = [10, 11.5, 12.0, 8, 2.1, 1.8, 1.99, 2.0, 2.01, 1.89, 1.92, 2.01, 2.1, 2.2] - + measured_loads = [ + 10, + 11.5, + 12.0, + 8, + 2.1, + 1.8, + 1.99, + 2.0, + 2.01, + 1.89, + 1.92, + 2.01, + 2.1, + 2.2, + ] + # We're going to feed these into the future loading eqn for load in measured_loads: - moving_avg({'i': load}) + moving_avg({"i": load}) # Simulate to threshold simulated_results = m.simulate_to_threshold(future_loading, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'Moving Average Current (amps)', xlabel='time (s)') - simulated_results.event_states.plot(ylabel = 'Moving Average Event State', xlabel='time (s)') - + simulated_results.inputs.plot( + ylabel="Moving Average Current (amps)", xlabel="time (s)" + ) + simulated_results.event_states.plot( + ylabel="Moving Average Event State", xlabel="time (s)" + ) + # In this example future_loading.t has to be updated with current time before each prediction. 
- + # Example 5 Function of state # here we're pretending that input is a function of SOC. It increases as we approach SOC def future_loading(t, x=None): if x is not None: event_state = future_loading.event_state(x) - return m.InputContainer({'i': future_loading.start + (1-event_state['EOD']) * future_loading.slope}) # default - return m.InputContainer({'i': future_loading.start}) + return m.InputContainer( + { + "i": future_loading.start + + (1 - event_state["EOD"]) * future_loading.slope + } + ) # default + return m.InputContainer({"i": future_loading.start}) + future_loading.t = 0 future_loading.event_state = m.event_state - future_loading.slope = 2 # difference between input with EOD = 1 and 0. + future_loading.slope = 2 # difference between input with EOD = 1 and 0. future_loading.start = 0.5 # Simulate to threshold simulated_results = m.simulate_to_threshold(future_loading, **options) # Now lets plot the inputs and event_states - simulated_results.inputs.plot(ylabel = 'f(x) Current (amps)', xlabel='time (s)') - simulated_results.event_states.plot(ylabel = 'f(x) Event State', xlabel='time (s)') + simulated_results.inputs.plot(ylabel="f(x) Current (amps)", xlabel="time (s)") + simulated_results.event_states.plot(ylabel="f(x) Event State", xlabel="time (s)") # In this example future_loading.t has to be updated with current time before each prediction. # Show plots plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/f2b2d938e4cca8a4e5b136e0ca057d3b/sim_pump.ipynb b/docs/_downloads/f2b2d938e4cca8a4e5b136e0ca057d3b/sim_pump.ipynb index 9fc669ba..160dc3c3 100644 --- a/docs/_downloads/f2b2d938e4cca8a4e5b136e0ca057d3b/sim_pump.ipynb +++ b/docs/_downloads/f2b2d938e4cca8a4e5b136e0ca057d3b/sim_pump.ipynb @@ -1,54 +1,130 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample of a centrifugal pump being simulated until threshold is met. 
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from progpy.models import CentrifugalPump\n\ndef run_example(): \n # Step 1: Setup Pump\n pump = CentrifugalPump(process_noise= 0)\n pump.parameters['x0']['wA'] = 0.01 # Set Wear Rate\n\n # Step 2: Setup Future Loading\n cycle_time = 3600\n def future_loading(t, x=None):\n t = t % cycle_time\n if t < cycle_time/2.0:\n V = 471.2389\n elif t < cycle_time/2 + 100:\n V = 471.2389 + (t-cycle_time/2)\n elif t < cycle_time - 100:\n V = 571.2389\n else:\n V = 471.2398 - (t-cycle_time)\n\n return pump.InputContainer({\n 'Tamb': 290,\n 'V': V,\n 'pdisch': 928654, \n 'psuc': 239179, \n 'wsync': V * 0.8\n })\n\n # Step 3: Sim\n first_output = pump.output(pump.initialize(future_loading(0),{}))\n config = {\n 'horizon': 1e5,\n 'save_freq': 1e3,\n 'print': True\n }\n simulated_results = pump.simulate_to_threshold(future_loading, first_output, **config)\n\n # Step 4: Plot Results\n from progpy.visualize import plot_timeseries\n plot_timeseries(simulated_results.times, simulated_results.inputs, options={'compact': False, 'title': 'Inputs',\n 'xlabel': 'time', 'ylabel':{lbl: lbl for lbl in pump.inputs}})\n plot_timeseries(simulated_results.times, simulated_results.states, options={'compact': False, 'title': 'States', 'xlabel': 'time', 'ylabel': ''})\n plot_timeseries(simulated_results.times, simulated_results.outputs, options={'compact': False, 'title': 'Outputs', 'xlabel': 'time', 'ylabel': ''})\n plot_timeseries(simulated_results.times, simulated_results.event_states, options={'compact': False, 'title': 'Events', 'xlabel': 'time', 'ylabel': ''})\n thresholds_met = [pump.threshold_met(x) for x in simulated_results.states]\n plot_timeseries(simulated_results.times, thresholds_met, options={'compact': True, 'title': 'Events', 'xlabel': 'time', 'ylabel': ''}, legend = {'display': True})\n\n import matplotlib.pyplot as plt \n plt.show()\n\n# This allows the module to be executed directly \nif __name__ == '__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample of a centrifugal pump being simulated until threshold is met. 
\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from progpy.models import CentrifugalPump\n", + "\n", + "\n", + "def run_example():\n", + " # Step 1: Setup Pump\n", + " pump = CentrifugalPump(process_noise=0)\n", + " pump.parameters[\"x0\"][\"wA\"] = 0.01 # Set Wear Rate\n", + "\n", + " # Step 2: Setup Future Loading\n", + " cycle_time = 3600\n", + "\n", + " def future_loading(t, x=None):\n", + " t = t % cycle_time\n", + " if t < cycle_time / 2.0:\n", + " V = 471.2389\n", + " elif t < cycle_time / 2 + 100:\n", + " V = 471.2389 + (t - cycle_time / 2)\n", + " elif t < cycle_time - 100:\n", + " V = 571.2389\n", + " else:\n", + " V = 471.2398 - (t - cycle_time)\n", + "\n", + " return pump.InputContainer(\n", + " {\"Tamb\": 290, \"V\": V, \"pdisch\": 928654, \"psuc\": 239179, \"wsync\": V * 0.8}\n", + " )\n", + "\n", + " # Step 3: Sim\n", + " first_output = pump.output(pump.initialize(future_loading(0), {}))\n", + " config = {\"horizon\": 1e5, \"save_freq\": 1e3, \"print\": True}\n", + " simulated_results = pump.simulate_to_threshold(\n", + " future_loading, first_output, **config\n", + " )\n", + "\n", + " # Step 4: Plot Results\n", + " from progpy.visualize import plot_timeseries\n", + "\n", + " plot_timeseries(\n", + " simulated_results.times,\n", + " simulated_results.inputs,\n", + " options={\n", + " \"compact\": False,\n", + " \"title\": \"Inputs\",\n", + " \"xlabel\": \"time\",\n", + " \"ylabel\": {lbl: lbl for lbl in pump.inputs},\n", + " },\n", + " )\n", + " plot_timeseries(\n", + " simulated_results.times,\n", + " simulated_results.states,\n", + " options={\"compact\": False, \"title\": \"States\", \"xlabel\": \"time\", \"ylabel\": \"\"},\n", + " )\n", + " plot_timeseries(\n", + " simulated_results.times,\n", + " simulated_results.outputs,\n", + " options={\"compact\": False, \"title\": \"Outputs\", \"xlabel\": \"time\", \"ylabel\": \"\"},\n", + " )\n", + " plot_timeseries(\n", + " simulated_results.times,\n", + " simulated_results.event_states,\n", + " options={\"compact\": False, \"title\": \"Events\", \"xlabel\": \"time\", \"ylabel\": \"\"},\n", + " )\n", + " thresholds_met = [pump.threshold_met(x) for x in simulated_results.states]\n", + " plot_timeseries(\n", + " simulated_results.times,\n", + " thresholds_met,\n", + " options={\"compact\": True, \"title\": \"Events\", \"xlabel\": \"time\", \"ylabel\": \"\"},\n", + " legend={\"display\": True},\n", + " )\n", + "\n", + " import matplotlib.pyplot as plt\n", + "\n", + " plt.show()\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/f39895005dabcb4262926b776151bd9a/eol_event.py b/docs/_downloads/f39895005dabcb4262926b776151bd9a/eol_event.py index ef9e8d73..bb51b129 100644 --- a/docs/_downloads/f39895005dabcb4262926b776151bd9a/eol_event.py +++ b/docs/_downloads/f39895005dabcb4262926b776151bd9a/eol_event.py @@ -4,7 +4,7 @@ Method: An instance of ThrownObject is used for this example. 
In this case it is trivial because the event 'falling' will always occur before 'impact', but for some other models that might not be true. The ThrownObject class is subclassed to add a new event 'EOL' which occurs if any other event occurs. The new model is then instantiated and used for prognostics like in basic_example. Prediction specifically specifies EOL as the event to be predicted. -Results: +Results: i) Predicted future values (inputs, states, outputs, event_states) with uncertainty from prediction ii) Time the event 'EOL' is predicted to occur (with uncertainty) @@ -16,35 +16,40 @@ from progpy.predictors import MonteCarlo from progpy.uncertain_data import ScalarData + def run_example(): - # Step 1: Define subclass with EOL event + # Step 1: Define subclass with EOL event # Similar to the progpy 'events' example, but with an EOL event class ThrownObjectWithEOL(ThrownObject): - events = ThrownObject.events + ['EOL'] + events = ThrownObject.events + ["EOL"] def event_state(self, x): es = super().event_state(x) # Add EOL Event (minimum event state) - es['EOL'] = min(list(es.values())) + es["EOL"] = min(list(es.values())) return es - + def threshold_met(self, x): t_met = super().threshold_met(x) # Add EOL Event (if any events have occured) - t_met['EOL'] = any(list(t_met.values())) + t_met["EOL"] = any(list(t_met.values())) return t_met - + # Step 2: Create instance of subclass m = ThrownObjectWithEOL(process_noise=1) # Step 3: Setup for prediction pred = MonteCarlo(m) + def future_loading(t=None, x=None): return {} # No future loading for ThrownObject + state = ScalarData(m.initialize()) # Step 4: Predict to EOL event - pred_results = pred.predict(state, future_loading, events=['EOL'], dt=0.01, n_samples=50) + pred_results = pred.predict( + state, future_loading, events=["EOL"], dt=0.01, n_samples=50 + ) # In this case EOL is when the object starts falling # But for some models where events aren't sequential, there might be a mixture of events in the EOL @@ -52,6 +57,7 @@ def future_loading(t=None, x=None): pred_results.time_of_event.plot_hist() plt.show() -# This allows the module to be executed directly -if __name__ == '__main__': + +# This allows the module to be executed directly +if __name__ == "__main__": run_example() diff --git a/docs/_downloads/fa4613fdace3268a5393e41d4d22b775/param_est.ipynb b/docs/_downloads/fa4613fdace3268a5393e41d4d22b775/param_est.ipynb index 32e1cbaf..d7f5d6d5 100644 --- a/docs/_downloads/fa4613fdace3268a5393e41d4d22b775/param_est.ipynb +++ b/docs/_downloads/fa4613fdace3268a5393e41d4d22b775/param_est.ipynb @@ -1,54 +1,100 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample demonstrating the model parameter estimation feature. \n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from progpy.models.thrown_object import ThrownObject\n\ndef run_example():\n # Step 1: Build the model with your best guess in parameters\n # Here we're guessing that the thrower is 20 meters tall. Obviously not true!\n # Let's see if parameter estimation can fix this\n m = ThrownObject(thrower_height=20)\n\n # Step 2: Collect data from the use of the system. 
Let's pretend we threw the ball once, and collected position measurements \n times = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n inputs = [{}]*9\n outputs = [\n {'x': 1.83},\n {'x': 36.95},\n {'x': 62.36},\n {'x': 77.81},\n {'x': 83.45},\n {'x': 79.28},\n {'x': 65.3},\n {'x': 41.51},\n {'x': 7.91},\n ]\n\n # Step 3: Identify the parameters to be estimated\n keys = ['thrower_height', 'throwing_speed']\n\n # Printing state before\n print('Model configuration before')\n for key in keys:\n print(\"-\", key, m.parameters[key])\n print(' Error: ', m.calc_error(times, inputs, outputs, dt=1e-4))\n\n # Step 4: Run parameter estimation with data\n m.estimate_params([(times, inputs, outputs)], keys, dt=0.01)\n\n # Print result\n print('\\nOptimized configuration')\n for key in keys:\n print(\"-\", key, m.parameters[key])\n print(' Error: ', m.calc_error(times, inputs, outputs, dt=1e-4))\n \n # Sure enough- parameter estimation determined that the thrower's height wasn't 20 m, instead was closer to 1.9m, a much more reasonable height!\n\nif __name__=='__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample demonstrating the model parameter estimation feature. \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from progpy.models.thrown_object import ThrownObject\n", + "\n", + "\n", + "def run_example():\n", + " # Step 1: Build the model with your best guess in parameters\n", + " # Here we're guessing that the thrower is 20 meters tall. Obviously not true!\n", + " # Let's see if parameter estimation can fix this\n", + " m = ThrownObject(thrower_height=20)\n", + "\n", + " # Step 2: Collect data from the use of the system. 
Let's pretend we threw the ball once, and collected position measurements\n", + " times = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n", + " inputs = [{}] * 9\n", + " outputs = [\n", + " {\"x\": 1.83},\n", + " {\"x\": 36.95},\n", + " {\"x\": 62.36},\n", + " {\"x\": 77.81},\n", + " {\"x\": 83.45},\n", + " {\"x\": 79.28},\n", + " {\"x\": 65.3},\n", + " {\"x\": 41.51},\n", + " {\"x\": 7.91},\n", + " ]\n", + "\n", + " # Step 3: Identify the parameters to be estimated\n", + " keys = [\"thrower_height\", \"throwing_speed\"]\n", + "\n", + " # Printing state before\n", + " print(\"Model configuration before\")\n", + " for key in keys:\n", + " print(\"-\", key, m.parameters[key])\n", + " print(\" Error: \", m.calc_error(times, inputs, outputs, dt=1e-4))\n", + "\n", + " # Step 4: Run parameter estimation with data\n", + " m.estimate_params([(times, inputs, outputs)], keys, dt=0.01)\n", + "\n", + " # Print result\n", + " print(\"\\nOptimized configuration\")\n", + " for key in keys:\n", + " print(\"-\", key, m.parameters[key])\n", + " print(\" Error: \", m.calc_error(times, inputs, outputs, dt=1e-4))\n", + "\n", + " # Sure enough- parameter estimation determined that the thrower's height wasn't 20 m, instead was closer to 1.9m, a much more reasonable height!\n", + "\n", + "\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_downloads/ff26a9b60d1b5b66f3a1ac7219de857d/benchmarking.ipynb b/docs/_downloads/ff26a9b60d1b5b66f3a1ac7219de857d/benchmarking.ipynb index 9585579d..a21e525d 100644 --- a/docs/_downloads/ff26a9b60d1b5b66f3a1ac7219de857d/benchmarking.ipynb +++ b/docs/_downloads/ff26a9b60d1b5b66f3a1ac7219de857d/benchmarking.ipynb @@ -1,54 +1,81 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\nExample benchmarking the computational efficiency of models. 
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from timeit import timeit\nfrom progpy.models import BatteryCircuit\n\ndef run_example():\n # Step 1: Create a model object\n batt = BatteryCircuit()\n \n # Step 2: Define future loading function \n def future_loading(t, x=None):\n # Constant Loading\n return batt.InputContainer({'i': 2})\n\n # Step 3: Benchmark simulation of 600 seconds\n print('Benchmarking...')\n def sim(): \n results = batt.simulate_to(600, future_loading)\n time = timeit(sim, number=500)\n\n # Print results\n print('Simulation Time: {} ms/sim'.format(time*2))\n\n# This allows the module to be executed directly \nif __name__=='__main__':\n run_example()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.9" - } + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] }, - "nbformat": 4, - "nbformat_minor": 0 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nExample benchmarking the computational efficiency of models. \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from timeit import timeit\n", + "from progpy.models import BatteryCircuit\n", + "\n", + "\n", + "def run_example():\n", + " # Step 1: Create a model object\n", + " batt = BatteryCircuit()\n", + "\n", + " # Step 2: Define future loading function\n", + " def future_loading(t, x=None):\n", + " # Constant Loading\n", + " return batt.InputContainer({\"i\": 2})\n", + "\n", + " # Step 3: Benchmark simulation of 600 seconds\n", + " print(\"Benchmarking...\")\n", + "\n", + " def sim():\n", + " results = batt.simulate_to(600, future_loading)\n", + "\n", + " time = timeit(sim, number=500)\n", + "\n", + " # Print results\n", + " print(\"Simulation Time: {} ms/sim\".format(time * 2))\n", + "\n", + "\n", + "# This allows the module to be executed directly\n", + "if __name__ == \"__main__\":\n", + " run_example()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/docs/_sources/api_ref/progpy/DataModel.rst b/docs/_sources/api_ref/progpy/DataModel.rst index d3a490c1..443faed4 100644 --- a/docs/_sources/api_ref/progpy/DataModel.rst +++ b/docs/_sources/api_ref/progpy/DataModel.rst @@ -18,9 +18,9 @@ There are a few ways to construct a :py:class:`DataModel` object, described belo From Data ************************************************* -This is the most common way to construct a :py:class:`DataModel` object, using the :py:func:`DataModel.from_data` method. It involves using one or more runs of data to train the model. 
Each DataModel class expects different data from the following set: times, inputs, states, outputs, and event_states. See documentation for the specific algorithm to see what it expects. Below is an example if it's use with the LSTMStateTransitionModel, which expects inputs and outputs. +This is the most common way to construct a :py:class:`DataModel` object, using the :py:func:`DataModel.from_data` method. It involves using one or more runs of data to train the model. Each ``DataModel`` class expects different data from the following set: times, inputs, states, outputs, and event_states. Below is an example of its use with the ``LSTMStateTransitionModel``, which expects inputs and outputs. -.. dropdown:: example +.. dropdown:: DataModel Example .. code-block:: python @@ -31,9 +31,9 @@ This is the most common way to construct a :py:class:`DataModel` object, using t From Another PrognosticsModel (i.e., Surrogate) ************************************************* -Surrogate models are constructed using the :py:func:`DataModel.from_model` Class Method. These models are trained using data from the original model, i.e., as a surrogate for the original model. The original model is not modified. Below is an example if it's use. In this example a surrogate (m2) of the original ThrownObject Model (m) is created, and can then be used interchangeably with the original model. +Surrogate models are constructed using the :py:func:`DataModel.from_model` class method. These models are trained using data from the original model, i.e., as a surrogate for the original model. The original model is not modified. Below is an example of its use. In this example a surrogate (m2) of the original ``ThrownObject`` model (m) is created, and can then be used interchangeably with the original model. -.. dropdown:: example +.. dropdown:: Surrogate Model Example .. code-block:: python diff --git a/docs/_sources/api_ref/progpy/DiscreteStates.rst b/docs/_sources/api_ref/progpy/DiscreteStates.rst new file mode 100644 index 00000000..3c5b845e --- /dev/null +++ b/docs/_sources/api_ref/progpy/DiscreteStates.rst @@ -0,0 +1,6 @@ +Discrete States +================ + +:term:`Discrete States` are a representation that can only occupy one of a finite set of predefined values (e.g., engine gear or switch). Discrete states are initialized using the function :py:func:`progpy.create_discrete_state`, described below. + +.. autofunction:: progpy.create_discrete_state diff --git a/docs/_sources/api_ref/progpy/IncludedModels.rst b/docs/_sources/api_ref/progpy/IncludedModels.rst index 1b6c4e22..b0cd0ea4 100644 --- a/docs/_sources/api_ref/progpy/IncludedModels.rst +++ b/docs/_sources/api_ref/progpy/IncludedModels.rst @@ -24,6 +24,10 @@ Battery Model .. autoclass:: progpy.models.BatteryElectroChemEODEOL + .. tab:: Simplified + + .. autoclass:: progpy.models.SimplifiedBattery + .. tab:: Circuit .. autoclass:: progpy.models.BatteryCircuit diff --git a/docs/_sources/api_ref/progpy/StateEstimator.rst b/docs/_sources/api_ref/progpy/StateEstimator.rst index 852d459e..5007badd 100644 --- a/docs/_sources/api_ref/progpy/StateEstimator.rst +++ b/docs/_sources/api_ref/progpy/StateEstimator.rst @@ -1,6 +1,6 @@ State Estimators =========================== -The State Estimator uses sensor information and a Prognostics Model to produce an estimate of system state (which can be used to estimate outputs, event_states, and performance metrics). This state estimate can either be used by itself or as input to a `Predictor `__. 
A state estimator is typically run each time new information is available. +The State Estimator uses sensor information and a Prognostics Model to produce an estimate of system state (which can be used to estimate outputs, event_states, and performance metrics). This state estimate can either be used by itself or as input to a `Predictor `__. A state estimator is typically run each time new information is available. Here's an example of its use. In this example we use the unscented kalman filter state estimator and the ThrownObject model. diff --git a/docs/_sources/api_ref/progpy/UncertainData.rst b/docs/_sources/api_ref/progpy/UncertainData.rst index 863cd139..51f4a169 100644 --- a/docs/_sources/api_ref/progpy/UncertainData.rst +++ b/docs/_sources/api_ref/progpy/UncertainData.rst @@ -1,7 +1,7 @@ Uncertain Data ======================= -The `progpy.uncertain_data` package includes classes for representing data with uncertainty. All types of UncertainData can be operated on using `the interface <#interface>`__. Inidividual classes for representing uncertain data of different kinds are described below, in `Implemented UncertainData Types <#implemented-uncertaindata-types>`__. +The `progpy.uncertain_data` package includes classes for representing data with uncertainty. All types of UncertainData can be operated on using `the interface <#interface>`__. Individual classes for representing uncertain data of different kinds are described below, in `Implemented UncertainData Types <#implemented-uncertaindata-types>`__. Interface ------------------------ diff --git a/docs/_sources/dev_guide.rst b/docs/_sources/dev_guide.rst index 2ade5257..eb968812 100644 --- a/docs/_sources/dev_guide.rst +++ b/docs/_sources/dev_guide.rst @@ -13,19 +13,31 @@ This document includes some details relevant for developers working on any of th Installing from a Branch ------------------------ To install the package package from a specific branch. First clone the repository and checkout the branch. Then navigate into the repository directory and use the following command: - `pip install -e .` + +.. code-block:: console + + $ pip install -e . This command installs the package using the checked-out version. Running Tests ------------------------ The run the progpy tests, first clone the repository and checkout the branch, installing the package using the command above. Then navigate into the repository directory. Next install the tests required dependencies, by using the following commands: - `pip install notebook` - `pip install testbook` - `pip install requests` +.. code-block:: console + + $ pip install '.[test]' + + Then run the tests using the following command: - `python -m tests` + +.. code-block:: console + + $ python -m tests + +.. admonition:: Note + + Tests on data-driven tools (e.g., LSTM model) will need dependencies from the ``datadriven`` option installed. Contributing --------------- @@ -43,7 +55,7 @@ Project Roles Branching Strategy ------------------ -Our project is following the git strategy described `here `__. Release branches are not required. Details specific to each branch are described below. We recommend that developers from within NASA watch `this video ` on git strategies and best practices. +Our project is following the git strategy described `here `__. Release branches are not required. Details specific to each branch are described below. We recommend that developers from within NASA watch `this video `_ on git strategies and best practices. 
`master`: Every merge into the master branch is done using a pull request (never commiting directly), is assigned a release number, and must complete the release checklist. The release checklist is a software assurance tool. @@ -61,9 +73,8 @@ PR Checklist * Ensure errors from static analysis must be resolved. * Review the test coverage reports (if there is a change) * Review the software benchmarking results (if there is a change) -* For added dependencies (new) - * Add to requirements.txt, - * Add to setup.py, +* For added dependencies + * Add to ``pyproject.toml`` * Add to the bottom of dev_guide.rst (this document) * Notify Project Manager * All warnings from static analysis must be reviewed and resolved - if deemed appropriate. @@ -85,7 +96,7 @@ A release is the merging of a PR where the target is the master branch. * Check that each new feature has corresponding tests * [Complete - checked automatically in PRs to dev] Confirm that every page has the copyright notice * Confirm added dependencies are at the following: - * setup.py, + * ``pyproject.toml``, * the bottom of npr7150.rst * Confirm that all issues associated with the release have been closed (i.e., requirements have been met) or assigned to another release * Run unit tests `python -m tests` on the following computer types: @@ -103,19 +114,26 @@ A release is the merging of a PR where the target is the master branch. * Check documents * Check that all desired examples are in docs * General review: see if any updates are required -* Rebuild sphinx documents: `sphinx-build sphinx_config/ docs/` +* Rebuild sphinx documents: `sphinx-build sphinx-config/ docs/` * Write release notes -* Update version number in src/\*/__init__.py and setup.py +* Update version number in ``src/\*/__init__.py``, ``sphinx-config/conf.py``, and ``pyproject.toml`` * For releases adding new features- ensure that NASA release process has been followed. * Confirm that on GitHub Releases page, the next release has been started and that a schedule is present including at least Release Date, Release Review Date, and Release Branch Opening Date. Updating Documentation ************************** -Use the following command to update documentation (requires sphinx). Documentation is in the progpy repository. +Use the following commands to get the relevant dependencies and update the documentation. Documentation is in the progpy repository. -.. code-block: bash +.. code-block:: console + + $ pip install '.[docs]' + $ sphinx-build sphinx-config/ docs/ + +Sphinx-autobuild can be used to automatically rebuild the documentation when changes are made. + +.. code-block:: console - sphinx-build sphinx_config docs\ + $ sphinx-autobuild sphinx-config/ docs/ Uploading new version to PyPI ******************************* diff --git a/docs/_sources/glossary.rst b/docs/_sources/glossary.rst index bf32b492..b961cc8e 100644 --- a/docs/_sources/glossary.rst +++ b/docs/_sources/glossary.rst @@ -99,3 +99,15 @@ Glossary system-of-systems A system consisting of multiple inter-related systems, where one system affects the others. In ProgPy, system-of-systems are reporsented using :term:`composite models `. Composite models are implemented using the :py:class:`progpy.CompositeModel` class. + + discrete state + A system state representation that can only occupy one of a finite set of predefined values. Transitions between discrete states occur based on defined logic or triggering events. Discrete states are initialized using the function :py:func:`progpy.create_discrete_state`. 
+ + continuous state + A system state representation that can vary smoothly over a continuous range of values. Continuous states are initialized using a floating point number. + + discrete model + A model where state transition is discrete. Discrete models define the `next_state` equation for state transition. + + continuous model + A model where state transition is continuous. Continuous models define the `dx` equation for state transition. diff --git a/docs/_sources/guide.rst b/docs/_sources/guide.rst index 6a6e5753..f7a6fd9d 100644 --- a/docs/_sources/guide.rst +++ b/docs/_sources/guide.rst @@ -85,10 +85,10 @@ In the prediction step, the state estimate at the prediction time and system mod Algorithms for :term:`state estimation` and :term:`prediction` along with tools analyzing and visualizing results of state estimation and prediction, managing uncertainty, and creating new state estimators or predictors, see the :ref:`State Estimation and Prediction Guide`. -More information ------------------------------ +More Information ------------------------------ -For more information, see the inidividual guides +For more information, see the individual guides. .. panels:: :img-top-cls: pt-2, pb-2 diff --git a/docs/_sources/index.rst b/docs/_sources/index.rst index e345eef6..3f374cca 100644 --- a/docs/_sources/index.rst +++ b/docs/_sources/index.rst @@ -21,6 +21,7 @@ ProgPy documentation is split into three senctions described below. guide api_ref + troubleshooting releases glossary dev_guide @@ -31,19 +32,19 @@ Citing This Repository ----------------------- Use the following to cite this repository: -@misc{2023_nasa_progpy, +@misc{2025_nasa_progpy, | author = {Christopher Teubert and Katelyn Jarvis Griffith and Matteo Corbetta and Chetan Kulkarni and Portia Banerjee and Jason Watkins and Matthew Daigle}, | title = {{ProgPy Python Prognostics Packages}}, | month = May, - | year = 2024, - | version = {1.7}, + | year = 2025, + | version = {1.8}, | url = {https://nasa.github.io/progpy} | doi = {10.5281/ZENODO.8097013} | } The corresponding reference should look like this: -C. Teubert, K. Jarvis Griffith, M. Corbetta, C. Kulkarni, P. Banerjee, J. Watkins, M. Daigle, ProgPy Python Prognostics Packages, v1.7, May 2024. URL https://github.com/nasa/progpy. +C. Teubert, K. Jarvis Griffith, M. Corbetta, C. Kulkarni, P. Banerjee, J. Watkins, M. Daigle, ProgPy Python Prognostics Packages, v1.8, May 2025. URL https://github.com/nasa/progpy. Contributing and Partnering ----------------------------- diff --git a/docs/_sources/installing.rst b/docs/_sources/installing.rst index c3f440ed..b7b0e562 100644 --- a/docs/_sources/installing.rst +++ b/docs/_sources/installing.rst @@ -33,3 +33,5 @@ Installing ProgPy .. code-block:: console $ pip install -e '.[datadriven]' + +For help with troubleshooting, see the :ref:`troubleshooting`. diff --git a/docs/_sources/prog_algs_guide.rst b/docs/_sources/prog_algs_guide.rst index b609b877..2e12efd4 100644 --- a/docs/_sources/prog_algs_guide.rst +++ b/docs/_sources/prog_algs_guide.rst @@ -66,7 +66,7 @@ The internal state is stored in the estimators x property as a UncertainData sub .. autoclass:: progpy.state_estimators.KalmanFilter -.. dropdown:: Example +.. dropdown:: State Estimation Example Here's an example of its use. In this example we use the unscented kalman filter state estimator and the ThrownObject model. 
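A minimal sketch of that kind of state estimation loop, assuming the ``ThrownObject`` model and ``UnscentedKalmanFilter`` named above; the time step and measurement value below are illustrative only, not taken from the referenced example.

.. code-block:: python

    from progpy.models import ThrownObject
    from progpy.state_estimators import UnscentedKalmanFilter

    m = ThrownObject()
    x0 = m.initialize()
    filt = UnscentedKalmanFilter(m, x0)

    # Step the estimator forward each time a new measurement arrives.
    # ThrownObject has no inputs, so the input container is empty.
    u = m.InputContainer({})
    z = m.OutputContainer({"x": 1.9})  # illustrative position measurement
    filt.estimate(0.1, u, z)

    print("Estimated state:", filt.x.mean)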
@@ -113,7 +113,7 @@ A predictors ``predict`` method is used to perform prediction, generally defined result = predictor.predict(x0, future_loading, **config) -Where x0 is the initial state as an UncertainData object (often the output of state estimation), future_loading is a function defining future loading as a function of state and time, and config is a dictionary of any additional configuration parameters, specific to the predictor being used. See `Predictors `__ for options available for each predictor +Where x0 is the initial state as an UncertainData object (often the output of state estimation), future_loading is a function defining future loading as a function of state and time, and config is a dictionary of any additional configuration parameters, specific to the predictor being used. See `Predictors `__ for options available for each predictor The result of the predict method is a named tuple with the following members: @@ -123,11 +123,11 @@ The result of the predict method is a named tuple with the following members: * **event_states**: :py:class:`progpy.predictors.Prediction` object containing predicted event states at each savepoint such that event_states.snapshot(i) corresponds to times[i] * **time_of_event**: :py:class:`progpy.uncertain_data.UncertainData` object containing the predicted Time of Event (ToE) for each event. Additionally, final state at time of event is saved at time_of_event.final_state -> :py:class:`progpy.uncertain_data.UncertainData` for each event -The stepsize and times at which results are saved can be defined like in a simulation. See `Simulation `__. +The stepsize and times at which results are saved can be defined like in a simulation. See `Simulation `__. .. dropdown:: Included Predictors - ProgPy includes a number of predictors in the *progpy.predictors* package. The most commonly used of these are highlighted below. See `Predictors `__ for a full list of supported predictors. + ProgPy includes a number of predictors in the *progpy.predictors* package. The most commonly used of these are highlighted below. See `Predictors `__ for a full list of supported predictors. * **Unscented Transform (UT)**: A type of predictor for non-linear models where the state distribution is represented by a set of sigma points, calculated by an unscented tranform. Sigma points are propogated forward with time until the pass the threshold. The times at which each sigma point passes the threshold are converted to a distribution of time of event. The predicted future states and time of event are represented by a :py:class:`progpy.uncertain_data.MultivariateNormalDist`. By it's nature, UTs are much faster than MCs, but they fit the data to a normal distribution, resulting in some loss of information. * **Monte Carlo (MC)**: A sample-based prediction algorithm, where the distribution of likely states is represented by a set of unweighted samples. These samples are propagated forward with time. By its nature, MC is more accurate than a PF, but much slower. The predicted future states and time of event are represented by a :py:class:`progpy.uncertain_data.UnweightedSamples`. Full accuracy of MC can be adjusted by increasing or decreasing the number of samples @@ -145,7 +145,7 @@ The stepsize and times at which results are saved can be defined like in a simul Extending Predictors ********************** -New :term:`predictor` are created by extending the :class:`progpy.predictors.Predictor` class. 
+A new :term:`predictor` is created by extending the :class:`progpy.predictors.Predictor` class. Analyzing Results @@ -164,7 +164,7 @@ The results of the state estimation are stored in an object of type :class:`prog * **percentage_in_bounds**: The percentage of the state estimate that is within defined bounds. * **relative_accuracy**: Relative accuracy is how close the mean of the distribution is to the ground truth, on relative terms -There are also a number of figures available to describe a state estimate, described below +There are also a number of figures available to describe a state estimate, described below. .. dropdown:: Scatter Plot @@ -226,8 +226,8 @@ Predicted Future States Predicted future states, inputs, outputs, and event states come in the form of a :class:`progpy.predictors.Prediction` object. Predictions store distributions of predicted future values at multiple future times. Predictions contain a number of tools for analyzing the results, some of which are described below: * **mean**: Estimate the mean value at each time. The result is a list of dictionaries such that prediction.mean[i] corresponds to times[i] -* **monotonicity**: Given a single prediction, for each event: go through all predicted states and compare those to the next one. - Calculates monotonicity for each event key using its associated mean value in UncertainData [#Baptista2022]_ [#Coble2021]_ + +* **monotonicity**: Given a single prediction, for each event: go through all predicted states and compare those to the next one. Calculates monotonicity for each event key using its associated mean value in ``UncertainData``. [#Baptista2022]_ [#Coble2021]_ Time of Event (ToE) @@ -235,7 +235,7 @@ Time of Event (ToE) Time of Event is also stored as an object of type :class:`progpy.uncertain_data.UncertainData`, so the analysis functions described in :ref:`State Estimation` are also available for a ToE estimate. See :ref:`State Estimation` or :class:`progpy.uncertain_data.UncertainData` documentation for details. -In addition to these standard UncertainData metrics, Probability of Success (PoS) is an important metric for prognostics. Probability of Success is the probability that a event will not occur before a defined time. For example, in aeronautics, PoS might be the probability that no failure will occur before end of mission. +In addition to these standard ``UncertainData`` metrics, Probability of Success (PoS) is an important metric for prognostics. Probability of Success is the probability that a event will not occur before a defined time. For example, in aeronautics, PoS might be the probability that no failure will occur before end of mission. Below is an example calculating probability of success: @@ -247,14 +247,14 @@ Below is an example calculating probability of success: ToE Prediction Profile ************************** -A :class:`progpy.predictors.ToEPredictionProfile` contains Time of Event (ToE) predictions performed at multiple points. ToEPredictionProfile is frequently used to evaluate the prognostic quality for a given prognostic solution. It contains a number of methods to help with this, including: +A :class:`progpy.predictors.ToEPredictionProfile` contains Time of Event (ToE) predictions performed at multiple points. ``ToEPredictionProfile`` is frequently used to evaluate the prognostic quality for a given prognostic solution. 
It contains a number of methods to help with this, including: * **alpha_lambda**: Whether the prediction falls within specified limits at particular times with respect to a performance measure [#Goebel2017]_ [#Saxena2010]_ * **cumulate_relative_accuracy**: The sum of the relative accuracies of each prediction, given a ground truth * **monotonicity**: The monotonicity of the prediction series [#Baptista2022]_ [#Coble2021]_ * **prognostic_horizon**: The difference between a time :math:`t_i`, when the predictions meet specified performance criteria, and the time corresponding to the true Time of Event (ToE), for each event [#Goebel2017]_ [#Saxena2010]_ -A ToEPredictionProfile also contains a plot method (:pythoncode:`profile.plot(...)`), which looks like this: +A ``ToEPredictionProfile`` also contains a plot method (:pythoncode:`profile.plot(...)`), which looks like this: .. image:: images/alpha_chart.png diff --git a/docs/_sources/prog_models_guide.rst b/docs/_sources/prog_models_guide.rst index 57625eec..14bd3efd 100644 --- a/docs/_sources/prog_models_guide.rst +++ b/docs/_sources/prog_models_guide.rst @@ -50,7 +50,7 @@ States are transitioned forward in time using the state transition equation. where :math:`x(t)` is :term:`state` at time :math:`t`, :math:`u(t)` is :term:`input` at time :math:`t` , :math:`dt` is the stepsize, and :math:`\Theta` are the model :term:`parameters` . -In a ProgPy model, this state transition can be represented one of two ways, either discrete or continuous, depending on the nature of state transition. In the case of continuous models, state transition behavior is defined by defining the first derivative, using the :py:func:`progpy.PrognosticsModel.dx` method. For discrete models, state transition behavior is defined using the :py:func:`progpy.PrognosticsModel.next_state` method. The continuous state transition behavior is recommended, because defining the first derivative enables some approaches that rely on that information. +In a ProgPy model, this state transition can be represented one of two ways, either discrete or continuous, depending on the nature of state transition. In the case of :term:`continuous models`, state transition behavior is defined by defining the first derivative, using the :py:func:`progpy.PrognosticsModel.dx` method. For :term:`discrete models `, state transition behavior is defined using the :py:func:`progpy.PrognosticsModel.next_state` method. The continuous state transition behavior is recommended, because defining the first derivative enables some approaches that rely on that information. .. image:: images/next_state.png :width: 70 % @@ -60,8 +60,24 @@ In a ProgPy model, this state transition can be represented one of two ways, eit :width: 70 % :align: center +States can also be discrete or continuous. :term:`Discrete states` are those which can only exist in a finite set of values. Continuous states are initialized with a number and discrete states are initialized using the function :py:func:`progpy.create_discrete_state`, like the examples below. Each discrete state represents a unique condition or mode, and transitions between states are governed by defined rules or events, providing clarity and predictability in state management. -.. dropdown:: State transition equation example +.. code-block:: python + + >>> from progpy import create_discrete_state + >>> ValveState = create_discrete_state(2, ["open", "closed"]) + >>> x["valve"] = ValveState.open + +.. 
code-block:: python + + >>> from progpy import create_discrete_state + >>> GearState = create_discrete_state(5, transition="sequential") + >>> x["gear"] = GearState(1) + +.. note:: + :term:`Discrete states ` are different from :term:`discrete models `. Discrete models are models where state transition is discrete, where discrete states are where the state itself is discrete. Discrete models may have continuous states. + +.. dropdown:: State Transition Equation Example An example of a state transition equation for a thrown object is included below. In this example, a model is created to describe an object thrown directly into the air. It has two states: position (x) and velocity (v), and no inputs. @@ -105,7 +121,7 @@ Outputs are a function of only the system state (x) and :term:`parameters` (:mat -.. dropdown:: Output equation example +.. dropdown:: Output Equation Example An example of a output equation for a thrown object is included below. In this example, a model is created to describe an object thrown directly into the air. It has two states: position (x) and velocity (v). In this case we're saying that the position of the object is directly measurable. @@ -209,9 +225,9 @@ Parameters can be set in model construction, using the *parameters* property aft The specific parameters are very specific to the system being modeled. For example, a battery might have parameters for the capacity and internal resistance. When using provided models, see the documentation for that model for details on parameters supported. -.. dropdown:: Derived parameters +.. dropdown:: Derived Parameters - Sometimes users would like to specify parameters as a function of other parameters. This feature is called "derived parameters". See example below for more details on this feature. + Sometimes users would like to specify parameters as a function of other parameters. This feature is called "derived parameters". See the derived parameters section in the example below for more details on this feature. * :download:`04 New Models <../../progpy/examples/04_New Models.ipynb>` @@ -236,10 +252,9 @@ In practice, it is impossible to have absolute knowledge of future states due to Future loading noise is used to represent uncertainty in knowledge of how the system will be loaded in the future (See :ref:`Future Loading`). Future loading noise is applied by the user in their provided future loading method by adding random noise to the estimated future load. -See example below for details on how to configure proccess and measurement noise in ProgPy +See the noise section in the example below for details on how to configure proccess and measurement noise in ProgPy. -* :download:`examples.noise <../../progpy/examples/noise.py>` - .. automodule:: noise +* :download:`01 Simulation <../../progpy/examples/01_Simulation.ipynb>` Future Loading ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -254,9 +269,9 @@ Future loading is provided by the user either using the predifined loading class # Calculate inputs return m.InputContainer({'input1': ...}) -See example below for details on how to provide future loading information in ProgPy. +See the future loading section in the example below for details on how to provide future loading information in ProgPy. -* :download:`01. Simulation <../../progpy/examples/01_Simulation.ipynb>` +* :download:`01 Simulation <../../progpy/examples/01_Simulation.ipynb>` General Notes ^^^^^^^^^^^^^^^^ @@ -268,20 +283,20 @@ Building New Models ProgPy provides a framework for building new models. 
Generally, models can be divided into three basis categories: :term:`physics-based models`, :term:`data-driven models`, and hybrid models. Additionally, models can rely on state-transition for prediction, or they can use what is called direct-prediction. These two categories are described below. -State-transition Models +State-Transition Models ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. tabs:: - .. tab:: physics-based + .. tab:: Physics-Based New :term:`physics-based models` are constructed by subclassing :py:class:`progpy.PrognosticsModel` as illustrated in the first example. To generate a new model, create a new class for your model that inherits from this class. Alternatively, you can copy the template :download:`prog_model_template.ProgModelTemplate <../../progpy/prog_model_template.py>`, replacing the methods with logic defining your specific model. The analysis and simulation tools defined in :class:`progpy.PrognosticsModel` will then work with your new model. For simple linear models, users can choose to subclass the simpler :py:class:`progpy.LinearModel` class, as illustrated in the second example. Some methods and algorithms only function on linear models. - * :download:`04. New Models <../../progpy/examples/04_New Models.ipynb>` + * :download:`04 New Models <../../progpy/examples/04_New Models.ipynb>` - .. tab:: data-driven + .. tab:: Data-Driven New :term:`data-driven models`, such as those using neural networks, are created by subclassing the :py:class:`progpy.data_models.DataModel` class, overriding the ``from_data`` method. @@ -313,18 +328,17 @@ State-transition Models * :download:`examples.custom_model <../../progpy/examples/custom_model.py>` .. automodule:: custom_model -Direct-prediction models +Direct-Prediction Models ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:term:`Direct-prediction models` are models that estimate :term:`time of event` directly from the current state and :term:`future load`, instead of being predicted through state transition. When models are pure direct-prediction models, future states cannot be predicted. See example below for more information. +:term:`Direct-prediction models` are models that estimate :term:`time of event` directly from the current state and :term:`future load`, instead of being predicted through state transition. When models are pure direct-prediction models, future states cannot be predicted. See the direct models section in the example below for more information. -* :download:`examples.direct_model <../../progpy/examples/direct_model.py>` - .. automodule:: direct_model +* :download:`04 New Models <../../progpy/examples/04_New Models.ipynb>` Using Data ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Wether you're using :term:`data-driven`, :term:`physics-based`, expert knowledge, or some hybrid approach, building and validating a model requires data. In the case of data-driven approaches, data is used to train and validate the model. In the case of physics-based, data is used to estimate parameters (see `Parameter Estimation`) and validate the model. +Whether you're using :term:`data-driven`, :term:`physics-based`, expert knowledge, or some hybrid approach, building and validating a model requires data. In the case of data-driven approaches, data is used to train and validate the model. In the case of physics-based, data is used to estimate parameters (see `Parameter Estimation`) and validate the model. ProgPy includes some example datasets. See `ProgPy Datasets `_ and the example below for details. @@ -333,18 +347,14 @@ ProgPy includes some example datasets. 
See `ProgPy Datasets `__. The examples below illustrate use of some of the models provided in the :py:mod:`progpy.models` module. - -* :download:`examples.sim <../../progpy/examples/sim.py>` - .. automodule:: sim +For details on the included models, see `Included Models `__. The examples below also illustrate the use of some models provided in the :py:mod:`progpy.models` module. -* :download:`examples.sim_battery_eol <../../progpy/examples/sim_battery_eol.py>` - .. automodule:: sim_battery_eol +* :download:`03 Included Models <../../progpy/examples/03_Existing Models.ipynb>` * :download:`examples.sim_pump <../../progpy/examples/sim_pump.py>` .. automodule:: sim_pump @@ -369,7 +379,7 @@ One of the most basic of functions using a model is simulation. Simulation is th .. role:: pythoncode(code) :language: python -.. dropdown:: Saving results +.. dropdown:: Saving Results :py:meth:`progpy.PrognosticsModel.simulate_to` and :py:meth:`progpy.PrognosticsModel.simulate_to_threshold` return the inputs, states, outputs, and event states at various points in the simulation. Returning these values for every timestep would require a lot of memory, and is not necessary for most use cases, so ProgPy provides the ability for users to specify what data to save. @@ -392,9 +402,9 @@ One of the most basic of functions using a model is simulation. Simulation is th .. admonition:: Note :class: tip - Data will always be saved at the next time after the save_pt or save_freq. As a result the data may not correspond to the exact time specified. Use automatic step sizes to save at the exact time. + Data will always be saved at the next time after the ``save_pt`` or ``save_freq``. As a result, the data may not correspond to the exact time specified. Use automatic step sizes to save at the exact time. -.. dropdown:: Step size +.. dropdown:: Step Size Step size is the size of the step taken in integration. It is specified by the ``dt`` argument. It is an important consideration when simulating. Too large of a step size could result in wildly incorrect results, and too small of a step size can be computationally expensive. Step size can be provided in a few different ways, described below: @@ -439,13 +449,9 @@ One of the most basic of functions using a model is simulation. Simulation is th Now loading is applied correctly. -Use of simulation is described further in the following examples: - -* :download:`examples.sim <../../progpy/examples/sim.py>` - .. automodule:: sim +For simulation examples, see the following notebook. -* :download:`examples.noise <../../progpy/examples/noise.py>` - .. automodule:: noise +* :download:`01 Simulation <../../progpy/examples/01_Simulation.ipynb>` Parameter Estimation ---------------------------- @@ -461,12 +467,12 @@ Generally, parameter estimation is done by tuning the parameters of the model so >>> params_to_estimate = ['param1', 'param2'] >>> m.estimate_params([run1_data, run2_data], params_to_estimate, dt=0.01) -See the example below for more details +See the example below for more details. .. admonition:: Note :class: tip - Parameters are changes in-place, so the model on which estimate_params is called, is now tuned to match the data + Parameters are changed in-place, so the model on which ``estimate_params`` is called is now tuned to match the data.
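To make the snippet above concrete, below is a minimal sketch of parameter estimation using the included :py:class:`progpy.models.ThrownObject` model. It assumes each run is passed as a ``(times, inputs, outputs)`` tuple, matching the call shown above, and it generates synthetic "observed" data by simulating a correctly calibrated model rather than using real measurements.

.. code-block:: python

    from progpy.models import ThrownObject

    # Generate synthetic "observed" data from a correctly calibrated model
    m_true = ThrownObject()

    def no_load(t, x=None):
        # ThrownObject has no inputs
        return m_true.InputContainer({})

    data = m_true.simulate_to_threshold(no_load, dt=0.1, save_freq=0.5)

    # A second model with a deliberately incorrect gravity parameter
    m = ThrownObject(g=-15)

    # Tune 'g' so that the simulated output matches the observed output
    m.estimate_params([(data.times, data.inputs, data.outputs)], ['g'], dt=0.1)

    print(m.parameters['g'])  # should move back toward the true value (about -9.81)

Multiple runs can be passed in the same list, as in the snippet above. Because parameters are changed in place, ``m`` itself is tuned after the call.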
Visualizing Results ---------------------------- @@ -484,15 +490,13 @@ See :py:meth:`progpy.sim_result.SimResult.plot` for more details on plotting cap Combination Models ---------------------------- -There are two methods in progpy through which multiple models can be combined and used together: composite models and ensemble models, described below. +There are two methods in progpy through which multiple models can be combined and used together: composite models and ensemble models, described below. For more details, see the example below. -For more details, see: - - * :download:`06. Combining Models <../../progpy/examples/06_Combining Models.ipynb>` +:download:`06. Combining Models <../../progpy/examples/06_Combining Models.ipynb>` .. tabs:: - .. tab:: Composite models + .. tab:: Composite Models Composite models are used to represent the behavior of a system of interconnected systems. Each system is represented by its own model. These models are combined into a single composite model which behaves as a single model. When defining the composite model, the user provides a description of any connections between the state or output of one model and the input of another. For example, @@ -506,7 +510,7 @@ For more details, see: >>> ] >>> ) - .. tab:: Ensemble models + .. tab:: Ensemble Models Unlike composite models which model a system of systems, ensemble models are used to combine the logic of multiple models which describe the same system. This is used when there are multiple models representing different system behaviors or conditions. The results of each model are aggregated in a way that can be defined by the user. For example, @@ -517,7 +521,7 @@ For more details, see: >>> aggregator = np.mean >>> ) - .. tab:: MixtureOfExperts models + .. tab:: MixtureOfExperts Models Mixture of Experts (MoE) models combine multiple models of the same system, similar to Ensemble models. Unlike Ensemble Models, the aggregation is done by selecting the "best" model. That is, the model that has performed best in the past. Each model will have a 'score' that is tracked in the state, and this determines which model is best. @@ -534,14 +538,15 @@ Other Examples * :download:`examples.sensitivity <../../progpy/examples/sensitivity.py>` .. automodule:: sensitivity -* :download:`examples.serialization <../../progpy/examples/serialization.py>` - .. automodule:: serialization - -Tips ---- +Tips & Best Practices +---------------------- * If you're only doing diagnostics without prognostics, just define a next_state equation with no change of :term:`state` and don't perform prediction. The :term:`state estimator` can still be used to estimate if any of the :term:`events` have occurred. -* Sudden :term:`event's` use a binary :term:`event state` (1=healthy, 0=failed). +* Sudden :term:`events` use a binary :term:`event state` (1=healthy, 0=failed). * You can predict as many :term:`events` as you would like. Sometimes one :term:`event` must happen before another; in this case, the :term:`event` occurrence for event 1 can be part of the equation for event 2 ('event 2': event_1 and [OTHER LOGIC]).
+* Minimize the number of state variables whenever possible +* Whenever possible, if calculations don't include state or inputs, include values as parameters or derived parameters instead of calculating within state transition +* Use consistent units throughout the model +* Document all assumptions and limitations References ---------------------------- diff --git a/docs/_sources/prog_server_guide.rst b/docs/_sources/prog_server_guide.rst index b807e55c..e17f165f 100644 --- a/docs/_sources/prog_server_guide.rst +++ b/docs/_sources/prog_server_guide.rst @@ -8,11 +8,11 @@ prog_server Guide -The Prognostics As-A-Service (PaaS) Sandbox (a.k.a., prog_server) is a simplified implementation of a Service-Oriented Architecture (SOA) for performing prognostics (estimation of time until events and future system states) of engineering systems. The PaaS Sandbox is a wrapper around the ProgPy package, allowing one or more users to access the features of these packages through a REST API. The package is intended to be used as a research tool to prototype and benchmark Prognostics As-A-Service (PaaS) architectures and work on the challenges facing such architectures, including Generality, Communication, Security, Environmental Complexity, Utility, and Trust. +The Prognostics As-A-Service (PaaS) Sandbox (a.k.a., ``prog_server``) is a simplified implementation of a Service-Oriented Architecture (SOA) for performing prognostics (estimation of time until events and future system states) of engineering systems. The PaaS Sandbox is a wrapper around the ProgPy package, allowing one or more users to access the features of these packages through a REST API. The package is intended to be used as a research tool to prototype and benchmark Prognostics As-A-Service (PaaS) architectures and work on the challenges facing such architectures, including Generality, Communication, Security, Environmental Complexity, Utility, and Trust. -The PaaS Sandbox is actually two packages, prog_server and prog_client. The prog_server package is a prognostics server that provides the REST API. The prog_client package is a python client that provides functions to interact with the server via the REST API. +The PaaS Sandbox is actually two packages, ``prog_server`` and ``prog_client``. The ``prog_server`` package is a prognostics server that provides the REST API. The ``prog_client`` package is a Python client that provides functions to interact with the server via the REST API. -prog_server uses ProgPy. See the :ref:`State Estimation and Prediction Guide ` and :ref:`Modeling and Simulation Guide `. +``prog_server`` uses ProgPy. See the :ref:`State Estimation and Prediction Guide ` and :ref:`Modeling and Simulation Guide `. The PaaS Sandbox is a simplified version of the Prognostics As-A-Service Architecture implemented as the PaaS/SWS Safety Service software by the NASA System Wide Safety (SWS) project, building upon the original work of the Convergent Aeronautics Solutions (CAS) project. This implementation is a research tool, and is therefore missing important features that should be present in a full implementation of the PaaS architecture, such as authentication and persistent state management. @@ -23,15 +23,15 @@ Installing prog_server .. tab:: Stable Version (Recommended) - The latest stable release of `prog_server` is hosted on PyPi. For most users, this version will be adequate. To install from the command line, use the following command: + The latest stable release of ``prog_server`` is hosted on PyPi.
For most users, this version will be adequate. To install from the command line, use the following command: .. code-block:: console $ pip install prog_server - .. tab:: Pre-release + .. tab:: Pre-Release - Users who would like to contribute to `prog_server` or would like to use pre-release features can do so using the `prog_server GitHub repo `__. This isn't recommended for most users as this version may be unstable. To use this version, use the following commands: + Users who would like to contribute to ``prog_server`` or would like to use pre-release features can do so using the `prog_server GitHub repo `__. This isn't recommended for most users as this version may be unstable. To use this version, use the following commands: .. code-block:: console @@ -43,19 +43,19 @@ Installing prog_server About --------- -`prog_server` uses progpy. The best way to learn how to use prog_server is to first learn how to use that package. See :ref:`State Estimation and Prediction Guide ` and :ref:`Modeling and Simulation Guide ` for more details. +``prog_server`` uses progpy. The best way to learn how to use ``prog_server`` is to first learn how to use that package. See :ref:`State Estimation and Prediction Guide ` and :ref:`Modeling and Simulation Guide ` for more details. The PaaS Sandbox is actually two packages, ``prog_server`` and ``prog_client``. The ``prog_server`` package is the server that provides the REST API. The ``prog_client`` package is a python client that uses the REST API (see :py:class:`prog_client.Session`). The ``prog_server`` package is the PaaS Sandbox Server. Once started, the server can accept requests from one or more applications requesting prognostics, using its REST API (described in :ref:`prog_server API Reference`). -Starting the prog_server -------------------------- -There are two methods for starting the prog_server, described below: +Starting prog_server -------------------------- +There are two methods for starting the ``prog_server``, described below: .. tabs:: - .. tab:: Command line + .. tab:: Command Line - Generally, you can start the prog_server by running the module, like this: + Generally, you can start the ``prog_server`` by running the module, like this: .. code-block:: console @@ -68,7 +68,7 @@ There are two methods for starting the prog_server, described below: .. tab:: Programmatically - There are two methods to start the prog_server in python. The first, below, is non-blocking allowing users to perform other functions while the server is running. + There are two methods to start the ``prog_server`` in Python. The first, below, is non-blocking, allowing users to perform other functions while the server is running. .. code-block:: python @@ -84,19 +84,11 @@ There are two methods for starting the prog_server, described below: >>> import prog_server >>> prog_server.run() # Starts the server- blocking. - Both run and start accept the following optional keyword arguments: - - * **host** (str): Server host address. Defaults to '127.0.0.1' - * **port** (int): Server port address. Defaults to 5000 - * **debug** (bool): If the server is to be started in debug mode + See :py:func:`prog_server.start` and :py:func:`prog_server.run` for details on the accepted keyword arguments. Examples ------------ -The best way to learn how to use prog_server is to look at examples. There are a number of examples included with prog_server, listed below: - -* :download:`examples.online_prog <../../prog_server/examples/online_prog.py>` - ..
automodule:: online_prog +The best way to learn how to use ``prog_server`` is to see the example below. -* :download:`examples.option_scoring <../../prog_server/examples/option_scoring.py>` - .. automodule:: option_scoring +* :download:`10 Prognostics Server <../../progpy/examples/10_Prognostics Server.ipynb>` diff --git a/docs/_sources/releases.rst b/docs/_sources/releases.rst index 84054fa1..e73075bb 100644 --- a/docs/_sources/releases.rst +++ b/docs/_sources/releases.rst @@ -4,6 +4,29 @@ Release Notes .. .. contents:: .. :backlinks: top +Updates in v1.8.0 +---------------------- + +progpy +************** +* **New Feature** :term:`Discrete States` (created by :py:func:`progpy.create_discrete_state`): Inputs, states, outputs, or performance metrics can now be represented by a discrete state object, which will only exist in a set of defined states. See the `Discrete State Notebook `__ for examples of use +* **New model**: Simplified Battery (:py:class:`progpy.models.BatterySimplified`). This is a simplified version of the BatteryElectroChemEOD model by `Gina Sierra et al. `__ first introduced in the `PHM Society Conference ProgPy Tutorial `__ from November 2024. See `Included Models `__ for details +* Support for Python3.13 (with the exception of ProgPy's data-driven dependencies, due to Tensorflow not yet supporting this Python version) +* Dropped support of end-of-life Python3.7 and Python3.8 +* Improved “ProgPy Short Course”: A series of Jupyter Notebooks designed to help users get started with ProgPy and understand how to use it for prognostics. See https://github.com/nasa/progpy/tree/master/examples +* Various bugfixes and efficiency improvements + +prog_server +************ +* Support for Python3.13 + +Updates in v1.7.1 +---------------------- + +progpy +************** +Hotfix for an issue with dataset download + Updates in v1.7 ---------------------- @@ -192,7 +215,7 @@ prog_algs * new ```List[float]``` where times[m] corresponds to timepoint m for all samples. * End of Life (EOL)/ Time of Event (ToE) estimates: * previous ```List[float]``` where the times correspond to the time that the first event occurs. - * new ```UnweightedSamples``` where keys correspond to the inidividualevents predicted. + * new ```UnweightedSamples``` where keys correspond to the individual events predicted. * State at time of event (ToE). * previous: element in states. * new: member of ToE structure (e.g., ToE.final_state['event1']). diff --git a/docs/_sources/troubleshooting.rst b/docs/_sources/troubleshooting.rst new file mode 100644 index 00000000..fa58a650 --- /dev/null +++ b/docs/_sources/troubleshooting.rst @@ -0,0 +1,41 @@ +.. _troubleshooting: + +Troubleshooting Guide +============================ +This document includes common ProgPy issues and ways to troubleshoot them. + +Tensorflow Version Compatibility +------------------------------------------------ +The current datadriven dependencies require ``tensorflow>=2.18.0`` for compatibility with newer versions of ``numpy`` (from 2.0) and ``numpoly`` (from 1.3.6). Dependency specifications can be found in ``pyproject.toml``. + +Older versions of ``tensorflow`` may work with older ``numpy`` and ``numpoly`` versions. One known version that works is ``tensorflow==2.16.2`` with ``numpy==1.26.4`` and ``numpoly==1.2.12``. + +Data-Driven Tools +------------------------ +If you are using data-driven tools (e.g., the LSTM model), make sure the datadriven dependencies are installed using the following command: + +.. tabs:: + + ..
tab:: Stable Version (Recommended) + + .. code-block:: console + + $ pip install 'progpy[datadriven]' + + .. tab:: Pre-Release + + .. code-block:: console + + $ pip install -e '.[datadriven]' + +Installing ProgPy Data-Driven Tools with Python 3.13 +------------------------------------------------------------------------ +Tensorflow does not support Python3.13 as of this writing (April 2025). Until this is fixed, ProgPy data-driven features may not work correctly. If you are having trouble running data-driven features with Python3.13, try with an earlier version of Python. + +Simulation Divergence +----------------------- +Simulation divergence or instability can occur for a variety of reasons. A few suggestions for debugging are included below: + +- Check that your state transition equations are numerically stable +- Verify that your time step (dt) is appropriate for your system dynamics +- Consider using a different integration method if the default Euler method fails diff --git a/docs/_static/documentation_options.js b/docs/_static/documentation_options.js index 11bd8a54..2ebec279 100644 --- a/docs/_static/documentation_options.js +++ b/docs/_static/documentation_options.js @@ -1,6 +1,6 @@ var DOCUMENTATION_OPTIONS = { URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '1.7', + VERSION: '1.8', LANGUAGE: 'None', COLLAPSE_INDEX: false, BUILDER: 'html', diff --git a/docs/_static/panels-bootstrap.min.css b/docs/_static/panels-bootstrap.min.css new file mode 100644 index 00000000..2327873f --- /dev/null +++ b/docs/_static/panels-bootstrap.min.css @@ -0,0 +1,21 @@ +.badge{display:inline-block;padding:.25em .4em;font-size:75%;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.badge-pill{padding-right:.6em;padding-left:.6em;border-radius:10rem}.badge-primary{color:#fff;background-color:#007bff}.badge-primary[href]:focus,.badge-primary[href]:hover{color:#fff;text-decoration:none;background-color:#0062cc}.badge-secondary{color:#fff;background-color:#6c757d}.badge-secondary[href]:focus,.badge-secondary[href]:hover{color:#fff;text-decoration:none;background-color:#545b62}.badge-success{color:#fff;background-color:#28a745}.badge-success[href]:focus,.badge-success[href]:hover{color:#fff;text-decoration:none;background-color:#1e7e34}.badge-info{color:#fff;background-color:#17a2b8}.badge-info[href]:focus,.badge-info[href]:hover{color:#fff;text-decoration:none;background-color:#117a8b}.badge-warning{color:#212529;background-color:#ffc107}.badge-warning[href]:focus,.badge-warning[href]:hover{color:#212529;text-decoration:none;background-color:#d39e00}.badge-danger{color:#fff;background-color:#dc3545}.badge-danger[href]:focus,.badge-danger[href]:hover{color:#fff;text-decoration:none;background-color:#bd2130}.badge-light{color:#212529;background-color:#f8f9fa}.badge-light[href]:focus,.badge-light[href]:hover{color:#212529;text-decoration:none;background-color:#dae0e5}.badge-dark{color:#fff;background-color:#343a40}.badge-dark[href]:focus,.badge-dark[href]:hover{color:#fff;text-decoration:none;background-color:#1d2124}/*! + * Bootstrap v4.4.1 (https://getbootstrap.com/) + * Copyright 2011-2019 The Bootstrap Authors + * Copyright 2011-2019 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */.border-0{border:0 !important}.border-top-0{border-top:0 !important}.border-right-0{border-right:0 !important}.border-bottom-0{border-bottom:0 !important}.border-left-0{border-left:0 !important}.p-0{padding:0 !important}.pt-0{padding-top:0 !important}.pr-0,.px-0{padding-right:0 !important}.pb-0,.py-0{padding-bottom:0 !important}.pl-0,.px-0{padding-left:0 !important}.p-1{padding:.25rem !important}.pt-1,.py-1{padding-top:.25rem !important}.pr-1,.px-1{padding-right:.25rem !important}.pb-1,.py-1{padding-bottom:.25rem !important}.pl-1,.px-1{padding-left:.25rem !important}.p-2{padding:.5rem !important}.pt-2,.py-2{padding-top:.5rem !important}.pr-2,.px-2{padding-right:.5rem !important}.pb-2,.py-2{padding-bottom:.5rem !important}.pl-2,.px-2{padding-left:.5rem !important}.p-3{padding:1rem !important}.pt-3,.py-3{padding-top:1rem !important}.pr-3,.px-3{padding-right:1rem !important}.pb-3,.py-3{padding-bottom:1rem !important}.pl-3,.px-3{padding-left:1rem !important}.p-4{padding:1.5rem !important}.pt-4,.py-4{padding-top:1.5rem !important}.pr-4,.px-4{padding-right:1.5rem !important}.pb-4,.py-4{padding-bottom:1.5rem !important}.pl-4,.px-4{padding-left:1.5rem !important}.p-5{padding:3rem !important}.pt-5,.py-5{padding-top:3rem !important}.pr-5,.px-5{padding-right:3rem !important}.pb-5,.py-5{padding-bottom:3rem !important}.pl-5,.px-5{padding-left:3rem !important}.m-0{margin:0 !important}.mt-0,.my-0{margin-top:0 !important}.mr-0,.mx-0{margin-right:0 !important}.mb-0,.my-0{margin-bottom:0 !important}.ml-0,.mx-0{margin-left:0 !important}.m-1{margin:.25rem !important}.mt-1,.my-1{margin-top:.25rem !important}.mr-1,.mx-1{margin-right:.25rem !important}.mb-1,.my-1{margin-bottom:.25rem !important}.ml-1,.mx-1{margin-left:.25rem !important}.m-2{margin:.5rem !important}.mt-2,.my-2{margin-top:.5rem !important}.mr-2,.mx-2{margin-right:.5rem !important}.mb-2,.my-2{margin-bottom:.5rem !important}.ml-2,.mx-2{margin-left:.5rem !important}.m-3{margin:1rem !important}.mt-3,.my-3{margin-top:1rem !important}.mr-3,.mx-3{margin-right:1rem !important}.mb-3,.my-3{margin-bottom:1rem !important}.ml-3,.mx-3{margin-left:1rem !important}.m-4{margin:1.5rem !important}.mt-4,.my-4{margin-top:1.5rem !important}.mr-4,.mx-4{margin-right:1.5rem !important}.mb-4,.my-4{margin-bottom:1.5rem !important}.ml-4,.mx-4{margin-left:1.5rem !important}.m-5{margin:3rem !important}.mt-5,.my-5{margin-top:3rem !important}.mr-5,.mx-5{margin-right:3rem !important}.mb-5,.my-5{margin-bottom:3rem !important}.ml-5,.mx-5{margin-left:3rem !important}/*! + * Bootstrap v4.4.1 (https://getbootstrap.com/) + * Copyright 2011-2019 The Bootstrap Authors + * Copyright 2011-2019 Twitter, Inc. 
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */.btn{display:inline-block;font-weight:400;color:#212529;text-align:center;vertical-align:middle;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-color:transparent;border:1px solid transparent;padding:.375rem .75rem;font-size:1rem;line-height:1.5;border-radius:.25rem;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion:reduce){.btn{transition:none}}.btn:visited{color:#212529}.btn:hover{color:#212529;text-decoration:none}.btn.focus,.btn:focus{outline:0;box-shadow:0 0 0 .2rem rgba(0,123,255,.25)}.btn.disabled,.btn:disabled{opacity:.65}a.btn.disabled,fieldset:disabled a.btn{pointer-events:none}.btn-primary{color:#fff;background-color:#007bff;border-color:#007bff}.btn-primary:visited{color:#fff}.btn-primary:hover{color:#fff;background-color:#0069d9;border-color:#0062cc}.btn-primary.focus,.btn-primary:focus{color:#fff;background-color:#0069d9;border-color:#0062cc;box-shadow:0 0 0 .2rem rgba(38,143,255,.5)}.btn-primary.disabled,.btn-primary:disabled{color:#fff;background-color:#007bff;border-color:#007bff}.btn-primary:not(:disabled):not(.disabled).active,.btn-primary:not(:disabled):not(.disabled):active,.show>.btn-primary.dropdown-toggle{color:#fff;background-color:#0062cc;border-color:#005cbf}.btn-primary:not(:disabled):not(.disabled).active:focus,.btn-primary:not(:disabled):not(.disabled):active:focus,.show>.btn-primary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(38,143,255,.5)}.btn-secondary{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:visited{color:#fff}.btn-secondary:hover{color:#fff;background-color:#5a6268;border-color:#545b62}.btn-secondary.focus,.btn-secondary:focus{color:#fff;background-color:#5a6268;border-color:#545b62;box-shadow:0 0 0 .2rem rgba(130,138,145,.5)}.btn-secondary.disabled,.btn-secondary:disabled{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:not(:disabled):not(.disabled).active,.btn-secondary:not(:disabled):not(.disabled):active,.show>.btn-secondary.dropdown-toggle{color:#fff;background-color:#545b62;border-color:#4e555b}.btn-secondary:not(:disabled):not(.disabled).active:focus,.btn-secondary:not(:disabled):not(.disabled):active:focus,.show>.btn-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(130,138,145,.5)}.btn-success{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success:visited{color:#fff}.btn-success:hover{color:#fff;background-color:#218838;border-color:#1e7e34}.btn-success.focus,.btn-success:focus{color:#fff;background-color:#218838;border-color:#1e7e34;box-shadow:0 0 0 .2rem rgba(72,180,97,.5)}.btn-success.disabled,.btn-success:disabled{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success:not(:disabled):not(.disabled).active,.btn-success:not(:disabled):not(.disabled):active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#1e7e34;border-color:#1c7430}.btn-success:not(:disabled):not(.disabled).active:focus,.btn-success:not(:disabled):not(.disabled):active:focus,.show>.btn-success.dropdown-toggle:focus{box-shadow:0 0 0 .2rem 
rgba(72,180,97,.5)}.btn-info{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:visited{color:#fff}.btn-info:hover{color:#fff;background-color:#138496;border-color:#117a8b}.btn-info.focus,.btn-info:focus{color:#fff;background-color:#138496;border-color:#117a8b;box-shadow:0 0 0 .2rem rgba(58,176,195,.5)}.btn-info.disabled,.btn-info:disabled{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:not(:disabled):not(.disabled).active,.btn-info:not(:disabled):not(.disabled):active,.show>.btn-info.dropdown-toggle{color:#fff;background-color:#117a8b;border-color:#10707f}.btn-info:not(:disabled):not(.disabled).active:focus,.btn-info:not(:disabled):not(.disabled):active:focus,.show>.btn-info.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(58,176,195,.5)}.btn-warning{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-warning:visited{color:#212529}.btn-warning:hover{color:#212529;background-color:#e0a800;border-color:#d39e00}.btn-warning.focus,.btn-warning:focus{color:#212529;background-color:#e0a800;border-color:#d39e00;box-shadow:0 0 0 .2rem rgba(222,170,12,.5)}.btn-warning.disabled,.btn-warning:disabled{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-warning:not(:disabled):not(.disabled).active,.btn-warning:not(:disabled):not(.disabled):active,.show>.btn-warning.dropdown-toggle{color:#212529;background-color:#d39e00;border-color:#c69500}.btn-warning:not(:disabled):not(.disabled).active:focus,.btn-warning:not(:disabled):not(.disabled):active:focus,.show>.btn-warning.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(222,170,12,.5)}.btn-danger{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:visited{color:#fff}.btn-danger:hover{color:#fff;background-color:#c82333;border-color:#bd2130}.btn-danger.focus,.btn-danger:focus{color:#fff;background-color:#c82333;border-color:#bd2130;box-shadow:0 0 0 .2rem rgba(225,83,97,.5)}.btn-danger.disabled,.btn-danger:disabled{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:not(:disabled):not(.disabled).active,.btn-danger:not(:disabled):not(.disabled):active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#bd2130;border-color:#b21f2d}.btn-danger:not(:disabled):not(.disabled).active:focus,.btn-danger:not(:disabled):not(.disabled):active:focus,.show>.btn-danger.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(225,83,97,.5)}.btn-light{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:visited{color:#212529}.btn-light:hover{color:#212529;background-color:#e2e6ea;border-color:#dae0e5}.btn-light.focus,.btn-light:focus{color:#212529;background-color:#e2e6ea;border-color:#dae0e5;box-shadow:0 0 0 .2rem rgba(216,217,219,.5)}.btn-light.disabled,.btn-light:disabled{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:not(:disabled):not(.disabled).active,.btn-light:not(:disabled):not(.disabled):active,.show>.btn-light.dropdown-toggle{color:#212529;background-color:#dae0e5;border-color:#d3d9df}.btn-light:not(:disabled):not(.disabled).active:focus,.btn-light:not(:disabled):not(.disabled):active:focus,.show>.btn-light.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(216,217,219,.5)}.btn-dark{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark:visited{color:#fff}.btn-dark:hover{color:#fff;background-color:#23272b;border-color:#1d2124}.btn-dark.focus,.btn-dark:focus{color:#fff;background-color:#23272b;border-color:#1d2124;box-shadow:0 0 0 .2rem 
rgba(82,88,93,.5)}.btn-dark.disabled,.btn-dark:disabled{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark:not(:disabled):not(.disabled).active,.btn-dark:not(:disabled):not(.disabled):active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#1d2124;border-color:#171a1d}.btn-dark:not(:disabled):not(.disabled).active:focus,.btn-dark:not(:disabled):not(.disabled):active:focus,.show>.btn-dark.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(82,88,93,.5)}.btn-outline-primary{color:#007bff;border-color:#007bff}.btn-outline-primary:visited{color:#007bff}.btn-outline-primary:hover{color:#fff;background-color:#007bff;border-color:#007bff}.btn-outline-primary.focus,.btn-outline-primary:focus{box-shadow:0 0 0 .2rem rgba(0,123,255,.5)}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#007bff;background-color:transparent}.btn-outline-primary:not(:disabled):not(.disabled).active,.btn-outline-primary:not(:disabled):not(.disabled):active,.show>.btn-outline-primary.dropdown-toggle{color:#fff;background-color:#007bff;border-color:#007bff}.btn-outline-primary:not(:disabled):not(.disabled).active:focus,.btn-outline-primary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-primary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(0,123,255,.5)}.btn-outline-secondary{color:#6c757d;border-color:#6c757d}.btn-outline-secondary:visited{color:#6c757d}.btn-outline-secondary:hover{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-outline-secondary.focus,.btn-outline-secondary:focus{box-shadow:0 0 0 .2rem rgba(108,117,125,.5)}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#6c757d;background-color:transparent}.btn-outline-secondary:not(:disabled):not(.disabled).active,.btn-outline-secondary:not(:disabled):not(.disabled):active,.show>.btn-outline-secondary.dropdown-toggle{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-outline-secondary:not(:disabled):not(.disabled).active:focus,.btn-outline-secondary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(108,117,125,.5)}.btn-outline-success{color:#28a745;border-color:#28a745}.btn-outline-success:visited{color:#28a745}.btn-outline-success:hover{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success.focus,.btn-outline-success:focus{box-shadow:0 0 0 .2rem rgba(40,167,69,.5)}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#28a745;background-color:transparent}.btn-outline-success:not(:disabled):not(.disabled).active,.btn-outline-success:not(:disabled):not(.disabled):active,.show>.btn-outline-success.dropdown-toggle{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success:not(:disabled):not(.disabled).active:focus,.btn-outline-success:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-success.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(40,167,69,.5)}.btn-outline-info{color:#17a2b8;border-color:#17a2b8}.btn-outline-info:visited{color:#17a2b8}.btn-outline-info:hover{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info.focus,.btn-outline-info:focus{box-shadow:0 0 0 .2rem 
rgba(23,162,184,.5)}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#17a2b8;background-color:transparent}.btn-outline-info:not(:disabled):not(.disabled).active,.btn-outline-info:not(:disabled):not(.disabled):active,.show>.btn-outline-info.dropdown-toggle{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info:not(:disabled):not(.disabled).active:focus,.btn-outline-info:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-info.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(23,162,184,.5)}.btn-outline-warning{color:#ffc107;border-color:#ffc107}.btn-outline-warning:visited{color:#ffc107}.btn-outline-warning:hover{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning.focus,.btn-outline-warning:focus{box-shadow:0 0 0 .2rem rgba(255,193,7,.5)}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#ffc107;background-color:transparent}.btn-outline-warning:not(:disabled):not(.disabled).active,.btn-outline-warning:not(:disabled):not(.disabled):active,.show>.btn-outline-warning.dropdown-toggle{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning:not(:disabled):not(.disabled).active:focus,.btn-outline-warning:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-warning.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(255,193,7,.5)}.btn-outline-danger{color:#dc3545;border-color:#dc3545}.btn-outline-danger:visited{color:#dc3545}.btn-outline-danger:hover{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger.focus,.btn-outline-danger:focus{box-shadow:0 0 0 .2rem rgba(220,53,69,.5)}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#dc3545;background-color:transparent}.btn-outline-danger:not(:disabled):not(.disabled).active,.btn-outline-danger:not(:disabled):not(.disabled):active,.show>.btn-outline-danger.dropdown-toggle{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger:not(:disabled):not(.disabled).active:focus,.btn-outline-danger:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-danger.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(220,53,69,.5)}.btn-outline-light{color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:visited{color:#f8f9fa}.btn-outline-light:hover{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light.focus,.btn-outline-light:focus{box-shadow:0 0 0 .2rem rgba(248,249,250,.5)}.btn-outline-light.disabled,.btn-outline-light:disabled{color:#f8f9fa;background-color:transparent}.btn-outline-light:not(:disabled):not(.disabled).active,.btn-outline-light:not(:disabled):not(.disabled):active,.show>.btn-outline-light.dropdown-toggle{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:not(:disabled):not(.disabled).active:focus,.btn-outline-light:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(248,249,250,.5)}.btn-outline-dark{color:#343a40;border-color:#343a40}.btn-outline-dark:visited{color:#343a40}.btn-outline-dark:hover{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark.focus,.btn-outline-dark:focus{box-shadow:0 0 0 .2rem 
rgba(52,58,64,.5)}.btn-outline-dark.disabled,.btn-outline-dark:disabled{color:#343a40;background-color:transparent}.btn-outline-dark:not(:disabled):not(.disabled).active,.btn-outline-dark:not(:disabled):not(.disabled):active,.show>.btn-outline-dark.dropdown-toggle{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark:not(:disabled):not(.disabled).active:focus,.btn-outline-dark:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-dark.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(52,58,64,.5)}.btn-link{font-weight:400;color:#007bff;text-decoration:none}.btn-link:hover{color:#0056b3;text-decoration:underline}.btn-link.focus,.btn-link:focus{text-decoration:underline;box-shadow:none}.btn-link.disabled,.btn-link:disabled{color:#6c757d;pointer-events:none}.btn-group-lg>.btn,.btn-lg{padding:.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:.3rem}.btn-group-sm>.btn,.btn-sm{padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:.2rem}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:.5rem}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;pointer-events:auto;content:"";background-color:rgba(0,0,0,0)}.text-wrap{white-space:normal !important}/*! + * Bootstrap v4.4.1 (https://getbootstrap.com/) + * Copyright 2011-2019 The Bootstrap Authors + * Copyright 2011-2019 Twitter, Inc. + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */.card{position:relative;display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid rgba(0,0,0,.125);border-radius:.25rem}.card>hr{margin-right:0;margin-left:0}.card>.list-group:first-child .list-group-item:first-child{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.card>.list-group:last-child .list-group-item:last-child{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.card-body{-ms-flex:1 1 auto;flex:1 1 auto;min-height:1px;padding:1.25rem}.card-title{margin-bottom:.75rem}.card-subtitle{margin-top:-.375rem;margin-bottom:0}.card-text:last-child{margin-bottom:0}.card-link:hover{text-decoration:none}.card-link+.card-link{margin-left:1.25rem}.card-header{padding:.75rem 1.25rem;margin-bottom:0;background-color:rgba(0,0,0,.03);border-bottom:1px solid rgba(0,0,0,.125)}.card-header:first-child{border-radius:calc(.25rem - 1px) calc(.25rem - 1px) 0 0}.card-header+.list-group .list-group-item:first-child{border-top:0}.card-footer{padding:.75rem 1.25rem;background-color:rgba(0,0,0,.03);border-top:1px solid rgba(0,0,0,.125)}.card-footer:last-child{border-radius:0 0 calc(.25rem - 1px) calc(.25rem - 1px)}.card-header-tabs{margin-right:-.625rem;margin-bottom:-.75rem;margin-left:-.625rem;border-bottom:0}.card-header-pills{margin-right:-.625rem;margin-left:-.625rem}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1.25rem}.card-img,.card-img-bottom,.card-img-top{-ms-flex-negative:0;flex-shrink:0;width:100%}.card-img,.card-img-top{border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card-img,.card-img-bottom{border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.w-100{width:100% !important}.shadow{box-shadow:0 .5rem 1rem rgba(0,0,0,.15) !important}.bg-primary{background-color:#007bff 
!important}a.bg-primary:focus,a.bg-primary:hover,button.bg-primary:focus,button.bg-primary:hover{background-color:#0062cc !important}.bg-secondary{background-color:#6c757d !important}a.bg-secondary:focus,a.bg-secondary:hover,button.bg-secondary:focus,button.bg-secondary:hover{background-color:#545b62 !important}.bg-success{background-color:#28a745 !important}a.bg-success:focus,a.bg-success:hover,button.bg-success:focus,button.bg-success:hover{background-color:#1e7e34 !important}.bg-info{background-color:#17a2b8 !important}a.bg-info:focus,a.bg-info:hover,button.bg-info:focus,button.bg-info:hover{background-color:#117a8b !important}.bg-warning{background-color:#ffc107 !important}a.bg-warning:focus,a.bg-warning:hover,button.bg-warning:focus,button.bg-warning:hover{background-color:#d39e00 !important}.bg-danger{background-color:#dc3545 !important}a.bg-danger:focus,a.bg-danger:hover,button.bg-danger:focus,button.bg-danger:hover{background-color:#bd2130 !important}.bg-light{background-color:#f8f9fa !important}a.bg-light:focus,a.bg-light:hover,button.bg-light:focus,button.bg-light:hover{background-color:#dae0e5 !important}.bg-dark{background-color:#343a40 !important}a.bg-dark:focus,a.bg-dark:hover,button.bg-dark:focus,button.bg-dark:hover{background-color:#1d2124 !important}.bg-white{background-color:#fff !important}.bg-transparent{background-color:transparent !important}.text-justify{text-align:justify !important}.text-left{text-align:left !important}.text-right{text-align:right !important}.text-center{text-align:center !important}.font-weight-light{font-weight:300 !important}.font-weight-lighter{font-weight:lighter !important}.font-weight-normal{font-weight:400 !important}.font-weight-bold{font-weight:700 !important}.font-weight-bolder{font-weight:bolder !important}.font-italic{font-style:italic !important}.text-white{color:#fff !important}.text-primary{color:#007bff !important}a.text-primary:focus,a.text-primary:hover{color:#0056b3 !important}.text-secondary{color:#6c757d !important}a.text-secondary:focus,a.text-secondary:hover{color:#494f54 !important}.text-success{color:#28a745 !important}a.text-success:focus,a.text-success:hover{color:#19692c !important}.text-info{color:#17a2b8 !important}a.text-info:focus,a.text-info:hover{color:#0f6674 !important}.text-warning{color:#ffc107 !important}a.text-warning:focus,a.text-warning:hover{color:#ba8b00 !important}.text-danger{color:#dc3545 !important}a.text-danger:focus,a.text-danger:hover{color:#a71d2a !important}.text-light{color:#f8f9fa !important}a.text-light:focus,a.text-light:hover{color:#cbd3da !important}.text-dark{color:#343a40 !important}a.text-dark:focus,a.text-dark:hover{color:#121416 !important}.text-body{color:#212529 !important}.text-muted{color:#6c757d !important}.text-black-50{color:rgba(0,0,0,.5) !important}.text-white-50{color:rgba(255,255,255,.5) !important}/*! + * Bootstrap v4.4.1 (https://getbootstrap.com/) + * Copyright 2011-2019 The Bootstrap Authors + * Copyright 2011-2019 Twitter, Inc. 
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */.container{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media(min-width:576px){.container{max-width:540px}}@media(min-width:768px){.container{max-width:720px}}@media(min-width:992px){.container{max-width:960px}}@media(min-width:1200px){.container{max-width:1140px}}.container-fluid,.container-lg,.container-md,.container-sm,.container-xl{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media(min-width:576px){.container,.container-sm{max-width:540px}}@media(min-width:768px){.container,.container-md,.container-sm{max-width:720px}}@media(min-width:992px){.container,.container-lg,.container-md,.container-sm{max-width:960px}}@media(min-width:1200px){.container,.container-lg,.container-md,.container-sm,.container-xl{max-width:1140px}}.row{display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-right:-15px;margin-left:-15px}.col-lg,.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-auto,.col-md,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-auto,.col-sm,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-auto,.col-xl,.col-xl-1,.col-xl-10,.col-xl-11,.col-xl-12,.col-xl-2,.col-xl-3,.col-xl-4,.col-xl-5,.col-xl-6,.col-xl-7,.col-xl-8,.col-xl-9,.col-xl-auto{position:relative;width:100%;padding-right:15px;padding-left:15px}@media(min-width:576px){.col-sm{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:100%}.col-sm-1{-ms-flex:0 0 8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-sm-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-sm-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-sm-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-sm-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-sm-6{-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-sm-7{-ms-flex:0 0 58.333333%;flex:0 0 58.333333%;max-width:58.333333%}.col-sm-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-sm-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-sm-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-sm-11{-ms-flex:0 0 91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-sm-12{-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}}@media(min-width:768px){.col-md{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-md-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:100%}.col-md-1{-ms-flex:0 0 8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-md-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-md-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-md-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-md-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-md-6{-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-md-7{-ms-flex:0 0 58.333333%;flex:0 0 58.333333%;max-width:58.333333%}.col-md-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-md-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-md-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-md-11{-ms-flex:0 0 
91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-md-12{-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}}@media(min-width:992px){.col-lg{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:100%}.col-lg-1{-ms-flex:0 0 8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-lg-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-lg-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-lg-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-lg-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-lg-6{-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-lg-7{-ms-flex:0 0 58.333333%;flex:0 0 58.333333%;max-width:58.333333%}.col-lg-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-lg-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-lg-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-lg-11{-ms-flex:0 0 91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-lg-12{-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}}@media(min-width:1200px){.col-xl{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:100%}.col-xl-1{-ms-flex:0 0 8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-xl-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-xl-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-xl-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-xl-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-xl-6{-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-xl-7{-ms-flex:0 0 58.333333%;flex:0 0 58.333333%;max-width:58.333333%}.col-xl-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-xl-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-xl-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-xl-11{-ms-flex:0 0 91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-xl-12{-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}}.d-flex{display:-ms-flexbox !important;display:flex !important}.sphinx-bs,.sphinx-bs *{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.sphinx-bs p{margin-top:0} diff --git a/docs/_static/scripts/fontawesome.js b/docs/_static/scripts/fontawesome.js new file mode 100644 index 00000000..71955b42 --- /dev/null +++ b/docs/_static/scripts/fontawesome.js @@ -0,0 +1,3 @@ +/*! 
For license information please see fontawesome.js.LICENSE.txt */ +(()=>{var c={654:()=>{!function(){"use strict";var c={},l={};try{"undefined"!=typeof window&&(c=window),"undefined"!=typeof document&&(l=document)}catch(c){}var s=void 0===(v=(c.navigator||{}).userAgent)?"":v,a=c,z=l;function e(c,l){var s,a=Object.keys(c);return Object.getOwnPropertySymbols&&(s=Object.getOwnPropertySymbols(c),l&&(s=s.filter((function(l){return Object.getOwnPropertyDescriptor(c,l).enumerable}))),a.push.apply(a,s)),a}function H(c){for(var l=1;lc.length)&&(l=c.length);for(var s=0,a=new Array(l);sc.length)&&(l=c.length);for(var s=0,a=new Array(l);sc.length)&&(l=c.length);for(var s=0,a=new Array(l);sc.length)&&(l=c.length);for(var s=0,a=new Array(l);s>>0;s--;)l[s]=c[s];return l}function lc(c){return c.classList?cc(c.classList):(c.getAttribute("class")||"").split(" ").filter((function(c){return c}))}function sc(c){return"".concat(c).replace(/&/g,"&").replace(/"/g,""").replace(/'/g,"'").replace(//g,">")}function ac(c){return Object.keys(c||{}).reduce((function(l,s){return l+"".concat(s,": ").concat(c[s].trim(),";")}),"")}function zc(c){return c.size!==K.size||c.x!==K.x||c.y!==K.y||c.rotate!==K.rotate||c.flipX||c.flipY}function ec(){var c,l,s=d,a=B.cssPrefix,z=B.replacementClass,e=':host,:root{--fa-font-solid:normal 900 1em/1 "Font Awesome 6 Solid";--fa-font-regular:normal 400 1em/1 "Font Awesome 6 Regular";--fa-font-light:normal 300 1em/1 "Font Awesome 6 Light";--fa-font-thin:normal 100 1em/1 "Font Awesome 6 Thin";--fa-font-duotone:normal 900 1em/1 "Font Awesome 6 Duotone";--fa-font-sharp-solid:normal 900 1em/1 "Font Awesome 6 Sharp";--fa-font-sharp-regular:normal 400 1em/1 "Font Awesome 6 Sharp";--fa-font-sharp-light:normal 300 1em/1 "Font Awesome 6 Sharp";--fa-font-sharp-thin:normal 100 1em/1 "Font Awesome 6 Sharp";--fa-font-brands:normal 400 1em/1 "Font Awesome 6 Brands"}svg:not(:host).svg-inline--fa,svg:not(:root).svg-inline--fa{overflow:visible;box-sizing:content-box}.svg-inline--fa{display:var(--fa-display,inline-block);height:1em;overflow:visible;vertical-align:-.125em}.svg-inline--fa.fa-2xs{vertical-align:.1em}.svg-inline--fa.fa-xs{vertical-align:0}.svg-inline--fa.fa-sm{vertical-align:-.0714285705em}.svg-inline--fa.fa-lg{vertical-align:-.2em}.svg-inline--fa.fa-xl{vertical-align:-.25em}.svg-inline--fa.fa-2xl{vertical-align:-.3125em}.svg-inline--fa.fa-pull-left{margin-right:var(--fa-pull-margin,.3em);width:auto}.svg-inline--fa.fa-pull-right{margin-left:var(--fa-pull-margin,.3em);width:auto}.svg-inline--fa.fa-li{width:var(--fa-li-width,2em);top:.25em}.svg-inline--fa.fa-fw{width:var(--fa-fw-width,1.25em)}.fa-layers svg.svg-inline--fa{bottom:0;left:0;margin:auto;position:absolute;right:0;top:0}.fa-layers-counter,.fa-layers-text{display:inline-block;position:absolute;text-align:center}.fa-layers{display:inline-block;height:1em;position:relative;text-align:center;vertical-align:-.125em;width:1em}.fa-layers svg.svg-inline--fa{-webkit-transform-origin:center center;transform-origin:center center}.fa-layers-text{left:50%;top:50%;-webkit-transform:translate(-50%,-50%);transform:translate(-50%,-50%);-webkit-transform-origin:center center;transform-origin:center 
center}.fa-layers-counter{background-color:var(--fa-counter-background-color,#ff253a);border-radius:var(--fa-counter-border-radius,1em);box-sizing:border-box;color:var(--fa-inverse,#fff);line-height:var(--fa-counter-line-height,1);max-width:var(--fa-counter-max-width,5em);min-width:var(--fa-counter-min-width,1.5em);overflow:hidden;padding:var(--fa-counter-padding,.25em .5em);right:var(--fa-right,0);text-overflow:ellipsis;top:var(--fa-top,0);-webkit-transform:scale(var(--fa-counter-scale,.25));transform:scale(var(--fa-counter-scale,.25));-webkit-transform-origin:top right;transform-origin:top right}.fa-layers-bottom-right{bottom:var(--fa-bottom,0);right:var(--fa-right,0);top:auto;-webkit-transform:scale(var(--fa-layers-scale,.25));transform:scale(var(--fa-layers-scale,.25));-webkit-transform-origin:bottom right;transform-origin:bottom right}.fa-layers-bottom-left{bottom:var(--fa-bottom,0);left:var(--fa-left,0);right:auto;top:auto;-webkit-transform:scale(var(--fa-layers-scale,.25));transform:scale(var(--fa-layers-scale,.25));-webkit-transform-origin:bottom left;transform-origin:bottom left}.fa-layers-top-right{top:var(--fa-top,0);right:var(--fa-right,0);-webkit-transform:scale(var(--fa-layers-scale,.25));transform:scale(var(--fa-layers-scale,.25));-webkit-transform-origin:top right;transform-origin:top right}.fa-layers-top-left{left:var(--fa-left,0);right:auto;top:var(--fa-top,0);-webkit-transform:scale(var(--fa-layers-scale,.25));transform:scale(var(--fa-layers-scale,.25));-webkit-transform-origin:top left;transform-origin:top left}.fa-1x{font-size:1em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-6x{font-size:6em}.fa-7x{font-size:7em}.fa-8x{font-size:8em}.fa-9x{font-size:9em}.fa-10x{font-size:10em}.fa-2xs{font-size:.625em;line-height:.1em;vertical-align:.225em}.fa-xs{font-size:.75em;line-height:.0833333337em;vertical-align:.125em}.fa-sm{font-size:.875em;line-height:.0714285718em;vertical-align:.0535714295em}.fa-lg{font-size:1.25em;line-height:.05em;vertical-align:-.075em}.fa-xl{font-size:1.5em;line-height:.0416666682em;vertical-align:-.125em}.fa-2xl{font-size:2em;line-height:.03125em;vertical-align:-.1875em}.fa-fw{text-align:center;width:1.25em}.fa-ul{list-style-type:none;margin-left:var(--fa-li-margin,2.5em);padding-left:0}.fa-ul>li{position:relative}.fa-li{left:calc(var(--fa-li-width,2em) * -1);position:absolute;text-align:center;width:var(--fa-li-width,2em);line-height:inherit}.fa-border{border-color:var(--fa-border-color,#eee);border-radius:var(--fa-border-radius,.1em);border-style:var(--fa-border-style,solid);border-width:var(--fa-border-width,.08em);padding:var(--fa-border-padding,.2em .25em 
.15em)}.fa-pull-left{float:left;margin-right:var(--fa-pull-margin,.3em)}.fa-pull-right{float:right;margin-left:var(--fa-pull-margin,.3em)}.fa-beat{-webkit-animation-name:fa-beat;animation-name:fa-beat;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,ease-in-out);animation-timing-function:var(--fa-animation-timing,ease-in-out)}.fa-bounce{-webkit-animation-name:fa-bounce;animation-name:fa-bounce;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.28,.84,.42,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.28,.84,.42,1))}.fa-fade{-webkit-animation-name:fa-fade;animation-name:fa-fade;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1))}.fa-beat-fade{-webkit-animation-name:fa-beat-fade;animation-name:fa-beat-fade;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1))}.fa-flip{-webkit-animation-name:fa-flip;animation-name:fa-flip;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animat
ion-timing,ease-in-out);animation-timing-function:var(--fa-animation-timing,ease-in-out)}.fa-shake{-webkit-animation-name:fa-shake;animation-name:fa-shake;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,linear);animation-timing-function:var(--fa-animation-timing,linear)}.fa-spin{-webkit-animation-name:fa-spin;animation-name:fa-spin;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,2s);animation-duration:var(--fa-animation-duration,2s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,linear);animation-timing-function:var(--fa-animation-timing,linear)}.fa-spin-reverse{--fa-animation-direction:reverse}.fa-pulse,.fa-spin-pulse{-webkit-animation-name:fa-spin;animation-name:fa-spin;-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,steps(8));animation-timing-function:var(--fa-animation-timing,steps(8))}@media (prefers-reduced-motion:reduce){.fa-beat,.fa-beat-fade,.fa-bounce,.fa-fade,.fa-flip,.fa-pulse,.fa-shake,.fa-spin,.fa-spin-pulse{-webkit-animation-delay:-1ms;animation-delay:-1ms;-webkit-animation-duration:1ms;animation-duration:1ms;-webkit-animation-iteration-count:1;animation-iteration-count:1;-webkit-transition-delay:0s;transition-delay:0s;-webkit-transition-duration:0s;transition-duration:0s}}@-webkit-keyframes fa-beat{0%,90%{-webkit-transform:scale(1);transform:scale(1)}45%{-webkit-transform:scale(var(--fa-beat-scale,1.25));transform:scale(var(--fa-beat-scale,1.25))}}@keyframes fa-beat{0%,90%{-webkit-transform:scale(1);transform:scale(1)}45%{-webkit-transform:scale(var(--fa-beat-scale,1.25));transform:scale(var(--fa-beat-scale,1.25))}}@-webkit-keyframes fa-bounce{0%{-webkit-transform:scale(1,1) translateY(0);transform:scale(1,1) translateY(0)}10%{-webkit-transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0);transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0)}30%{-webkit-transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em));transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em))}50%{-webkit-transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) 
translateY(0);transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0)}57%{-webkit-transform:scale(1,1) translateY(var(--fa-bounce-rebound,-.125em));transform:scale(1,1) translateY(var(--fa-bounce-rebound,-.125em))}64%{-webkit-transform:scale(1,1) translateY(0);transform:scale(1,1) translateY(0)}100%{-webkit-transform:scale(1,1) translateY(0);transform:scale(1,1) translateY(0)}}@keyframes fa-bounce{0%{-webkit-transform:scale(1,1) translateY(0);transform:scale(1,1) translateY(0)}10%{-webkit-transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0);transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0)}30%{-webkit-transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em));transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em))}50%{-webkit-transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0);transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0)}57%{-webkit-transform:scale(1,1) translateY(var(--fa-bounce-rebound,-.125em));transform:scale(1,1) translateY(var(--fa-bounce-rebound,-.125em))}64%{-webkit-transform:scale(1,1) translateY(0);transform:scale(1,1) translateY(0)}100%{-webkit-transform:scale(1,1) translateY(0);transform:scale(1,1) translateY(0)}}@-webkit-keyframes fa-fade{50%{opacity:var(--fa-fade-opacity,.4)}}@keyframes fa-fade{50%{opacity:var(--fa-fade-opacity,.4)}}@-webkit-keyframes fa-beat-fade{0%,100%{opacity:var(--fa-beat-fade-opacity,.4);-webkit-transform:scale(1);transform:scale(1)}50%{opacity:1;-webkit-transform:scale(var(--fa-beat-fade-scale,1.125));transform:scale(var(--fa-beat-fade-scale,1.125))}}@keyframes fa-beat-fade{0%,100%{opacity:var(--fa-beat-fade-opacity,.4);-webkit-transform:scale(1);transform:scale(1)}50%{opacity:1;-webkit-transform:scale(var(--fa-beat-fade-scale,1.125));transform:scale(var(--fa-beat-fade-scale,1.125))}}@-webkit-keyframes fa-flip{50%{-webkit-transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg));transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg))}}@keyframes fa-flip{50%{-webkit-transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg));transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg))}}@-webkit-keyframes fa-shake{0%{-webkit-transform:rotate(-15deg);transform:rotate(-15deg)}4%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}24%,8%{-webkit-transform:rotate(-18deg);transform:rotate(-18deg)}12%,28%{-webkit-transform:rotate(18deg);transform:rotate(18deg)}16%{-webkit-transform:rotate(-22deg);transform:rotate(-22deg)}20%{-webkit-transform:rotate(22deg);transform:rotate(22deg)}32%{-webkit-transform:rotate(-12deg);transform:rotate(-12deg)}36%{-webkit-transform:rotate(12deg);transform:rotate(12deg)}100%,40%{-webkit-transform:rotate(0);transform:rotate(0)}}@keyframes 
fa-shake{0%{-webkit-transform:rotate(-15deg);transform:rotate(-15deg)}4%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}24%,8%{-webkit-transform:rotate(-18deg);transform:rotate(-18deg)}12%,28%{-webkit-transform:rotate(18deg);transform:rotate(18deg)}16%{-webkit-transform:rotate(-22deg);transform:rotate(-22deg)}20%{-webkit-transform:rotate(22deg);transform:rotate(22deg)}32%{-webkit-transform:rotate(-12deg);transform:rotate(-12deg)}36%{-webkit-transform:rotate(12deg);transform:rotate(12deg)}100%,40%{-webkit-transform:rotate(0);transform:rotate(0)}}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(360deg);transform:rotate(360deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(360deg);transform:rotate(360deg)}}.fa-rotate-90{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-webkit-transform:scale(-1,1);transform:scale(-1,1)}.fa-flip-vertical{-webkit-transform:scale(1,-1);transform:scale(1,-1)}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical{-webkit-transform:scale(-1,-1);transform:scale(-1,-1)}.fa-rotate-by{-webkit-transform:rotate(var(--fa-rotate-angle,0));transform:rotate(var(--fa-rotate-angle,0))}.fa-stack{display:inline-block;vertical-align:middle;height:2em;position:relative;width:2.5em}.fa-stack-1x,.fa-stack-2x{bottom:0;left:0;margin:auto;position:absolute;right:0;top:0;z-index:var(--fa-stack-z-index,auto)}.svg-inline--fa.fa-stack-1x{height:1em;width:1.25em}.svg-inline--fa.fa-stack-2x{height:2em;width:2.5em}.fa-inverse{color:var(--fa-inverse,#fff)}.fa-sr-only,.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border-width:0}.fa-sr-only-focusable:not(:focus),.sr-only-focusable:not(:focus){position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border-width:0}.svg-inline--fa .fa-primary{fill:var(--fa-primary-color,currentColor);opacity:var(--fa-primary-opacity,1)}.svg-inline--fa .fa-secondary{fill:var(--fa-secondary-color,currentColor);opacity:var(--fa-secondary-opacity,.4)}.svg-inline--fa.fa-swap-opacity .fa-primary{opacity:var(--fa-secondary-opacity,.4)}.svg-inline--fa.fa-swap-opacity .fa-secondary{opacity:var(--fa-primary-opacity,1)}.svg-inline--fa mask .fa-primary,.svg-inline--fa mask .fa-secondary{fill:#000}.fa-duotone.fa-inverse,.fad.fa-inverse{color:var(--fa-inverse,#fff)}';return"fa"===a&&z===s||(c=new RegExp("\\.".concat("fa","\\-"),"g"),l=new RegExp("\\--".concat("fa","\\-"),"g"),s=new RegExp("\\.".concat(s),"g"),e=e.replace(c,".".concat(a,"-")).replace(l,"--".concat(a,"-")).replace(s,".".concat(z))),e}var Hc=!1;function tc(){B.autoAddCss&&!Hc&&(function(c){if(c&&L){var l=o.createElement("style");l.setAttribute("type","text/css"),l.innerHTML=c;for(var s=o.head.childNodes,a=null,z=s.length-1;-1").concat(z.map(nc).join(""),"")}function ic(c,l,s){if(c&&c[l]&&c[l][s])return{prefix:l,iconName:s,icon:c[l][s]}}function mc(c,l,s,a){for(var z,e,H=Object.keys(c),t=H.length,V=void 0!==a?oc(l,a):l,r=void 0===s?(z=1,c[H[0]]):(z=0,s);z{var l=c&&c.__esModule?()=>c.default:()=>c;return s.d(l,{a:l}),l},s.d=(c,l)=>{for(var a in l)s.o(l,a)&&!s.o(c,a)&&Object.defineProperty(c,a,{enumerable:!0,get:l[a]})},s.o=(c,l)=>Object.prototype.hasOwnProperty.call(c,l),(()=>{"use 
strict";s(654)})()})(); +//# sourceMappingURL=fontawesome.js.map \ No newline at end of file diff --git a/docs/_static/scripts/fontawesome.js.LICENSE.txt b/docs/_static/scripts/fontawesome.js.LICENSE.txt new file mode 100644 index 00000000..a91750af --- /dev/null +++ b/docs/_static/scripts/fontawesome.js.LICENSE.txt @@ -0,0 +1,5 @@ +/*! + * Font Awesome Free 6.5.2 by @fontawesome - https://fontawesome.com + * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) + * Copyright 2024 Fonticons, Inc. + */ diff --git a/docs/_static/scripts/fontawesome.js.map b/docs/_static/scripts/fontawesome.js.map new file mode 100644 index 00000000..8fa96a6d --- /dev/null +++ b/docs/_static/scripts/fontawesome.js.map @@ -0,0 +1 @@ +{"version":3,"file":"scripts/fontawesome.js","mappings":";uBAKC,WAAW,aAAa,IAAIA,EAAE,CAAC,EAAEC,EAAE,CAAC,EAAE,IAAI,oBAAoBC,SAASF,EAAEE,QAAQ,oBAAoBC,WAAWF,EAAEE,SAAS,CAAC,MAAMH,GAAG,CAAC,IAAkCI,OAAE,KAAhCC,GAAGL,EAAEM,WAAW,CAAC,GAAGC,WAAuB,GAAGF,EAAEG,EAAER,EAAES,EAAER,EAAyI,SAASS,EAAET,EAAED,GAAG,IAAIK,EAAED,EAAEO,OAAOC,KAAKX,GAAG,OAAOU,OAAOE,wBAAwBR,EAAEM,OAAOE,sBAAsBZ,GAAGD,IAAIK,EAAEA,EAAES,QAAO,SAASd,GAAG,OAAOW,OAAOI,yBAAyBd,EAAED,GAAGgB,UAAU,KAAIZ,EAAEa,KAAKC,MAAMd,EAAEC,IAAID,CAAC,CAAC,SAASe,EAAElB,GAAG,IAAI,IAAID,EAAE,EAAEA,EAAEoB,UAAUC,OAAOrB,IAAI,CAAC,IAAIK,EAAE,MAAMe,UAAUpB,GAAGoB,UAAUpB,GAAG,CAAC,EAAEA,EAAE,EAAEU,EAAEC,OAAON,IAAG,GAAIiB,SAAQ,SAAStB,GAAGuB,EAAEtB,EAAED,EAAEK,EAAEL,GAAG,IAAGW,OAAOa,0BAA0Bb,OAAOc,iBAAiBxB,EAAEU,OAAOa,0BAA0BnB,IAAIK,EAAEC,OAAON,IAAIiB,SAAQ,SAAStB,GAAGW,OAAOe,eAAezB,EAAED,EAAEW,OAAOI,yBAAyBV,EAAEL,GAAG,GAAE,CAAC,OAAOC,CAAC,CAAC,SAASsB,EAAEvB,EAAEC,EAAEI,GAAG,OAAOJ,KAAKD,EAAEW,OAAOe,eAAe1B,EAAEC,EAAE,CAAC0B,MAAMtB,EAAEW,YAAW,EAAGY,cAAa,EAAGC,UAAS,IAAK7B,EAAEC,GAAGI,EAAEL,CAAC,CAAC,SAAS8B,EAAE9B,EAAEC,IAAI,MAAMA,GAAGA,EAAED,EAAEqB,UAAUpB,EAAED,EAAEqB,QAAQ,IAAI,IAAIhB,EAAE,EAAED,EAAE,IAAI2B,MAAM9B,GAAGI,EAAEJ,EAAEI,IAAID,EAAEC,GAAGL,EAAEK,GAAG,OAAOD,CAAC,CAA95BI,EAAEL,SAASM,EAAEuB,iBAAiBvB,EAAEwB,MAAM,mBAAmBxB,EAAEyB,kBAAkBzB,EAAE0B,eAAe/B,EAAEgC,QAAQ,SAAShC,EAAEgC,QAAQ,YAAoyB,IAAIC,EAAE,qBAAqBC,EAAE,WAAW,IAAI,OAAM,CAAE,CAAC,MAAMtC,GAAG,OAAM,CAAE,CAAC,CAA1C,GAA8CuC,EAAE,UAAUC,EAAE,QAAQC,EAAE,CAACF,EAAEC,GAAG,SAASE,EAAE1C,GAAG,OAAO,IAAI2C,MAAM3C,EAAE,CAAC4C,IAAI,SAAS5C,EAAEC,GAAG,OAAOA,KAAKD,EAAEA,EAAEC,GAAGD,EAAEuC,EAAE,GAAG,CAACG,GAAGnB,EAAEsB,EAAE,CAAC,EAAEN,EAAE,CAACO,GAAG,QAAQC,IAAI,QAAQ,WAAW,QAAQC,IAAI,UAAU,aAAa,UAAUC,IAAI,QAAQ,WAAW,QAAQC,IAAI,OAAO,UAAU,OAAOC,IAAI,UAAU,aAAa,UAAUC,IAAI,SAAS,YAAY,SAASC,IAAI,MAAMC,KAAK,MAAM,SAAS,MAAM,iBAAiB,QAAQ/B,EAAEsB,EAAEL,EAAE,CAACM,GAAG,QAAQS,KAAK,QAAQ,WAAW,QAAQC,KAAK,UAAU,aAAa,UAAUC,KAAK,QAAQ,WAAW,QAAQC,KAAK,OAAO,UAAU,SAASb,IAAI,IAAIc,EAAEjB,GAAGnB,EAAEqC,EAAE,CAAC,EAAErB,EAAE,CAACsB,MAAM,MAAMC,QAAQ,MAAMC,MAAM,MAAMC,KAAK,MAAMC,QAAQ,MAAMC,OAAO,MAAMC,IAAI,QAAQ5C,EAAEqC,EAAEpB,EAAE,CAACqB,MAAM,OAAOC,QAAQ,OAAOC,MAAM,OAAOC,KAAK,SAASJ,IAA8mBf,GAA5CzC,GAA3jBsC,GAAGnB,EAAElB,EAAE,CAAC,EAAEkC,EAAE,CAACa,IAAI,YAAYD,IAAI,aAAaE,IAAI,SAASJ,IAAI,WAAWD,IAAI,aAAaD,IAAI,WAAWG,IAAI,YAAY3B,EAAElB,EAAEmC,EAAE,CAACe,KAAK,WAAWC,KAAK,aAAaC,KAAK,WAAWC,KAAK,YAAYrD,IAAIqC,GAAGnB,EAAEvB,EAAE,CAAC,EAAEuC,EAAE,CAAC,YAAY,MAAM,aAAa,MAAM,SAAS,MAAM,WAAW,MAAM,aAAa,MAAM,WAAW,MAAM,UAAU,QAAQhB,EAAEvB,EAAEwC,EAAE,CAAC,WAAW,OAAO,aAAa,OAAO,WAAW,OAAO,UAAU,SAASxC,IAAI0C,GAAGnB,EAAEtB,EAAE,CAAC,EAAEsC,EAAE,CAAC,IAAI,MAAM,IAAI,MAAM6B,OAAO,MAAM,IAAI,MAAM,IAAI,QAAQ7C,EAAEtB,EAAEuC,EAAE,CAAC,IAAI,OAAO,IAAI,OAAO,IAAI,OAAO,IAAI,SAASvC,IAAliBQ,EAAsiB,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,K
AAS4D,OAAO,CAAC,GAAG,GAAG,GAAG,GAAG,GAAG,GAAG,GAAG,GAAG,GAAG,KAAO,iBAAgBT,EAAE,eAAevD,EAAE,UAAUL,EAAE,YAAYC,EAAE,IAAIqE,IAAI3D,OAAOC,KAAK+C,EAAEpB,IAAIgC,IAAItE,EAAEuE,IAAIC,KAAKxE,IAAIU,OAAOC,KAAK+C,EAAEnB,IAAI+B,IAAItE,EAAEuE,IAAIC,KAAKxE,IAAI,GAAGoE,OAAO5B,EAAE,SAASzC,GAAG,GAAG+B,MAAM2C,QAAQ1E,GAAG,OAAO8B,EAAE9B,EAAE,CAA3C,CAA+CC,IAAI,SAASD,GAAG,GAAG,oBAAoB2E,QAAQ,MAAM3E,EAAE2E,OAAOC,WAAW,MAAM5E,EAAE,cAAc,OAAO+B,MAAM8C,KAAK7E,EAAE,CAA/G,CAAiHC,IAAI,SAASD,EAAEC,GAAG,GAAGD,EAAE,CAAC,GAAG,iBAAiBA,EAAE,OAAO8B,EAAE9B,EAAEC,GAAG,IAAII,EAAEM,OAAOmE,UAAUC,SAASC,KAAKhF,GAAGiF,MAAM,GAAG,GAAG,MAAM,SAAS5E,EAAE,WAAWA,GAAGL,EAAEkF,YAAYlF,EAAEkF,YAAYC,KAAK9E,IAAI,QAAQA,EAAE0B,MAAM8C,KAAK7E,GAAG,cAAcK,GAAG,2CAA2C+E,KAAK/E,GAAGyB,EAAE9B,EAAEC,QAAG,CAAM,CAAC,CAA1R,CAA4RA,IAAI,WAAW,MAAM,IAAIoF,UAAU,uIAAuI,CAAtK,GAA0K,CAAC,MAAM,KAAK,KAAK,KAAK,KAAK,MAAM,OAAO,SAAS,OAAO,YAAY,SAAS,YAAY,kBAAkB,gBAAgB,OAAO,KAAK,UAAU,iBAAiB,cAAc,SAAS,KAAK,YAAY,aAAa,QAAQ,aAAa,aAAa,YAAY,YAAY,QAAQ,aAAa,eAAe,OAAO,WAAW,WAAW,QAAQ,KAAKxC,EAAEe,EAAEvD,EAAEL,IAAIqE,OAAO5D,EAAE8D,KAAI,SAASvE,GAAG,MAAM,GAAGqE,OAAOrE,EAAE,IAAI,KAAIqE,OAAOjE,EAAEmE,KAAI,SAASvE,GAAG,MAAM,KAAKqE,OAAOrE,EAAE,MAAIQ,EAAEA,GAAG,CAAC,GAAI6B,KAAK7B,EAAE6B,GAAG,CAAC,GAAG7B,EAAE6B,GAAGiD,SAAS9E,EAAE6B,GAAGiD,OAAO,CAAC,GAAG9E,EAAE6B,GAAGkD,QAAQ/E,EAAE6B,GAAGkD,MAAM,CAAC,GAAG/E,EAAE6B,GAAGmD,QAAQhF,EAAE6B,GAAGmD,MAAM,IAAI,IAAIC,EAAEjF,EAAE6B,GAAG,SAASqD,EAAEtF,GAAG,OAAOO,OAAOC,KAAKR,GAAGuF,QAAO,SAAS3F,EAAEC,GAAG,IAAII,EAAED,EAAEH,GAAG,OAAQI,EAAEuF,KAAK5F,EAAEK,EAAEwF,UAAUxF,EAAEuF,KAAK5F,EAAEC,GAAGI,EAAEL,CAAC,GAAE,CAAC,EAAE,CAAC,SAAS8F,EAAE9F,EAAEC,EAAEI,GAAyDA,OAAE,KAApDD,GAAG,EAAEgB,UAAUC,aAAQ,IAAShB,EAAEA,EAAE,CAAC,GAAG0F,YAAwB3F,EAApE,IAAsEA,EAAEsF,EAAEzF,GAAG,mBAAmBwF,EAAEF,MAAMS,SAAS3F,EAAEoF,EAAEH,OAAOtF,GAAGmB,EAAEA,EAAE,CAAC,EAAEsE,EAAEH,OAAOtF,IAAI,CAAC,GAAGI,GAAGqF,EAAEF,MAAMS,QAAQhG,EAAE0F,EAAEzF,IAAI,QAAQD,GAAG8F,EAAE,KAAK7F,EAAE,CAAC,IAAIgG,EAAE,CAACC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,0NAA0NC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,k4CAAk4CC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,yvBAAyvB,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,01BAA01BC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,6jBAA6jBC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,mgBAAmgB,sBAAsB,CAAC,IAAI,IAAI,GAAG,OAAO,0pBAA0pBC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,0rEAA0rEC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,ujBAAujBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,4bAA4bC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,0gCAA0gCC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,+pBAA+pBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,0lBAA0lBC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,ybAAybC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,wLAAwLC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,2qBAA2qBC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,ssBAAssBC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,6NAA6N,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,8oCAA8oCC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,4KAA4KC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,uYAAuYC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,uwBAAuwB,YAAY,CAAC,IAAI,IAAI,CAAC,aAAa,OAAO,6mBAA6mBC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,+GAA+GC,GAAG,CAAC,IAAI,IAAI,GAAG,OAAO,2jBAA2jBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,+WAA+WC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,klCAAklCC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,+lBAA+lB,0BAA0B,CAAC,IAAI,IAAI,GAAG,OAAO,8hBAA8hBC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,2XAA2X,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,0qBAA0qBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,iyCAAiyCC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,8VAA8V,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,09BAA09B,sBAAsB,CAAC,IAAI,IAAI,GAAG,OAAO,ufAAufC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,guBAAguBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,8pCAA8pCC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,uwCAAuwCC,GAAG,CAAC,IAAI,IAAI,GAAG,OAAO,szBAAszBC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,shCAAshCC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,q/FAAq/F,WAAW,CAAC,IAAI,IA
AI,GAAG,OAAO,iPAAiP,gBAAgB,CAAC,IAAI,IAAI,CAAC,iBAAiB,OAAO,85BAA85B,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,iYAAiYC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,4dAA4d,sBAAsB,CAAC,IAAI,IAAI,GAAG,OAAO,4WAA4WC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,mmBAAmmBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,4jFAA4jFC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,yRAAyR,mBAAmB,CAAC,IAAI,IAAI,CAAC,oBAAoB,OAAO,yyCAAyyC,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,i8EAAi8E,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,68LAA68L,qBAAqB,CAAC,IAAI,IAAI,CAAC,sBAAsB,OAAO,0UAA0UC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,0nCAA0nCC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,okCAAokCC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,w4BAAw4B,kBAAkB,CAAC,IAAI,IAAI,CAAC,mBAAmB,OAAO,ojGAAojG,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,ieAAieC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,qMAAqMC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,2RAA2RC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,kQAAkQ,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,8YAA8YC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,0ZAA0ZC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,miBAAmiB,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,+NAA+N,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,oyBAAoyB,mBAAmB,CAAC,IAAI,IAAI,GAAG,OAAO,i/CAAi/CC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,weAAweC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,8pBAA8pB,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,6ZAA6Z,sBAAsB,CAAC,IAAI,IAAI,GAAG,OAAO,8fAA8fC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,mlDAAmlDC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,2PAA2P,6BAA6B,CAAC,IAAI,IAAI,CAAC,oBAAoB,OAAO,ieAAieC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,kSAAkS,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,gTAAgT,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,4SAA4SC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,8dAA8dC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,sZAAsZC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,q6BAAq6B,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,wLAAwL,4BAA4B,CAAC,IAAI,IAAI,GAAG,OAAO,uoCAAuoCC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,8sCAA8sC,mBAAmB,CAAC,IAAI,IAAI,GAAG,OAAO,s3BAAs3B,sBAAsB,CAAC,IAAI,IAAI,GAAG,OAAO,+zBAA+zBC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,ojBAAojBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,w2BAAw2BC,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,w+CAAw+CC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,6pBAA6pB,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,8cAA8cC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,ulBAAulBC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,+WAA+WC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,2IAA2I,uBAAuB,CAAC,IAAI,IAAI,GAAG,OAAO,+3PAA+3P,gBAAgB,CAAC,IAAI,IAAI,CAAC,iBAAiB,OAAO,i1BAAi1BC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,wgEAAwgEC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,q0BAAq0BC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,+LAA+LC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,uIAAuIC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,irCAAirC,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,4jDAA4jD,kBAAkB,CAAC,IAAI,IAAI,CAAC,mBAAmB,OAAO,2zBAA2zBC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,6cAA6cC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,i+FAAi+FC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,oJAAoJC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,guBAAguB,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,yrCAAyrCC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,qpBAAqpBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,uuJAAuuJC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,kQAAkQC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,oKAAoKC,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,s3BAAs3BC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,kvBAAkvB,iBAAiB,CAAC,IAAI,IAAI,CAAC,kBAAkB,OAAO,2mBAA2mB,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,qiBAAqiBC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,sdAAsdC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,wgBAAwgBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,4HAA4HC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,wwBAAwwBC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,mnBAAmnB,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,0hBAA0hB,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,8wBAA8wB,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,g4FAAg4F,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,0kBAA0kBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,y+DAAy+DC,YAAY,CAAC,IAAI,IAAI,CAAC,sBAAsB,OAAO,mXAAmXC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,ghDAAghDC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,45BAA45BC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,g4CAAg4CC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,+LAA+L,iBAAiB,CAAC,IAAI,I
AAI,CAAC,MAAM,kBAAkB,OAAO,gcAAgc,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,6rBAA6rBC,QAAQ,CAAC,IAAI,IAAI,CAAC,WAAW,OAAO,gnCAAgnCC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,0xCAA0xC,oBAAoB,CAAC,IAAI,IAAI,GAAG,OAAO,u/CAAu/C,kBAAkB,CAAC,IAAI,IAAI,GAAG,OAAO,s6JAAs6JC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,qsBAAqsBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,iyBAAiyBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,u7BAAu7B,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,mTAAmTC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,2vBAA2vBC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,imDAAimDC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,uSAAuS,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,uSAAuS,oBAAoB,CAAC,IAAI,IAAI,GAAG,OAAO,8sBAA8sB,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,uhDAAuhD,wBAAwB,CAAC,IAAI,IAAI,GAAG,OAAO,4kBAA4kBC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,y1CAAy1CC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,gmBAAgmBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,u7BAAu7BC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,kxCAAkxCC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,iiBAAiiB,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,0oBAA0oB,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,yoCAAyoCC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,sfAAsfC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,ogBAAogBC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,8vEAA8vEC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,6iBAA6iBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,+LAA+L,kBAAkB,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,2rBAA2rBC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,6/BAA6/BC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,ooBAAooBC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,yFAAyFC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,wzCAAwzC,eAAe,CAAC,IAAI,IAAI,CAAC,gBAAgB,OAAO,2oBAA2oB,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,ipCAAipC,eAAe,CAAC,IAAI,IAAI,CAAC,gBAAgB,OAAO,qhBAAqhBC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,grBAAgrB,eAAe,CAAC,IAAI,IAAI,CAAC,MAAM,MAAM,oBAAoB,0BAA0B,OAAO,gQAAgQC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,iQAAiQC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,ubAAubC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,4uBAA4uBC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,8vCAA8vCC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,i9CAAi9C,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,0lCAA0lCC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,upBAAupB,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,8zGAA8zG,gBAAgB,CAAC,IAAI,IAAI,CAAC,iBAAiB,OAAO,k9CAAk9CC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,saAAsaC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,uuBAAuuB,oBAAoB,CAAC,IAAI,IAAI,GAAG,OAAO,g3EAAg3EC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,ytCAAytCC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,2WAA2W,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,ooCAAooCC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,+vBAA+vBC,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,guBAAguBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,q8CAAq8CC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,mKAAmKC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,+iBAA+iBC,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,m1DAAm1D,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,qIAAqIC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,2uDAA2uDC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,kSAAkS,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,maAAma,gBAAgB,CAAC,IAAI,IAAI,CAAC,iBAAiB,OAAO,soBAAsoBC,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,2eAA2e,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,2hBAA2hBC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,ogCAAogC,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,4PAA4PC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,sjEAAsjEC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,oiBAAoiBC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,+tBAA+tB,uBAAuB,CAAC,IAAI,IAAI,CAAC,wBAAwB,OAAO,smBAAsmBC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,myDAAmyDC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,8uBAA8uBC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,moGAAmoGC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,wtBAAwtBC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,uWAAuWC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,gxFAAgxF,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,spDAAspDC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,qlCAAqlC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,4rCAA4rC,yBAAyB,CAAC,IAAI,IAAI,GAAG,OAAO,qiBAAqiBC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,ikCAAikCC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,m6CAAm6CC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,kaAAka,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,qqCAAqqCC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,m4BAAm4B,kBAAkB,CAAC,IAA
I,IAAI,GAAG,OAAO,gjFAAgjFC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,uwBAAuwBC,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,+rBAA+rBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,mmCAAmmCC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,kEAAkEC,GAAG,CAAC,IAAI,IAAI,GAAG,OAAO,yQAAyQC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,2vBAA2vB,mBAAmB,CAAC,IAAI,IAAI,CAAC,oBAAoB,OAAO,8rBAA8rBC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,+XAA+XC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,wlBAAwlB,sBAAsB,CAAC,IAAI,IAAI,GAAG,OAAO,skBAAskBC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,slBAAslBC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,8UAA8UC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,ouDAAouD,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,wzBAAwzBC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,8VAA8VC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,yaAAya,mBAAmB,CAAC,IAAI,IAAI,GAAG,OAAO,+uDAA+uDC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,6iBAA6iBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,gwBAAgwBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,sUAAsU,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,+yBAA+yBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,8MAA8MC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,0dAA0dC,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,wUAAwU,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,0pBAA0pBC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,+sBAA+sBC,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,2kBAA2kB,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,0jCAA0jCC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,+NAA+NC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,mGAAmG,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,4WAA4W,yBAAyB,CAAC,IAAI,IAAI,GAAG,OAAO,4uBAA4uBC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,8NAA8NC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,+eAA+eC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,gWAAgWC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,4WAA4WC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,yZAAyZ,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,wiGAAwiG,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,yKAAyK,qBAAqB,CAAC,IAAI,IAAI,CAAC,sBAAsB,OAAO,6aAA6a,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,qYAAqYC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,wxLAAwxL,kBAAkB,CAAC,IAAI,IAAI,GAAG,OAAO,86CAA86CC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,+2BAA+2B,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,0gBAA0gB,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,g1HAAg1HC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,ohBAAohBC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,uQAAuQC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,2iBAA2iBC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,6hBAA6hBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,yWAAyWC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,oxBAAoxB,cAAc,CAAC,IAAI,IAAI,CAAC,eAAe,OAAO,wfAAwfC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,wGAAwGC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,kgBAAkgBC,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,swCAAswCC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,6eAA6eC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,iWAAiWC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,2MAA2MC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,yxBAAyxB,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,mkJAAmkJC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,wpBAAwpBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,qjEAAqjEC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,42BAA42BC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,8PAA8PC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,mmCAAmmCC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,+SAA+SC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,q2CAAq2CC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,gkEAAgkEC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,oTAAoT,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,oaAAoaC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,gmBAAgmBC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,mlCAAmlCC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,gOAAgOC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,0kBAA0kB,qBAAqB,CAAC,IAAI,IAAI,GAAG,OAAO,8dAA8dC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,kfAAkf,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,uIAAuIC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,mjDAAmjDC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,oWAAoW,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,gKAAgK,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,8rCAA8rCC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,oPAAoP,WAAW,CAAC,IAAI,IAAI,CAAC,YAAY,OAAO,0pBAA0pB,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,gOAAgOC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,wNAAwN,oBAAoB,CAAC,IAAI,IAAI,CAAC,qBAAqB,OAAO,+RAA+R,sBAAsB,CAAC,IAAI,IAAI,GAAG,OAAO,sTAAsTC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,8vBAA8vBC,YAAY,CAAC,IAAI,I
AAI,GAAG,OAAO,oUAAoUC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,6dAA6dC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,+3BAA+3B,4BAA4B,CAAC,IAAI,IAAI,GAAG,OAAO,ymBAAymBC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,yqBAAyqBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,+NAA+N,kBAAkB,CAAC,IAAI,IAAI,CAAC,mBAAmB,OAAO,+QAA+QC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,gVAAgVC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,o6GAAo6GC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,2wBAA2wBC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,4aAA4aC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,ovBAAovBC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,gwBAAgwBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,4MAA4MC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,miDAAmiDC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,8aAA8a,gBAAgB,CAAC,IAAI,IAAI,CAAC,iBAAiB,OAAO,yvBAAyvBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,mfAAmfC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,opBAAopBC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,48BAA48BC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,q6CAAq6C,yBAAyB,CAAC,IAAI,IAAI,GAAG,OAAO,4jBAA4jB,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,wxGAAwxGC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,0OAA0OC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,85BAA85BC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,4tDAA4tDC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,6gBAA6gBC,SAAS,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,gRAAgRC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,smBAAsmB,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,opBAAopBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,+JAA+J,mBAAmB,CAAC,IAAI,IAAI,GAAG,OAAO,+hHAA+hHC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,w7BAAw7BC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,g4DAAg4DC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,2zDAA2zD,mBAAmB,CAAC,IAAI,IAAI,GAAG,OAAO,sxCAAsxCC,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,2MAA2MC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,03BAA03B,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,+qBAA+qBC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,kxBAAkxB,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,0PAA0PC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,quBAAquBC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,smCAAsmC,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,6IAA6IC,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,8KAA8KC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,ycAAycC,UAAU,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,sOAAsOC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,m/DAAm/DC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,uRAAuR,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,+pHAA+pHC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,+NAA+NC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,2kBAA2kBC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,wnEAAwnEC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,sSAAsSC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,sHAAsH,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,21IAA21IC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,wiBAAwiB,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,4gBAA4gB,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,8UAA8U,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,giBAAgiB,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,uQAAuQC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,8MAA8MC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,29BAA29BC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,2MAA2MC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,8mCAA8mC,qBAAqB,CAAC,IAAI,IAAI,GAAG,OAAO,qfAAqfC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,m8FAAm8FC,gBAAgB,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,iPAAiPC,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,8YAA8YC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,0mIAA0mI,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,6vBAA6vBC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,ozBAAozBC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,4gEAA4gE,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,mfAAmfC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,qnDAAqnD,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,+6BAA+6BC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,i9BAAi9BC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,+iBAA+iB,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,+rBAA+rBC,SAAS,CAAC,IAAI,IAAI,CAAC,MAAM,kBAAkB,OAAO,0jGAA0jG,uBAAuB,CAAC,IAAI,IAAI,GAAG,OAAO,mgCAAmgCC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,wvHAAwvHC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,goCAAgoC,iBAAiB,CAAC,IAAI,IAAI,CAAC,kBAAkB,OAAO,otBAAotBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,omDAAomDC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,qKAAqKC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,2iBAA2iBC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,mIAAmIC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,+sBAA+sBC,OAAO,CAAC,IAAI,IA
AI,GAAG,OAAO,kPAAkP,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,2LAA2LC,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,gLAAgLC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,20CAA20C,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,uoBAAuoB,aAAa,CAAC,IAAI,IAAI,CAAC,cAAc,OAAO,6oCAA6oC,gBAAgB,CAAC,IAAI,IAAI,CAAC,iBAAiB,OAAO,keAAkeC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,wZAAwZ,yBAAyB,CAAC,IAAI,IAAI,GAAG,OAAO,0fAA0f,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,4VAA4VC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,8pBAA8pBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,syKAAsyKC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,o6BAAo6BC,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,s+FAAs+FC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,+jCAA+jC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,qGAAqGC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,wqBAAwqBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,gwBAAgwBC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,iTAAiTC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,o8DAAo8D,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,spCAAspCC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,0/BAA0/BC,GAAG,CAAC,IAAI,IAAI,GAAG,OAAO,uhBAAuhBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,6wBAA6wBC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,+/BAA+/BC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,irBAAirBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,8UAA8U,uBAAuB,CAAC,IAAI,IAAI,GAAG,OAAO,gFAAgF,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,6lBAA6lBC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,gYAAgYC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,4xCAA4xCC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,wwBAAwwBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,6pBAA6pBC,QAAQ,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,0dAA0dC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,upBAAupB,kBAAkB,CAAC,IAAI,IAAI,GAAG,OAAO,6mDAA6mDC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,qsCAAqsCC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,stDAAstDC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,yqIAAyqIC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,2xBAA2xBC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,2QAA2QC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,8pBAA8pBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,ynBAAynBC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,kLAAkLC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,w3CAAw3CC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,0qBAA0qBC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,+hCAA+hC,qBAAqB,CAAC,IAAI,IAAI,GAAG,OAAO,+fAA+f,oBAAoB,CAAC,IAAI,IAAI,GAAG,OAAO,61BAA61BC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,uUAAuUC,SAAS,CAAC,IAAI,IAAI,CAAC,MAAM,kBAAkB,OAAO,ksBAAksB,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,u4VAAu4VC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,q9DAAq9D,kBAAkB,CAAC,IAAI,IAAI,CAAC,mBAAmB,OAAO,wjCAAwjC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,gkCAAgkC,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,8ZAA8ZC,MAAM,CAAC,IAAI,IAAI,CAAC,MAAM,cAAc,OAAO,i+BAAi+BC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,wvBAAwvBC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,6qBAA6qBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,iXAAiXC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,irBAAirBC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,gfAAgf,mBAAmB,CAAC,IAAI,IAAI,GAAG,OAAO,0QAA0QC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,oyKAAoyKC,OAAO,CAAC,IAAI,IAAI,CAAC,MAAM,YAAY,OAAO,qaAAqaC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,ihBAAihBC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,mbAAmbC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,gFAAgFC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,gKAAgK,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,i5NAAi5NC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,gRAAgRC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,2QAA2QC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,6iCAA6iCC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,2mBAA2mBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,qwBAAqwB,sBAAsB,CAAC,IAAI,IAAI,GAAG,OAAO,41EAA41E,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,4wBAA4wBC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,69EAA69E,iCAAiC,CAAC,IAAI,IAAI,GAAG,OAAO,ukCAAukCC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,4GAA4GC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,i0DAAi0D,sBAAsB,CAAC,IAAI,IAAI,GAAG,OAAO,sxBAAsxBC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,0xDAA0xDC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,mdAAmd,mBAAmB,CAAC,IAAI,IAAI,GAAG,OAAO,u3BAAu3BC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,84CAA84CC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,2lCAA2lC,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,63CAA63CC,SAAS,CAAC,IAAI,IAAI,GAAG,OAAO,0kBAA0kBC,MAAM,CAAC
,IAAI,IAAI,GAAG,OAAO,47CAA47CC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,8PAA8PC,IAAI,CAAC,IAAI,IAAI,GAAG,OAAO,43BAA43B,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,4mBAA4mB,SAASnb,GAAG,IAAI,IAAI,IAAIC,EAAEmB,UAAUC,OAAOhB,EAAE,IAAI0B,MAAM,EAAE9B,EAAEA,EAAE,EAAE,GAAGG,EAAE,EAAEA,EAAEH,EAAEG,IAAIC,EAAED,EAAE,GAAGgB,UAAUhB,GAAGJ,EAAEkB,WAAM,EAAOb,EAAE,CAAC,MAAML,GAAG,IAAIsC,EAAE,MAAMtC,CAAC,CAAC,CAA3I,EAA6I,WAAW8F,EAAE,MAAMG,GAAGH,EAAE,YAAYG,EAAE,GAAE,CAArrne,GAAyrne,WAAW,aAAa,IAAIjG,EAAE,CAAC,EAAEC,EAAE,CAAC,EAAE,IAAI,oBAAoBC,SAASF,EAAEE,QAAQ,oBAAoBC,WAAWF,EAAEE,SAAS,CAAC,MAAMH,GAAG,CAAC,IAAkCI,OAAE,KAAhCC,GAAGL,EAAEM,WAAW,CAAC,GAAGC,WAAuB,GAAGF,EAAEG,EAAER,EAAES,EAAER,EAAyI,SAASS,EAAET,EAAED,GAAG,IAAIK,EAAED,EAAEO,OAAOC,KAAKX,GAAG,OAAOU,OAAOE,wBAAwBR,EAAEM,OAAOE,sBAAsBZ,GAAGD,IAAIK,EAAEA,EAAES,QAAO,SAASd,GAAG,OAAOW,OAAOI,yBAAyBd,EAAED,GAAGgB,UAAU,KAAIZ,EAAEa,KAAKC,MAAMd,EAAEC,IAAID,CAAC,CAAC,SAASe,EAAElB,GAAG,IAAI,IAAID,EAAE,EAAEA,EAAEoB,UAAUC,OAAOrB,IAAI,CAAC,IAAIK,EAAE,MAAMe,UAAUpB,GAAGoB,UAAUpB,GAAG,CAAC,EAAEA,EAAE,EAAEU,EAAEC,OAAON,IAAG,GAAIiB,SAAQ,SAAStB,GAAGuB,EAAEtB,EAAED,EAAEK,EAAEL,GAAG,IAAGW,OAAOa,0BAA0Bb,OAAOc,iBAAiBxB,EAAEU,OAAOa,0BAA0BnB,IAAIK,EAAEC,OAAON,IAAIiB,SAAQ,SAAStB,GAAGW,OAAOe,eAAezB,EAAED,EAAEW,OAAOI,yBAAyBV,EAAEL,GAAG,GAAE,CAAC,OAAOC,CAAC,CAAC,SAASsB,EAAEvB,EAAEC,EAAEI,GAAG,OAAOJ,KAAKD,EAAEW,OAAOe,eAAe1B,EAAEC,EAAE,CAAC0B,MAAMtB,EAAEW,YAAW,EAAGY,cAAa,EAAGC,UAAS,IAAK7B,EAAEC,GAAGI,EAAEL,CAAC,CAAC,SAAS8B,EAAE9B,EAAEC,IAAI,MAAMA,GAAGA,EAAED,EAAEqB,UAAUpB,EAAED,EAAEqB,QAAQ,IAAI,IAAIhB,EAAE,EAAED,EAAE,IAAI2B,MAAM9B,GAAGI,EAAEJ,EAAEI,IAAID,EAAEC,GAAGL,EAAEK,GAAG,OAAOD,CAAC,CAA95BI,EAAEL,SAASM,EAAEuB,iBAAiBvB,EAAEwB,MAAM,mBAAmBxB,EAAEyB,kBAAkBzB,EAAE0B,eAAe/B,EAAEgC,QAAQ,SAAShC,EAAEgC,QAAQ,YAAoyB,IAAIC,EAAE,qBAAqBC,EAAE,WAAW,IAAI,OAAM,CAAE,CAAC,MAAMtC,GAAG,OAAM,CAAE,CAAC,CAA1C,GAA8CuC,EAAE,UAAUC,EAAE,QAAQC,EAAE,CAACF,EAAEC,GAAG,SAASE,EAAE1C,GAAG,OAAO,IAAI2C,MAAM3C,EAAE,CAAC4C,IAAI,SAAS5C,EAAEC,GAAG,OAAOA,KAAKD,EAAEA,EAAEC,GAAGD,EAAEuC,EAAE,GAAG,CAACG,GAAGnB,EAAEsB,EAAE,CAAC,EAAEN,EAAE,CAACO,GAAG,QAAQC,IAAI,QAAQ,WAAW,QAAQC,IAAI,UAAU,aAAa,UAAUC,IAAI,QAAQ,WAAW,QAAQC,IAAI,OAAO,UAAU,OAAOC,IAAI,UAAU,aAAa,UAAUC,IAAI,SAAS,YAAY,SAASC,IAAI,MAAMC,KAAK,MAAM,SAAS,MAAM,iBAAiB,QAAQ/B,EAAEsB,EAAEL,EAAE,CAACM,GAAG,QAAQS,KAAK,QAAQ,WAAW,QAAQC,KAAK,UAAU,aAAa,UAAUC,KAAK,QAAQ,WAAW,QAAQC,KAAK,OAAO,UAAU,SAASb,IAAI,IAAIc,EAAEjB,GAAGnB,EAAEqC,EAAE,CAAC,EAAErB,EAAE,CAACsB,MAAM,MAAMC,QAAQ,MAAMC,MAAM,MAAMC,KAAK,MAAMC,QAAQ,MAAMC,OAAO,MAAMC,IAAI,QAAQ5C,EAAEqC,EAAEpB,EAAE,CAACqB,MAAM,OAAOC,QAAQ,OAAOC,MAAM,OAAOC,KAAK,SAASJ,IAA8mBf,GAA5CzC,GAA3jBsC,GAAGnB,EAAElB,EAAE,CAAC,EAAEkC,EAAE,CAACa,IAAI,YAAYD,IAAI,aAAaE,IAAI,SAASJ,IAAI,WAAWD,IAAI,aAAaD,IAAI,WAAWG,IAAI,YAAY3B,EAAElB,EAAEmC,EAAE,CAACe,KAAK,WAAWC,KAAK,aAAaC,KAAK,WAAWC,KAAK,YAAYrD,IAAIqC,GAAGnB,EAAEvB,EAAE,CAAC,EAAEuC,EAAE,CAAC,YAAY,MAAM,aAAa,MAAM,SAAS,MAAM,WAAW,MAAM,aAAa,MAAM,WAAW,MAAM,UAAU,QAAQhB,EAAEvB,EAAEwC,EAAE,CAAC,WAAW,OAAO,aAAa,OAAO,WAAW,OAAO,UAAU,SAASxC,IAAI0C,GAAGnB,EAAEtB,EAAE,CAAC,EAAEsC,EAAE,CAAC,IAAI,MAAM,IAAI,MAAM6B,OAAO,MAAM,IAAI,MAAM,IAAI,QAAQ7C,EAAEtB,EAAEuC,EAAE,CAAC,IAAI,OAAO,IAAI,OAAO,IAAI,OAAO,IAAI,SAASvC,IAAliBQ,EAAsiB,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,KAAS4D,OAAO,CAAC,GAAG,GAAG,GAAG,GAAG,GAAG,GAAG,GAAG,GAAG,GAAG,KAAO,iBAAgBT,EAAE,eAAevD,EAAE,UAAUL,EAAE,YAAYC,EAAE,IAAIqE,IAAI3D,OAAOC,KAAK+C,EAAEpB,IAAIgC,IAAItE,EAAEuE,IAAIC,KAAKxE,IAAIU,OAAOC,KAAK+C,EAAEnB,IAAI+B,IAAItE,EAAEuE,IAAIC,KAAKxE,IAAI,GAAGoE,OAAO5B,EAAE,SAASzC,GAAG,GAAG+B,MAAM2C,QAAQ1E,GAAG,OAAO8B,EAAE9B,EAAE,CAA3C,CAA+CC,IAAI,SAASD,GAAG,GAAG,oBAAoB2E,QAAQ,MAAM3E,EAAE2E,OAAOC,WAAW,
MAAM5E,EAAE,cAAc,OAAO+B,MAAM8C,KAAK7E,EAAE,CAA/G,CAAiHC,IAAI,SAASD,EAAEC,GAAG,GAAGD,EAAE,CAAC,GAAG,iBAAiBA,EAAE,OAAO8B,EAAE9B,EAAEC,GAAG,IAAII,EAAEM,OAAOmE,UAAUC,SAASC,KAAKhF,GAAGiF,MAAM,GAAG,GAAG,MAAM,SAAS5E,EAAE,WAAWA,GAAGL,EAAEkF,YAAYlF,EAAEkF,YAAYC,KAAK9E,IAAI,QAAQA,EAAE0B,MAAM8C,KAAK7E,GAAG,cAAcK,GAAG,2CAA2C+E,KAAK/E,GAAGyB,EAAE9B,EAAEC,QAAG,CAAM,CAAC,CAA1R,CAA4RA,IAAI,WAAW,MAAM,IAAIoF,UAAU,uIAAuI,CAAtK,GAA0K,CAAC,MAAM,KAAK,KAAK,KAAK,KAAK,MAAM,OAAO,SAAS,OAAO,YAAY,SAAS,YAAY,kBAAkB,gBAAgB,OAAO,KAAK,UAAU,iBAAiB,cAAc,SAAS,KAAK,YAAY,aAAa,QAAQ,aAAa,aAAa,YAAY,YAAY,QAAQ,aAAa,eAAe,OAAO,WAAW,WAAW,QAAQ,KAAKxC,EAAEe,EAAEvD,EAAEL,IAAIqE,OAAO5D,EAAE8D,KAAI,SAASvE,GAAG,MAAM,GAAGqE,OAAOrE,EAAE,IAAI,KAAIqE,OAAOjE,EAAEmE,KAAI,SAASvE,GAAG,MAAM,KAAKqE,OAAOrE,EAAE,MAAIQ,EAAEA,GAAG,CAAC,GAAI6B,KAAK7B,EAAE6B,GAAG,CAAC,GAAG7B,EAAE6B,GAAGiD,SAAS9E,EAAE6B,GAAGiD,OAAO,CAAC,GAAG9E,EAAE6B,GAAGkD,QAAQ/E,EAAE6B,GAAGkD,MAAM,CAAC,GAAG/E,EAAE6B,GAAGmD,QAAQhF,EAAE6B,GAAGmD,MAAM,IAAI,IAAIC,EAAEjF,EAAE6B,GAAG,SAASqD,EAAEtF,GAAG,OAAOO,OAAOC,KAAKR,GAAGuF,QAAO,SAAS3F,EAAEC,GAAG,IAAII,EAAED,EAAEH,GAAG,OAAQI,EAAEuF,KAAK5F,EAAEK,EAAEwF,UAAUxF,EAAEuF,KAAK5F,EAAEC,GAAGI,EAAEL,CAAC,GAAE,CAAC,EAAE,CAAC,SAAS8F,EAAE9F,EAAEC,EAAEI,GAAyDA,OAAE,KAApDD,GAAG,EAAEgB,UAAUC,aAAQ,IAAShB,EAAEA,EAAE,CAAC,GAAG0F,YAAwB3F,EAApE,IAAsEA,EAAEsF,EAAEzF,GAAG,mBAAmBwF,EAAEF,MAAMS,SAAS3F,EAAEoF,EAAEH,OAAOtF,GAAGmB,EAAEA,EAAE,CAAC,EAAEsE,EAAEH,OAAOtF,IAAI,CAAC,GAAGI,GAAGqF,EAAEF,MAAMS,QAAQhG,EAAE0F,EAAEzF,IAAI,QAAQD,GAAG8F,EAAE,KAAK7F,EAAE,CAAC,IAAIgG,EAAE,CAAC,YAAY,CAAC,IAAI,IAAI,CAAC,MAAM,aAAa,OAAO,unBAAunBmV,QAAQ,CAAC,IAAI,IAAI,CAAC,eAAe,OAAO,8XAA8X,aAAa,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,MAAM,WAAW,aAAa,OAAO,8aAA8a,gBAAgB,CAAC,IAAI,IAAI,CAAC,gBAAgB,OAAO,+bAA+b,mBAAmB,CAAC,IAAI,IAAI,GAAG,OAAO,o7BAAo7B,kBAAkB,CAAC,IAAI,IAAI,CAAC,OAAO,cAAc,OAAO,8lCAA8lC,kBAAkB,CAAC,IAAI,IAAI,CAAC,OAAO,cAAc,OAAO,21BAA21B,eAAe,CAAC,IAAI,IAAI,CAAC,MAAM,gBAAgB,OAAO,mlBAAmlBC,SAAS,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,6mCAA6mCC,MAAM,CAAC,IAAI,IAAI,CAAC,kBAAkB,OAAO,6hBAA6hB,0BAA0B,CAAC,IAAI,IAAI,CAAC,OAAO,sBAAsB,OAAO,i5BAAi5B,eAAe,CAAC,IAAI,IAAI,CAAC,OAAO,WAAW,OAAO,mcAAmc,qBAAqB,CAAC,IAAI,IAAI,CAAC,sBAAsB,OAAO,2YAA2Y,eAAe,CAAC,IAAI,IAAI,CAAC,MAAM,gBAAgB,OAAO,wSAAwSC,QAAQ,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,qTAAqT,oBAAoB,CAAC,IAAI,IAAI,CAAC,qBAAqB,OAAO,6YAA6Y,iBAAiB,CAAC,IAAI,IAAI,CAAC,OAAO,aAAa,OAAO,20DAA20DC,UAAU,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,8vBAA8vBC,KAAK,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,2dAA2d,eAAe,CAAC,IAAI,IAAI,CAAC,KAAK,KAAK,MAAM,gBAAgB,OAAO,+VAA+V,aAAa,CAAC,IAAI,IAAI,CAAC,OAAO,cAAc,OAAO,iJAAiJ,aAAa,CAAC,IAAI,IAAI,CAAC,SAAS,OAAO,6lBAA6lBC,OAAO,CAAC,IAAI,IAAI,CAAC,KAAK,cAAc,eAAe,OAAO,i4BAAi4B,gBAAgB,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,2pBAA2pB,iBAAiB,CAAC,IAAI,IAAI,CAAC,eAAe,OAAO,0eAA0e,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,0hCAA0hCC,KAAK,CAAC,IAAI,IAAI,CAAC,OAAO,KAAK,cAAc,OAAO,26BAA26B,aAAa,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,y+BAAy+B,YAAY,CAAC,IAAI,IAAI,CAAC,OAAO,QAAQ,OAAO,umCAAumC,mBAAmB,CAAC,IAAI,IAAI,CAAC,OAAO,eAAe,OAAO,qpBAAqpB,eAAe,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,y1BAAy1B,iBAAiB,CAAC,IAAI,IAAI,CAAC,aAAa,OAAO,ggBAAggB,iBAAiB,CAAC,IAAI,IAAI,CAAC,OAAO,YAAY,OAAO,iaAAia,kBAAkB,CAAC,IAAI,IAAI,CAAC,OAAO,cAAc,OAAO,qWAAqW,gBAAgB,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,23BAA23BC,SAAS,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,2NAA2N,kBAAkB,CAAC,IAAI,IAAI,GAAG,OAAO,06BAA06BC,OAAO,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,MAAM,gBAAgB,OAAO,wTAAwTC,KAAK,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,+SAA+S,oBAAoB,CAAC,IAAI,IAAI,CAAC,qBAAqB,OAAO,yYAAyYC,KAAK,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,OAAO,klBAAklB,eAAe,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,m2BAAm2B,oBAAoB,CA
AC,IAAI,IAAI,CAAC,gBAAgB,OAAO,wjBAAwjB,aAAa,CAAC,IAAI,IAAI,CAAC,SAAS,OAAO,4TAA4T,cAAc,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,OAAO,4YAA4YC,UAAU,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,4WAA4W,cAAc,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,ozBAAozB,iBAAiB,CAAC,IAAI,IAAI,CAAC,aAAa,OAAO,uhCAAuhC,kBAAkB,CAAC,IAAI,IAAI,CAAC,mBAAmB,OAAO,wYAAwY,YAAY,CAAC,IAAI,IAAI,CAAC,aAAa,OAAO,oaAAoa,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,wVAAwV,cAAc,CAAC,IAAI,IAAI,CAAC,MAAM,eAAe,OAAO,mYAAmYC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,mWAAmW,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,gVAAgVC,MAAM,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,02BAA02BC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,80CAA80CC,IAAI,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,oVAAoV,cAAc,CAAC,IAAI,IAAI,CAAC,MAAM,eAAe,OAAO,sRAAsR,eAAe,CAAC,IAAI,IAAI,CAAC,MAAM,gBAAgB,OAAO,oPAAoP,cAAc,CAAC,IAAI,IAAI,CAAC,MAAM,eAAe,OAAO,kNAAkN,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,oZAAoZ,kBAAkB,CAAC,IAAI,IAAI,CAAC,OAAO,cAAc,OAAO,whCAAwhCC,WAAW,CAAC,IAAI,IAAI,CAAC,KAAK,OAAO,mWAAmW,eAAe,CAAC,IAAI,IAAI,CAAC,MAAM,eAAe,SAAS,OAAO,0gBAA0gB,aAAa,CAAC,IAAI,IAAI,CAAC,OAAO,SAAS,OAAO,usBAAusB,eAAe,CAAC,IAAI,IAAI,CAAC,MAAM,MAAM,oBAAoB,0BAA0B,OAAO,qVAAqV,kBAAkB,CAAC,IAAI,IAAI,CAAC,OAAO,cAAc,OAAO,4hBAA4hB,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,uiBAAuiB,kBAAkB,CAAC,IAAI,IAAI,GAAG,OAAO,waAAwa,gBAAgB,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,ucAAuc,cAAc,CAAC,IAAI,IAAI,CAAC,gBAAgB,OAAO,kuBAAkuBC,OAAO,CAAC,IAAI,IAAI,CAAC,KAAK,KAAK,KAAK,OAAO,OAAO,+MAA+MC,UAAU,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,OAAO,0qCAA0qCC,UAAU,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,mvBAAmvB,uBAAuB,CAAC,IAAI,IAAI,CAAC,OAAO,mBAAmB,OAAO,opDAAopD,mBAAmB,CAAC,IAAI,IAAI,CAAC,iBAAiB,OAAO,ifAAif,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,0hBAA0hB,iBAAiB,CAAC,IAAI,IAAI,CAAC,OAAO,aAAa,OAAO,skCAAskC,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,88CAA88C,eAAe,CAAC,IAAI,IAAI,CAAC,MAAM,0BAA0B,OAAO,uVAAuV,oBAAoB,CAAC,IAAI,IAAI,CAAC,OAAO,oBAAoB,OAAO,inBAAinB,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,2qBAA2qBC,MAAM,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,KAAK,MAAM,OAAO,OAAO,4sBAA4sB,gBAAgB,CAAC,IAAI,IAAI,CAAC,OAAO,YAAY,OAAO,yOAAyO,eAAe,CAAC,IAAI,IAAI,CAAC,MAAM,gBAAgB,OAAO,kRAAkRC,OAAO,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,KAAK,KAAK,KAAK,MAAM,MAAM,OAAO,OAAO,qGAAqG,YAAY,CAAC,IAAI,IAAI,CAAC,MAAM,uBAAuB,OAAO,qVAAqV,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,2mBAA2mB,aAAa,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,6gBAA6gB,kBAAkB,CAAC,IAAI,IAAI,CAAC,MAAM,mBAAmB,OAAO,oeAAoe,iBAAiB,CAAC,IAAI,IAAI,CAAC,OAAO,aAAa,OAAO,6LAA6LC,IAAI,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,ktBAAktB,eAAe,CAAC,IAAI,IAAI,CAAC,OAAO,WAAW,OAAO,+vBAA+vB,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,yhBAAyhB,kBAAkB,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,mSAAmS,aAAa,CAAC,IAAI,IAAI,CAAC,KAAK,SAAS,OAAO,gZAAgZ,cAAc,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,QAAQ,OAAO,maAAma,eAAe,CAAC,IAAI,IAAI,CAAC,OAAO,MAAM,cAAc,OAAO,0wBAA0wB,mBAAmB,CAAC,IAAI,IAAI,CAAC,OAAO,eAAe,OAAO,qmBAAqmB,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,ikCAAikC,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,qiCAAqiC,kBAAkB,CAAC,IAAI,IAAI,CAAC,OAAO,cAAc,OAAO,22DAA22D,iBAAiB,CAAC,IAAI,IAAI,CAAC,kBAAkB,OAAO,wfAAwf,aAAa,CAAC,IAAI,IAAI,GAAG,OAAO,ueAAue,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,mwBAAmwBC,QAAQ,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,6qBAA6qBC,SAAS,CAAC,IAAI,IAAI,CAAC,OAAO,KAAK,OAAO,OAAO,mUAAmUC,UAAU,CAAC,IAAI,IAAI,CAAC,KAAK,MAAM,mBAAmB,OAAO,2eAA2e,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,waAAwa,aAAa,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,gZAAgZ,yBAAyB,CAAC,IAAI,IAAI,CAAC,OAAO,qBAAqB,OAAO,4vCAA4vC,iBAAiB,CAAC,IAAI,IAAI,CAAC,YAAY,OAAO,smBAAsmB,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,idAAid,cAAc,CAAC,IAAI,IAAI,CAAC,MAAM,yBAAyB,OAAO,sVAAsV,eAAe,CAAC,IAAI,IAAI,CAAC,kBAAkB,OAAO,yaAAyaC,MAAM,CAAC,IAAI,IAAI,CAAC,OAAO,cAAc,OAAO,wOAAwOC,SAAS,CAAC,IAAI,IAAI,CAAC,KAAK,OAAO,6wCAA6wC
,oBAAoB,CAAC,IAAI,IAAI,GAAG,OAAO,2sBAA2sBC,OAAO,CAAC,IAAI,IAAI,GAAG,OAAO,yhBAAyhB,YAAY,CAAC,IAAI,IAAI,CAAC,OAAO,QAAQ,OAAO,wWAAwW,WAAW,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,sRAAsR,UAAU,CAAC,IAAI,IAAI,CAAC,MAAM,mBAAmB,OAAO,wbAAwbC,IAAI,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,8+BAA8+B,kBAAkB,CAAC,IAAI,IAAI,CAAC,cAAc,OAAO,odAAod,cAAc,CAAC,IAAI,IAAI,CAAC,MAAM,yBAAyB,OAAO,uVAAuV,cAAc,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,ujCAAujC,aAAa,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,yfAAyf,cAAc,CAAC,IAAI,IAAI,CAAC,OAAO,MAAM,mBAAmB,OAAO,mZAAmZC,KAAK,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,+fAA+fC,KAAK,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,OAAO,mQAAmQC,SAAS,CAAC,IAAI,IAAI,CAAC,OAAO,MAAM,eAAe,iBAAiB,OAAO,kkHAAkkH,aAAa,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,0tBAA0tB,YAAY,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,mXAAmX,aAAa,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,qoBAAqoB,cAAc,CAAC,IAAI,IAAI,CAAC,MAAM,eAAe,OAAO,obAAobC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,maAAma,oBAAoB,CAAC,IAAI,IAAI,CAAC,MAAM,gBAAgB,OAAO,i+BAAi+BC,UAAU,CAAC,IAAI,IAAI,CAAC,KAAK,OAAO,4TAA4ThZ,IAAI,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,6XAA6X,aAAa,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,wpBAAwpB,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,uiBAAuiB,aAAa,CAAC,IAAI,IAAI,CAAC,OAAO,SAAS,OAAO,oYAAoY,aAAa,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,8iCAA8iC,mBAAmB,CAAC,IAAI,IAAI,CAAC,OAAO,eAAe,OAAO,0nBAA0nBiZ,SAAS,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,ooBAAooB,uBAAuB,CAAC,IAAI,IAAI,CAAC,OAAO,mBAAmB,OAAO,ygDAAygDC,KAAK,CAAC,IAAI,IAAI,CAAC,OAAO,MAAM,OAAO,+qBAA+qBC,SAAS,CAAC,IAAI,IAAI,CAAC,OAAO,QAAQ,OAAO,uRAAuR,wBAAwB,CAAC,IAAI,IAAI,CAAC,OAAO,oBAAoB,OAAO,m4BAAm4BC,MAAM,CAAC,IAAI,IAAI,GAAG,OAAO,yXAAyX,aAAa,CAAC,IAAI,IAAI,CAAC,OAAO,SAAS,OAAO,ytBAAytB,kBAAkB,CAAC,IAAI,IAAI,CAAC,MAAM,kBAAkB,kBAAkB,gBAAgB,OAAO,qbAAqb,cAAc,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,8TAA8T,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,m/BAAm/B,eAAe,CAAC,IAAI,IAAI,CAAC,OAAO,WAAW,OAAO,saAAsa,iBAAiB,CAAC,IAAI,IAAI,GAAG,OAAO,mXAAmX,eAAe,CAAC,IAAI,IAAI,CAAC,MAAM,eAAe,gBAAgB,OAAO,0UAA0U,YAAY,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,sjCAAsjC,kBAAkB,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,2FAA2F,cAAc,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,OAAO,MAAM,OAAO,OAAO,OAAO,MAAM,OAAO,OAAO,sEAAsE,cAAc,CAAC,IAAI,IAAI,CAAC,MAAM,eAAe,OAAO,4QAA4Q,gBAAgB,CAAC,IAAI,IAAI,CAAC,OAAO,YAAY,OAAO,whBAAwhB,kBAAkB,CAAC,IAAI,IAAI,GAAG,OAAO,s7BAAs7B,SAAS3d,GAAG,IAAI,IAAI,IAAIC,EAAEmB,UAAUC,OAAOhB,EAAE,IAAI0B,MAAM,EAAE9B,EAAEA,EAAE,EAAE,GAAGG,EAAE,EAAEA,EAAEH,EAAEG,IAAIC,EAAED,EAAE,GAAGgB,UAAUhB,GAAGJ,EAAEkB,WAAM,EAAOb,EAAE,CAAC,MAAML,GAAG,IAAIsC,EAAE,MAAMtC,CAAC,CAAC,CAA3I,EAA6I,WAAW8F,EAAE,MAAMG,GAAGH,EAAE,aAAaG,EAAE,GAAE,CAArr+G,GAAyr+G,WAAW,aAAa,IAAIjG,EAAE,CAAC,EAAEC,EAAE,CAAC,EAAE,IAAI,oBAAoBC,SAASF,EAAEE,QAAQ,oBAAoBC,WAAWF,EAAEE,SAAS,CAAC,MAAMH,GAAG,CAAC,IAAkCI,OAAE,KAAhCC,GAAGL,EAAEM,WAAW,CAAC,GAAGC,WAAuB,GAAGF,EAAEG,EAAER,EAAES,EAAER,EAAyI,SAASS,EAAET,EAAED,GAAG,IAAIK,EAAED,EAAEO,OAAOC,KAAKX,GAAG,OAAOU,OAAOE,wBAAwBR,EAAEM,OAAOE,sBAAsBZ,GAAGD,IAAIK,EAAEA,EAAES,QAAO,SAASd,GAAG,OAAOW,OAAOI,yBAAyBd,EAAED,GAAGgB,UAAU,KAAIZ,EAAEa,KAAKC,MAAMd,EAAEC,IAAID,CAAC,CAAC,SAASe,EAAElB,GAAG,IAAI,IAAID,EAAE,EAAEA,EAAEoB,UAAUC,OAAOrB,IAAI,CAAC,IAAIK,EAAE,MAAMe,UAAUpB,GAAGoB,UAAUpB,GAAG,CAAC,EAAEA,EAAE,EAAEU,EAAEC,OAAON,IAAG,GAAIiB,SAAQ,SAAStB,GAAGuB,EAAEtB,EAAED,EAAEK,EAAEL,GAAG,IAAGW,OAAOa,0BAA0Bb,OAAOc,iBAAiBxB,EAAEU,OAAOa,0BAA0BnB,IAAIK,EAAEC,OAAON,IAAIiB,SAAQ,SAAStB,GAAGW,OAAOe,eAAezB,EAAED,EAAEW,OAAOI,yBAAyBV,EAAEL,GAAG,GAAE,CAAC,OAAOC,CAAC,CAAC,SAASsB,EAAEvB,EAAEC,EAAEI,GAAG,OAAOJ,KAAKD,EAAEW,OAAOe,eAAe1B,EAAEC,EAAE,CAAC0B,MAAMtB,EAAEW,YAAW,EAAGY,cAAa,EAAGC,UAAS,IAAK7B,EAAEC,GAAGI,EAAEL,CAAC,CAAC,SAAS8B,EAAE9B,EAAEC,IAAI,MAAMA,GAAGA,EAAED,EAAEqB,UAAUpB,EAAED,EAAEqB,QAAQ,IAAI,IAAI
hB,EAAE,EAAED,EAAE,IAAI2B,MAAM9B,GAAGI,EAAEJ,EAAEI,IAAID,EAAEC,GAAGL,EAAEK,GAAG,OAAOD,CAAC,CAA95BI,EAAEL,SAASM,EAAEuB,iBAAiBvB,EAAEwB,MAAM,mBAAmBxB,EAAEyB,kBAAkBzB,EAAE0B,eAAe/B,EAAEgC,QAAQ,SAAShC,EAAEgC,QAAQ,YAAoyB,IAAIC,EAAE,qBAAqBC,EAAE,WAAW,IAAI,OAAM,CAAE,CAAC,MAAMtC,GAAG,OAAM,CAAE,CAAC,CAA1C,GAA8CuC,EAAE,UAAUC,EAAE,QAAQC,EAAE,CAACF,EAAEC,GAAG,SAASE,EAAE1C,GAAG,OAAO,IAAI2C,MAAM3C,EAAE,CAAC4C,IAAI,SAAS5C,EAAEC,GAAG,OAAOA,KAAKD,EAAEA,EAAEC,GAAGD,EAAEuC,EAAE,GAAG,CAACG,GAAGnB,EAAEsB,EAAE,CAAC,EAAEN,EAAE,CAACO,GAAG,QAAQC,IAAI,QAAQ,WAAW,QAAQC,IAAI,UAAU,aAAa,UAAUC,IAAI,QAAQ,WAAW,QAAQC,IAAI,OAAO,UAAU,OAAOC,IAAI,UAAU,aAAa,UAAUC,IAAI,SAAS,YAAY,SAASC,IAAI,MAAMC,KAAK,MAAM,SAAS,MAAM,iBAAiB,QAAQ/B,EAAEsB,EAAEL,EAAE,CAACM,GAAG,QAAQS,KAAK,QAAQ,WAAW,QAAQC,KAAK,UAAU,aAAa,UAAUC,KAAK,QAAQ,WAAW,QAAQC,KAAK,OAAO,UAAU,SAASb,IAAI,IAAIc,EAAEjB,GAAGnB,EAAEqC,EAAE,CAAC,EAAErB,EAAE,CAACsB,MAAM,MAAMC,QAAQ,MAAMC,MAAM,MAAMC,KAAK,MAAMC,QAAQ,MAAMC,OAAO,MAAMC,IAAI,QAAQ5C,EAAEqC,EAAEpB,EAAE,CAACqB,MAAM,OAAOC,QAAQ,OAAOC,MAAM,OAAOC,KAAK,SAASJ,IAA8mBf,GAA5CzC,GAA3jBsC,GAAGnB,EAAElB,EAAE,CAAC,EAAEkC,EAAE,CAACa,IAAI,YAAYD,IAAI,aAAaE,IAAI,SAASJ,IAAI,WAAWD,IAAI,aAAaD,IAAI,WAAWG,IAAI,YAAY3B,EAAElB,EAAEmC,EAAE,CAACe,KAAK,WAAWC,KAAK,aAAaC,KAAK,WAAWC,KAAK,YAAYrD,IAAIqC,GAAGnB,EAAEvB,EAAE,CAAC,EAAEuC,EAAE,CAAC,YAAY,MAAM,aAAa,MAAM,SAAS,MAAM,WAAW,MAAM,aAAa,MAAM,WAAW,MAAM,UAAU,QAAQhB,EAAEvB,EAAEwC,EAAE,CAAC,WAAW,OAAO,aAAa,OAAO,WAAW,OAAO,UAAU,SAASxC,IAAI0C,GAAGnB,EAAEtB,EAAE,CAAC,EAAEsC,EAAE,CAAC,IAAI,MAAM,IAAI,MAAM6B,OAAO,MAAM,IAAI,MAAM,IAAI,QAAQ7C,EAAEtB,EAAEuC,EAAE,CAAC,IAAI,OAAO,IAAI,OAAO,IAAI,OAAO,IAAI,SAASvC,IAAliBQ,EAAsiB,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,KAAS4D,OAAO,CAAC,GAAG,GAAG,GAAG,GAAG,GAAG,GAAG,GAAG,GAAG,GAAG,KAAO,iBAAgBT,EAAE,eAAevD,EAAE,UAAUL,EAAE,YAAYC,EAAE,IAAIqE,IAAI3D,OAAOC,KAAK+C,EAAEpB,IAAIgC,IAAItE,EAAEuE,IAAIC,KAAKxE,IAAIU,OAAOC,KAAK+C,EAAEnB,IAAI+B,IAAItE,EAAEuE,IAAIC,KAAKxE,IAAI,GAAGoE,OAAO5B,EAAE,SAASzC,GAAG,GAAG+B,MAAM2C,QAAQ1E,GAAG,OAAO8B,EAAE9B,EAAE,CAA3C,CAA+CC,IAAI,SAASD,GAAG,GAAG,oBAAoB2E,QAAQ,MAAM3E,EAAE2E,OAAOC,WAAW,MAAM5E,EAAE,cAAc,OAAO+B,MAAM8C,KAAK7E,EAAE,CAA/G,CAAiHC,IAAI,SAASD,EAAEC,GAAG,GAAGD,EAAE,CAAC,GAAG,iBAAiBA,EAAE,OAAO8B,EAAE9B,EAAEC,GAAG,IAAII,EAAEM,OAAOmE,UAAUC,SAASC,KAAKhF,GAAGiF,MAAM,GAAG,GAAG,MAAM,SAAS5E,EAAE,WAAWA,GAAGL,EAAEkF,YAAYlF,EAAEkF,YAAYC,KAAK9E,IAAI,QAAQA,EAAE0B,MAAM8C,KAAK7E,GAAG,cAAcK,GAAG,2CAA2C+E,KAAK/E,GAAGyB,EAAE9B,EAAEC,QAAG,CAAM,CAAC,CAA1R,CAA4RA,IAAI,WAAW,MAAM,IAAIoF,UAAU,uIAAuI,CAAtK,GAA0K,CAAC,MAAM,KAAK,KAAK,KAAK,KAAK,MAAM,OAAO,SAAS,OAAO,YAAY,SAAS,YAAY,kBAAkB,gBAAgB,OAAO,KAAK,UAAU,iBAAiB,cAAc,SAAS,KAAK,YAAY,aAAa,QAAQ,aAAa,aAAa,YAAY,YAAY,QAAQ,aAAa,eAAe,OAAO,WAAW,WAAW,QAAQ,KAAKxC,EAAEe,EAAEvD,EAAEL,IAAIqE,OAAO5D,EAAE8D,KAAI,SAASvE,GAAG,MAAM,GAAGqE,OAAOrE,EAAE,IAAI,KAAIqE,OAAOjE,EAAEmE,KAAI,SAASvE,GAAG,MAAM,KAAKqE,OAAOrE,EAAE,MAAIQ,EAAEA,GAAG,CAAC,GAAI6B,KAAK7B,EAAE6B,GAAG,CAAC,GAAG7B,EAAE6B,GAAGiD,SAAS9E,EAAE6B,GAAGiD,OAAO,CAAC,GAAG9E,EAAE6B,GAAGkD,QAAQ/E,EAAE6B,GAAGkD,MAAM,CAAC,GAAG/E,EAAE6B,GAAGmD,QAAQhF,EAAE6B,GAAGmD,MAAM,IAAI,IAAIC,EAAEjF,EAAE6B,GAAG,SAASqD,EAAEtF,GAAG,OAAOO,OAAOC,KAAKR,GAAGuF,QAAO,SAAS3F,EAAEC,GAAG,IAAII,EAAED,EAAEH,GAAG,OAAQI,EAAEuF,KAAK5F,EAAEK,EAAEwF,UAAUxF,EAAEuF,KAAK5F,EAAEC,GAAGI,EAAEL,CAAC,GAAE,CAAC,EAAE,CAAC,SAAS8F,EAAE9F,EAAEC,EAAEI,GAAyDA,OAAE,KAApDD,GAAG,EAAEgB,UAAUC,aAAQ,IAAShB,EAAEA,EAAE,CAAC,GAAG0F,YAAwB3F,EAApE,IAAsEA,EAAEsF,EAAEzF,GAAG,mBAAmBwF,EAAEF,MAAMS,SAAS3F,EAAEoF,EAAEH,OAAOtF,GAAGmB,EAAEA,EAAE,CAAC,EAAEsE,EAAEH,OAAOtF,IAAI,CAAC,GAAGI,GAAGqF,EAAEF,MAAMS,QAAQhG,EAAE0F,EAAEzF,IAAI,QAAQD,GAAG8F,EAAE,KAAK7F,EAAE
,CAAC,IAAIgG,EAAE,CAAC,EAAE,CAAC,IAAI,IAAI,GAAG,KAAK,iLAAiL,EAAE,CAAC,IAAI,IAAI,GAAG,KAAK,gNAAgN,EAAE,CAAC,IAAI,IAAI,GAAG,KAAK,oWAAoW,EAAE,CAAC,IAAI,IAAI,GAAG,KAAK,sXAAsX,EAAE,CAAC,IAAI,IAAI,GAAG,KAAK,8PAA8P,EAAE,CAAC,IAAI,IAAI,GAAG,KAAK,oWAAoW,EAAE,CAAC,IAAI,IAAI,GAAG,KAAK,0QAA0Q,EAAE,CAAC,IAAI,IAAI,GAAG,KAAK,wKAAwK,EAAE,CAAC,IAAI,IAAI,GAAG,KAAK,wcAAwc,EAAE,CAAC,IAAI,IAAI,GAAG,KAAK,qPAAqP,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,wkBAAwkB,mBAAmB,CAAC,IAAI,IAAI,GAAG,OAAO,4jCAA4jC,uBAAuB,CAAC,IAAI,IAAI,CAAC,wBAAwB,OAAO,mMAAmM2X,GAAG,CAAC,IAAI,IAAI,CAAC,OAAO,KAAK,wbAAwb,YAAY,CAAC,IAAI,IAAI,CAAC,MAAM,aAAa,OAAO,2eAA2e,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,giBAAgiB,aAAa,CAAC,IAAI,IAAI,CAAC,cAAc,OAAO,4ZAA4ZC,YAAY,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,ijBAAijBzC,QAAQ,CAAC,IAAI,IAAI,CAAC,eAAe,OAAO,oKAAoK0C,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,oNAAoN,mCAAmC,CAAC,IAAI,IAAI,CAAC,gBAAgB,OAAO,oaAAoaC,UAAU,CAAC,IAAI,IAAI,GAAG,OAAO,wpBAAwpB,aAAa,CAAC,IAAI,IAAI,CAAC,OAAO,OAAO,MAAM,WAAW,aAAa,OAAO,0XAA0X,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,sQAAsQC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,8iBAA8iB,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,ixBAAixB,aAAa,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,0OAA0O,gBAAgB,CAAC,IAAI,IAAI,CAAC,gBAAgB,OAAO,gzBAAgzB,sBAAsB,CAAC,IAAI,IAAI,GAAG,OAAO,krBAAkrB,8BAA8B,CAAC,IAAI,IAAI,GAAG,OAAO,uiCAAuiCC,WAAW,CAAC,IAAI,IAAI,CAAC,OAAO,mBAAmB,OAAO,kxBAAkxB,oBAAoB,CAAC,IAAI,IAAI,GAAG,OAAO,8gBAA8gB,YAAY,CAAC,IAAI,IAAI,CAAC,aAAa,OAAO,+JAA+J,eAAe,CAAC,IAAI,IAAI,CAAC,gBAAgB,OAAO,8IAA8I,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,6UAA6U,qBAAqB,CAAC,IAAI,IAAI,CAAC,gBAAgB,OAAO,qcAAqcC,KAAK,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,2mCAA2mCC,KAAK,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,0cAA0cC,MAAM,CAAC,IAAI,IAAI,CAAC,2BAA2B,OAAO,81BAA81B,yBAAyB,CAAC,IAAI,IAAI,CAAC,wBAAwB,OAAO,kqBAAkqB,sBAAsB,CAAC,IAAI,IAAI,GAAG,OAAO,uiBAAuiB,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,2gBAA2gBC,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,yzCAAyzC,mBAAmB,CAAC,IAAI,IAAI,GAAG,OAAO,qrBAAqrB,4BAA4B,CAAC,IAAI,IAAI,CAAC,mBAAmB,OAAO,mWAAmW,eAAe,CAAC,IAAI,IAAI,CAAC,gBAAgB,OAAO,0NAA0N,kBAAkB,CAAC,IAAI,IAAI,CAAC,OAAO,cAAc,OAAO,ijCAAijC,iBAAiB,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,i4BAAi4BC,SAAS,CAAC,IAAI,IAAI,CAAC,OAAO,iBAAiB,OAAO,swBAAswB,4BAA4B,CAAC,IAAI,IAAI,GAAG,OAAO,s1BAAs1BC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,+UAA+U,cAAc,CAAC,IAAI,IAAI,CAAC,qBAAqB,OAAO,mVAAmV,kBAAkB,CAAC,IAAI,IAAI,GAAG,OAAO,qxBAAqxB,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,s/BAAs/B,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,46BAA46B,iBAAiB,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,mgBAAmgB,sBAAsB,CAAC,IAAI,IAAI,GAAG,OAAO,+ZAA+Z,gBAAgB,CAAC,IAAI,IAAI,CAAC,iBAAiB,OAAO,uNAAuN,oBAAoB,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,4dAA4dC,OAAO,CAAC,IAAI,IAAI,CAAC,OAAO,QAAQ,OAAO,uVAAuVC,OAAO,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,+MAA+M,WAAW,CAAC,IAAI,IAAI,GAAG,OAAO,+8BAA+8BC,QAAQ,CAAC,IAAI,IAAI,GAAG,OAAO,+rDAA+rD,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,ouBAAouB,eAAe,CAAC,IAAI,IAAI,GAAG,OAAO,sLAAsL,kBAAkB,CAAC,IAAI,IAAI,CAAC,OAAO,cAAc,OAAO,+yBAA+yBC,KAAK,CAAC,IAAI,IAAI,GAAG,OAAO,6mBAA6mB,2BAA2B,CAAC,IAAI,IAAI,CAAC,kBAAkB,OAAO,g6EAAg6E,4BAA4B,CAAC,IAAI,IAAI,GAAG,OAAO,giBAAgiBC,MAAM,CAAC,IAAI,IAAI,CAAC,OAAO,MAAM,iBAAiB,OAAO,0MAA0M,YAAY,CAAC,IAAI,IAAI,GAAG,OAAO,owBAAowB,iBAAiB,CAAC,IAAI,IAAI,CAAC,aAAa,OAAO,2iBAA2iB,0BAA0B,CAAC,IAAI,IAAI,GAAG,OAAO,mXAAmX,WAAW,CAAC,IAAI,IAAI,CAAC,KAAK,mBAAmB,OAAO,kPAAkPC,MAAM,CAAC,IAAI,IAAI,CAAC,QAAQ,OAAO,qYAAqY,eAAe,CAAC,IAAI,IAAI,CAAC,MAAM,gBAAgB,OAAO,gfAAgfC,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,g3BAAg3BC,EAAE,CAAC,IAAI,IAAI,CAAC,KAAK,KAAK,iPAAiP,gBAAgB,CAAC,IAAI,IAAI,GAAG,OAAO,6XAA6XC,OAAO,CAAC,IAAI,IAAI,CAAC,KAAK,MAAM,cAAc,OAAO,2mBAA2mBC,SAAS,CAAC,IAAI,IAAI,CAAC,MAAM,OAAO,qTAAqT,cAAc,CAAC,IAAI,IAAI,GAAG,OAAO,gKAAgK5D,SAAS,CAAC,IAAI,IAAI,CAAC,OAAO,OAA
[minified JavaScript bundle output and base64-VLQ source-map mappings omitted: machine-generated build artifact, not human-readable]
,OAAO,EAAEkH,OAAM,EAAGC,OAAM,GAAQC,EAAG,iEAAiE,SAASC,IAAK,IAAI,IAAIl5B,EAAE,GAAGC,EAAE,GAAG,EAAED,KAAKC,GAAGg5B,EAAG,GAAGE,KAAKC,SAAS,GAAG,OAAOn5B,CAAC,CAAC,SAASo5B,GAAGr5B,GAAG,IAAI,IAAIC,EAAE,GAAGI,GAAGL,GAAG,IAAIqB,SAAS,EAAEhB,KAAKJ,EAAEI,GAAGL,EAAEK,GAAG,OAAOJ,CAAC,CAAC,SAASq5B,GAAGt5B,GAAG,OAAOA,EAAEu5B,UAAUF,GAAGr5B,EAAEu5B,YAAYv5B,EAAEy3B,aAAa,UAAU,IAAI+B,MAAM,KAAK14B,QAAO,SAASd,GAAG,OAAOA,CAAC,GAAE,CAAC,SAASy5B,GAAGz5B,GAAG,MAAM,GAAGqE,OAAOrE,GAAG05B,QAAQ,KAAK,SAASA,QAAQ,KAAK,UAAUA,QAAQ,KAAK,SAASA,QAAQ,KAAK,QAAQA,QAAQ,KAAK,OAAO,CAAC,SAASC,GAAGt5B,GAAG,OAAOM,OAAOC,KAAKP,GAAG,CAAC,GAAGsF,QAAO,SAAS3F,EAAEC,GAAG,OAAOD,EAAE,GAAGqE,OAAOpE,EAAE,MAAMoE,OAAOhE,EAAEJ,GAAG25B,OAAO,IAAI,GAAE,GAAG,CAAC,SAASC,GAAG75B,GAAG,OAAOA,EAAE84B,OAAOD,EAAEC,MAAM94B,EAAEo1B,IAAIyD,EAAEzD,GAAGp1B,EAAE8vB,IAAI+I,EAAE/I,GAAG9vB,EAAE6xB,SAASgH,EAAEhH,QAAQ7xB,EAAE+4B,OAAO/4B,EAAEg5B,KAAK,CAAC,SAASc,KAAK,IAAI95B,EAAEC,EAAEI,EAAEitB,EAAEltB,EAAEq4B,EAAEb,UAAUp3B,EAAEi4B,EAAEZ,iBAAiBp3B,EAAE,koiBAAkoiB,MAAM,OAAOL,GAAGI,IAAIH,IAAIL,EAAE,IAAI+5B,OAAO,MAAM11B,OAAO,KAAK,OAAO,KAAKpE,EAAE,IAAI85B,OAAO,OAAO11B,OAAO,KAAK,OAAO,KAAKhE,EAAE,IAAI05B,OAAO,MAAM11B,OAAOhE,GAAG,KAAKI,EAAEA,EAAEi5B,QAAQ15B,EAAE,IAAIqE,OAAOjE,EAAE,MAAMs5B,QAAQz5B,EAAE,KAAKoE,OAAOjE,EAAE,MAAMs5B,QAAQr5B,EAAE,IAAIgE,OAAO7D,KAAKC,CAAC,CAAC,IAAIu5B,IAAG,EAAG,SAASC,KAAKxB,EAAEV,aAAaiC,KAAK,SAASh6B,GAAG,GAAGA,GAAGyF,EAAE,CAAC,IAAIxF,EAAE2D,EAAEzB,cAAc,SAASlC,EAAEi6B,aAAa,OAAO,YAAYj6B,EAAEk6B,UAAUn6B,EAAE,IAAI,IAAIK,EAAEuD,EAAE3B,KAAKm4B,WAAWh6B,EAAE,KAAKI,EAAEH,EAAEgB,OAAO,GAAG,EAAEb,EAAEA,IAAI,CAAC,IAAIC,EAAEJ,EAAEG,GAAGE,GAAGD,EAAE45B,SAAS,IAAIC,eAAe,EAAE,CAAC,QAAQ,QAAQl4B,QAAQ1B,KAAKN,EAAEK,EAAE,CAACmD,EAAE3B,KAAKs4B,aAAat6B,EAAEG,EAAE,CAAC,CAAxQ,CAA0Q05B,MAAME,IAAG,EAAG,CAACv3B,EAAE,CAAC+3B,OAAO,WAAW,MAAM,CAACC,IAAI,CAACC,IAAIZ,GAAGa,UAAUV,IAAI,EAAE10B,MAAM,WAAW,MAAM,CAACq1B,yBAAyB,WAAWX,IAAI,EAAEY,YAAY,WAAWZ,IAAI,EAAE,IAAGn4B,EAAEe,GAAG,CAAC,GAAI5C,KAAK6B,EAAE7B,GAAG,CAAC,GAAG6B,EAAE7B,GAAGqF,SAASxD,EAAE7B,GAAGqF,OAAO,CAAC,GAAGxD,EAAE7B,GAAGsF,QAAQzD,EAAE7B,GAAGsF,MAAM,CAAC,GAAGzD,EAAE7B,GAAGuF,QAAQ1D,EAAE7B,GAAGuF,MAAM,IAAoG,IAAIs1B,GAAGh5B,EAAE7B,GAAG86B,GAAG,GAAGC,IAAG,EAAG,SAASC,GAAGj7B,GAAGyF,IAAIu1B,GAAGE,WAAWl7B,EAAE,GAAG+6B,GAAG95B,KAAKjB,GAAG,CAAC,SAASm7B,GAAGn7B,GAAG,IAAIK,EAAEJ,EAAED,EAAE6qB,IAAmBrqB,OAAE,KAAjBJ,EAAEJ,EAAEo7B,YAAwB,CAAC,EAAEh7B,EAAeA,OAAE,KAAfA,EAAEJ,EAAEknB,UAAsB,GAAG9mB,EAAE,MAAM,iBAAiBJ,EAAEy5B,GAAGz5B,GAAG,IAAIqE,OAAOpE,EAAE,KAAKoE,QAAQhE,EAAEG,EAAEG,OAAOC,KAAKP,GAAG,CAAC,GAAGsF,QAAO,SAAS3F,EAAEC,GAAG,OAAOD,EAAE,GAAGqE,OAAOpE,EAAE,MAAMoE,OAAOo1B,GAAGp5B,EAAEJ,IAAI,KAAK,GAAE,IAAI25B,QAAQ,KAAKv1B,OAAOjE,EAAEmE,IAAI42B,IAAIE,KAAK,IAAI,MAAMh3B,OAAOpE,EAAE,IAAI,CAAC,SAASq7B,GAAGt7B,EAAEC,EAAEI,GAAG,GAAGL,GAAGA,EAAEC,IAAID,EAAEC,GAAGI,GAAG,MAAM,CAACk7B,OAAOt7B,EAAE4F,SAASxF,EAAEuF,KAAK5F,EAAEC,GAAGI,GAAG,CAAmI,SAASm7B,GAAGx7B,EAAEC,EAAEI,EAAED,GAAG,IAAI,IAAII,EAAEC,EAAEC,EAAEC,OAAOC,KAAKZ,GAAGmB,EAAET,EAAEW,OAAOE,OAAE,IAASnB,EAAEq7B,GAAGx7B,EAAEG,GAAGH,EAAE6B,OAAE,IAASzB,GAAGG,EAAE,EAAER,EAAEU,EAAE,MAAMF,EAAE,EAAEH,GAAGG,EAAEW,EAAEX,IAAIsB,EAAEP,EAAEO,EAAE9B,EAAES,EAAEC,EAAEF,IAAIC,EAAET,GAAG,OAAO8B,CAAC,CAA9R2D,KAAKu1B,IAAIp3B,EAAE5B,gBAAgB05B,SAAS,aAAa,iBAAiBt2B,KAAKxB,EAAE+3B,cAAc/3B,EAAE1B,iBAAiB,oBAAvpB,SAAS05B,IAAKh4B,EAAEi4B,oBAAoB,mBAAmBD,GAAIZ,GAAG,EAAED,GAAGx2B,KAAI,SAASvE,GAAG,OAAOA,GAAG,GAAE,KAA6uB,IAAIy7B,GAAG,SAASj7B,EAAEC,GAAG,OAAO,SAAST,EAAEC,EAAEI,EAAED,GAAG,OAAOI,EAAEwE,KAAKvE,EAAET,EAAEC,EAAEI,EAAED,EAAE,CAAC,EAAE,SAAS07B,GAAG97B,GAAuN,OAApNA,EAAE,SAASA,GAAG,IAAI,IAAIC,EAAE,GAAGI,EAAE,EAAED,EAAEJ,EAAEqB,OA
AOhB,EAAED,GAAG,CAAC,IAAII,EAAEC,EAAET,EAAE+7B,WAAW17B,KAAK,OAAOI,GAAGA,GAAG,OAAOJ,EAAED,EAAE,QAAQ,OAAOI,EAAER,EAAE+7B,WAAW17B,OAAOJ,EAAEgB,OAAO,KAAKR,IAAI,KAAK,KAAKD,GAAG,QAAQP,EAAEgB,KAAKR,GAAGJ,KAAKJ,EAAEgB,KAAKR,EAAE,CAAC,OAAOR,CAAC,CAA7M,CAA+MD,GAAU,IAAIA,EAAEqB,OAAOrB,EAAE,GAAG+E,SAAS,IAAI,IAAI,CAAC,SAASi3B,GAAG57B,GAAG,OAAOO,OAAOC,KAAKR,GAAGuF,QAAO,SAAS3F,EAAEC,GAAG,IAAII,EAAED,EAAEH,GAAG,OAAQI,EAAEuF,KAAK5F,EAAEK,EAAEwF,UAAUxF,EAAEuF,KAAK5F,EAAEC,GAAGI,EAAEL,CAAC,GAAE,CAAC,EAAE,CAAC,SAASi8B,GAAGj8B,EAAEC,EAAEI,GAAyDA,OAAE,KAApDD,GAAG,EAAEgB,UAAUC,aAAQ,IAAShB,EAAEA,EAAE,CAAC,GAAG0F,YAAwB3F,EAApE,IAAsEA,EAAE47B,GAAG/7B,GAAG,mBAAmB66B,GAAGv1B,MAAMS,SAAS3F,EAAEy6B,GAAGx1B,OAAOtF,GAAG0F,EAAEA,EAAE,CAAC,EAAEo1B,GAAGx1B,OAAOtF,IAAI,CAAC,GAAGI,GAAG06B,GAAGv1B,MAAMS,QAAQhG,EAAEg8B,GAAG/7B,IAAI,QAAQD,GAAGi8B,GAAG,KAAKh8B,EAAE,CAAC,IAAIi8B,GAAGpB,GAAGx1B,OAAO62B,GAAGrB,GAAGt1B,MAAM42B,IAAIj7B,EAAEmB,EAAE,CAAC,EAAE8zB,EAAEz1B,OAAO07B,OAAO5F,EAAEL,KAAKj1B,EAAEmB,EAAEmoB,EAAE9pB,OAAO07B,OAAO5F,EAAEhM,KAAKnoB,GAAGg6B,GAAG,KAAKC,GAAG,CAAC,EAAEC,GAAG,CAAC,EAAEC,GAAG,CAAC,EAAEC,GAAG,CAAC,EAAEC,GAAG,CAAC,EAAEC,IAAIz7B,EAAEI,EAAE,CAAC,EAAE60B,EAAEz1B,OAAOC,KAAK21B,EAAEH,KAAKj1B,EAAEI,EAAEkpB,EAAE9pB,OAAOC,KAAK21B,EAAE9L,KAAKlpB,GAAgH,SAASs7B,KAAK,SAAS78B,EAAEI,GAAG,OAAOo7B,GAAGU,IAAG,SAASl8B,EAAEC,EAAEI,GAAG,OAAOL,EAAEK,GAAGm7B,GAAGv7B,EAAEG,EAAE,CAAC,GAAGJ,CAAC,GAAE,CAAC,EAAE,CAACu8B,GAAGv8B,GAAE,SAASC,EAAED,EAAEK,GAAG,OAAOL,EAAE,KAAKC,EAAED,EAAE,IAAIK,GAAGL,EAAE,IAAIA,EAAE,GAAGc,QAAO,SAASd,GAAG,MAAM,iBAAiBA,CAAC,IAAGsB,SAAQ,SAAStB,GAAGC,EAAED,EAAE+E,SAAS,KAAK1E,CAAC,IAAGJ,CAAC,IAAGu8B,GAAGx8B,GAAE,SAASC,EAAED,EAAEK,GAAG,OAAOJ,EAAEI,GAAGA,EAAEL,EAAE,IAAIA,EAAE,GAAGc,QAAO,SAASd,GAAG,MAAM,iBAAiBA,CAAC,IAAGsB,SAAQ,SAAStB,GAAGC,EAAED,GAAGK,CAAC,IAAGJ,CAAC,IAAG08B,GAAG38B,GAAE,SAASC,EAAED,EAAEK,GAAU,OAAPL,EAAEA,EAAE,GAAUC,EAAEI,GAAGA,EAAEL,EAAEsB,SAAQ,SAAStB,GAAGC,EAAED,GAAGK,CAAC,IAAGJ,CAAC,IAAG,IAAIO,EAAE,QAAQ07B,IAAIzD,EAAEqE,aAAa78B,EAAEu7B,GAAGW,IAAG,SAASn8B,EAAEC,GAAG,IAAII,EAAEJ,EAAE,GAAGG,EAAEH,EAAE,GAAU,OAAPA,EAAEA,EAAE,GAAS,QAAQG,GAAGI,IAAIJ,EAAE,OAAO,iBAAiBC,IAAIL,EAAE+8B,MAAM18B,GAAG,CAACk7B,OAAOn7B,EAAEyF,SAAS5F,IAAI,iBAAiBI,IAAIL,EAAEg9B,SAAS38B,EAAE0E,SAAS,KAAK,CAACw2B,OAAOn7B,EAAEyF,SAAS5F,IAAID,CAAC,GAAE,CAAC+8B,MAAM,CAAC,EAAEC,SAAS,CAAC,IAAIP,GAAGx8B,EAAE88B,MAAML,GAAGz8B,EAAE+8B,SAASV,GAAGW,GAAGxE,EAAEf,aAAa,CAACwF,OAAOzE,EAAEd,eAAe,CAAC,SAASwF,GAAGn9B,EAAEC,GAAG,OAAOs8B,GAAGv8B,IAAI,CAAC,GAAGC,EAAE,CAAC,SAASm9B,GAAGp9B,EAAEC,GAAG,OAAO08B,GAAG38B,IAAI,CAAC,GAAGC,EAAE,CAAC,SAASo9B,GAAGr9B,GAAG,OAAOy8B,GAAGz8B,IAAI,CAACu7B,OAAO,KAAK11B,SAAS,KAAK,CAAsI,SAASo3B,GAAGj9B,EAAEC,GAAG,IAAII,GAAG,EAAEe,UAAUC,aAAQ,IAASpB,EAAEA,EAAE,CAAC,GAAGi9B,OAA6E,OAArD78B,EAAEk2B,EAAnBt2B,OAAE,IAASI,EAAE+1B,EAAE/1B,GAASL,GAAGK,EAAEm2B,EAAEv2B,GAAGD,IAAIw2B,EAAEv2B,GAAGI,GAAGL,EAAEA,KAAK86B,GAAGx1B,OAAOtF,EAAE,KAAYK,GAAGL,GAAG,IAAI,CAAhS8B,EAAE,SAAS9B,GAAGs8B,GAAGW,GAAGj9B,EAAE03B,aAAa,CAACwF,OAAOzE,EAAEd,eAAe,EAAEgB,EAAE13B,KAAKa,GAAG+6B,KAAyN,IAAIS,IAAIn8B,EAAElB,EAAE,CAAC,EAAEm2B,EAAEz1B,OAAOC,KAAK61B,EAAEL,KAAKj1B,EAAElB,EAAEwqB,EAAE9pB,OAAOC,KAAK61B,EAAEhM,KAAKxqB,GAAG,SAASs9B,GAAGv9B,EAAEC,GAAG,IAAwDG,OAAE,KAAtDH,GAAG,EAAEmB,UAAUC,aAAQ,IAASpB,EAAEA,EAAE,CAAC,GAAGu9B,cAA0Bv9B,EAAEO,GAAGW,EAAElB,EAAE,CAAC,EAAEm2B,EAAE,GAAG/xB,OAAOo0B,EAAEb,UAAU,KAAKvzB,OAAO+xB,IAAIj1B,EAAElB,EAAEwqB,EAAE,GAAGpmB,OAAOo0B,EAAEb,UAAU,KAAKvzB,OAAOomB,IAAIxqB,GAAGQ,EAAE,KAAKC,EAAE01B,EAAmnB,OAAhnBp2B,EAAEy9B,SAASj9B,EAAE41B,KAAKp2B,EAAE09B,MAAK,SAAS19B,GAAG,OAAOs9B,GAAGlH,GAAGqH,SAASz9B,EAAE,OAAMU,EAAE01B,IAAIp2B,EAAEy9B,SAASj9B,
EAAEiqB,KAAKzqB,EAAE09B,MAAK,SAAS19B,GAAG,OAAOs9B,GAAG7S,GAAGgT,SAASz9B,EAAE,OAAMU,EAAE+pB,GAAGxqB,EAAED,EAAE2F,QAAO,SAAS3F,EAAEC,GAAG,IAAII,EAA5sD,SAAYL,EAAEC,GAAsBA,GAAfI,EAAEJ,EAAEu5B,MAAM,MAAS,GAAvB,IAA0Bn5B,EAAEA,EAAE4E,MAAM,GAAGo2B,KAAK,KAAK,OAAOp7B,IAAID,GAAG,KAAKK,IAAIg3B,EAAEj1B,QAAQ/B,GAAG,KAAKA,CAAC,CAAkmDs9B,CAAGlF,EAAEb,UAAU33B,GAAG,OAAOi8B,GAAGj8B,IAAIA,EAAEm8B,GAAG17B,GAAG+8B,SAASx9B,GAAGy2B,EAAEh2B,GAAGT,GAAGA,EAAEQ,EAAER,EAAED,EAAEu7B,OAAOt7B,IAAI,EAAE28B,GAAGl8B,GAAG0B,QAAQnC,IAAIQ,EAAER,EAAED,EAAEu7B,OAAO0B,GAAGh9B,EAAE,CAACi9B,OAAOx8B,KAAKL,EAAEL,EAAE6F,SAASxF,EAAEJ,IAAIw4B,EAAEZ,kBAAkB53B,IAAIO,EAAE41B,IAAIn2B,IAAIO,EAAEiqB,IAAIzqB,EAAE49B,KAAK38B,KAAKhB,IAAIG,GAAGJ,EAAEu7B,QAAQv7B,EAAE6F,WAAWxF,EAAE,OAAOI,EAAE48B,GAAGr9B,EAAE6F,UAAU,CAAC,EAAE5F,EAAEm9B,GAAGp9B,EAAEu7B,OAAOv7B,EAAE6F,UAAUxF,EAAEk7B,SAAS96B,EAAE,MAAMT,EAAE6F,SAASxF,EAAEwF,UAAU5F,GAAGD,EAAE6F,SAAS7F,EAAEu7B,OAAOl7B,EAAEk7B,QAAQv7B,EAAEu7B,OAAO,QAAQv7B,EAAEu7B,QAAQW,GAAGl5B,MAAMk5B,GAAGn5B,KAAK01B,EAAEqE,eAAe98B,EAAEu7B,OAAO,QAAQv7B,CAAC,GAA9iC,CAACu7B,OAAO,KAAK11B,SAAS,KAAK+3B,KAAK,MAA6hC59B,EAAEy9B,SAAS,cAAcz9B,EAAEy9B,SAAS,UAAUx9B,EAAEs7B,OAAO,QAAQv7B,EAAEy9B,SAAS,eAAez9B,EAAEy9B,SAAS,UAAUx9B,EAAEs7B,OAAO,OAAOt7B,EAAEs7B,QAAQ76B,IAAI+pB,IAAIyR,GAAG34B,OAAOk1B,EAAEqE,eAAe78B,EAAEs7B,OAAO,OAAOt7B,EAAE4F,SAASu3B,GAAGn9B,EAAEs7B,OAAOt7B,EAAE4F,WAAW5F,EAAE4F,UAAU,OAAO5F,EAAEs7B,QAAQ,OAAO96B,IAAIR,EAAEs7B,OAAOe,IAAI,OAAOr8B,CAAC,CAAKqC,EAAE,WAAW,SAAStC,KAAK,SAASA,EAAEC,GAAG,KAAKD,aAAaC,GAAG,MAAM,IAAIoF,UAAU,oCAAoC,CAA3F,CAA6Fw4B,KAAK79B,GAAG69B,KAAKC,YAAY,CAAC,CAAC,CAAC,IAAI79B,EAAEI,EAAI,OAAOJ,EAAED,GAAGK,EAAE,CAAC,CAACyiB,IAAI,MAAMnhB,MAAM,WAAW,IAAI,IAAItB,EAAEw9B,KAAK79B,EAAEoB,UAAUC,OAAOpB,EAAE,IAAI8B,MAAM/B,GAAGI,EAAE,EAAEA,EAAEJ,EAAEI,IAAIH,EAAEG,GAAGgB,UAAUhB,GAAG,IAAII,EAAEP,EAAE0F,OAAOk4B,KAAKE,iBAAiB,CAAC,GAAGp9B,OAAOC,KAAKJ,GAAGc,SAAQ,SAAStB,GAAGK,EAAEy9B,YAAY99B,GAAG0F,EAAEA,EAAE,CAAC,EAAErF,EAAEy9B,YAAY99B,IAAI,CAAC,GAAGQ,EAAER,IAAIi8B,GAAGj8B,EAAEQ,EAAER,IAAI,IAAIC,EAAEw2B,EAAEL,GAAGp2B,GAAGC,GAAGg8B,GAAGh8B,EAAEO,EAAER,IAAI68B,IAAI,GAAE,GAAG,CAAC/Z,IAAI,QAAQnhB,MAAM,WAAWk8B,KAAKC,YAAY,CAAC,CAAC,GAAG,CAAChb,IAAI,mBAAmBnhB,MAAM,SAASnB,EAAER,GAAG,IAAIS,EAAET,EAAEu7B,QAAQv7B,EAAE6F,UAAU7F,EAAE4F,KAAK,CAAC,EAAE5F,GAAGA,EAAE,OAAOW,OAAOC,KAAKH,GAAG8D,KAAI,SAASvE,GAAG,IAAWK,GAAPJ,EAAEQ,EAAET,IAAOu7B,OAAoBn7B,GAAbJ,EAAEC,EAAE4F,SAAW5F,EAAE2F,MAAK3F,EAAEG,EAAE,GAAGI,EAAEH,KAAKG,EAAEH,GAAG,CAAC,GAAG,EAAEJ,EAAEoB,QAAQpB,EAAEqB,SAAQ,SAAStB,GAAG,iBAAiBA,IAAIQ,EAAEH,GAAGL,GAAGI,EAAE,IAAGI,EAAEH,GAAGL,GAAGI,CAAC,IAAGI,CAAC,MAA3r7B,SAAWR,EAAEC,GAAG,IAAI,IAAII,EAAE,EAAEA,EAAEJ,EAAEoB,OAAOhB,IAAI,CAAC,IAAID,EAAEH,EAAEI,GAAGD,EAAEY,WAAWZ,EAAEY,aAAY,EAAGZ,EAAEwB,cAAa,EAAG,UAAUxB,IAAIA,EAAEyB,UAAS,GAAIlB,OAAOe,eAAe1B,EAAEI,EAAE0iB,IAAI1iB,EAAE,CAAC,CAA4h7BK,CAAER,EAAE6E,UAAUzE,GAAaM,OAAOe,eAAezB,EAAE,YAAY,CAAC4B,UAAS,IAAK7B,CAAC,CAAh2B,GAAo2BuB,EAAE,GAA52B,IAA+2By8B,GAAG,CAAC,EAAEC,GAAG,CAAC,EAAEC,GAAGv9B,OAAOC,KAAKq9B,IAAI,SAASE,GAAGn+B,EAAEC,GAAG,IAAI,IAAII,EAAEe,UAAUC,OAAOjB,EAAE,IAAI2B,MAAM,EAAE1B,EAAEA,EAAE,EAAE,GAAGG,EAAE,EAAEA,EAAEH,EAAEG,IAAIJ,EAAEI,EAAE,GAAGY,UAAUZ,GAAG,OAAOw9B,GAAGh+B,IAAI,IAAIsB,SAAQ,SAAStB,GAAGC,EAAED,EAAEkB,MAAM,KAAK,CAACjB,GAAGoE,OAAOjE,GAAG,IAAGH,CAAC,CAAC,SAASm+B,GAAGp+B,GAAG,IAAI,IAAIC,EAAEmB,UAAUC,OAAOhB,EAAE,IAAI0B,MAAM,EAAE9B,EAAEA,EAAE,EAAE,GAAGG,EAAE,EAAEA,EAAEH,EAAEG,IAAIC,EAAED,EAAE,GAAGgB,UAAUhB,IAAI49B,GAAGh+B,IAAI,IAAIsB,SAAQ,SAAStB,GAAGA,EAAEkB,MAAM,KAAKb,EAAE,GAAE,CAAC,SAASg+B,GAAGr+B,GAAG,IAAIC,EAAED,EAA4C,OAA1CA,EAAE+B,MAAM+C,UAAUG,MAAMD,KAAK5D,UAAU,GAAU68B,GAAGh+B,GAA
Gg+B,GAAGh+B,GAAGiB,MAAM,KAAKlB,QAAG,CAAM,CAAC,SAASs+B,GAAGt+B,GAAG,OAAOA,EAAEu7B,SAASv7B,EAAEu7B,OAAO,OAAO,IAAIt7B,EAAED,EAAE6F,SAAwB,GAAf7F,EAAEA,EAAEu7B,QAAQe,GAAMr8B,EAAE,OAAOA,EAAEm9B,GAAGp9B,EAAEC,IAAIA,EAAEq7B,GAAGiD,GAAGT,YAAY99B,EAAEC,IAAIq7B,GAAGR,GAAGx1B,OAAOtF,EAAEC,EAAE,CAAC,IAAIs+B,GAAG,IAAIj8B,EAAEk8B,GAAG,CAACC,OAAO,WAAWhG,EAAEX,gBAAe,EAAGW,EAAEP,kBAAiB,EAAGkG,GAAG,SAAS,EAAEM,OAAOjG,EAAEgC,IAAI,CAACkE,MAAM,WAAW,IAAI3+B,EAAE,EAAEoB,UAAUC,aAAQ,IAASD,UAAU,GAAGA,UAAU,GAAG,CAAC,EAAE,OAAOqE,GAAG24B,GAAG,cAAcp+B,GAAGq+B,GAAG,qBAAqBr+B,GAAGq+B,GAAG,QAAQr+B,IAAI4+B,QAAQC,OAAO,yCAAyC,EAAEC,MAAM,WAAW,IAAI9+B,EAAE,EAAEoB,UAAUC,aAAQ,IAASD,UAAU,GAAGA,UAAU,GAAG,CAAC,EAAEnB,EAAED,EAAE++B,oBAAmB,IAAKtG,EAAEX,iBAAiBW,EAAEX,gBAAe,GAAIW,EAAEP,kBAAiB,EAAG+C,IAAG,WAAW+D,GAAG,CAACD,mBAAmB9+B,IAAIm+B,GAAG,QAAQp+B,EAAE,GAAE,GAAGi/B,MAAM,CAACr5B,KAAK,SAAS5F,GAAG,GAAG,OAAOA,EAAE,OAAO,KAAK,GAAG,WAAWQ,EAAER,IAAIA,EAAEu7B,QAAQv7B,EAAE6F,SAAS,MAAM,CAAC01B,OAAOv7B,EAAEu7B,OAAO11B,SAASu3B,GAAGp9B,EAAEu7B,OAAOv7B,EAAE6F,WAAW7F,EAAE6F,UAAU,GAAG9D,MAAM2C,QAAQ1E,IAAI,IAAIA,EAAEqB,OAAO,CAAC,IAAIpB,EAAE,IAAID,EAAE,GAAGoC,QAAQ,OAAOpC,EAAE,GAAGiF,MAAM,GAAGjF,EAAE,GAAGK,EAAE48B,GAAGj9B,EAAE,IAAI,MAAM,CAACu7B,OAAOl7B,EAAEwF,SAASu3B,GAAG/8B,EAAEJ,IAAIA,EAAE,CAAC,MAAG,iBAAiBD,KAAK,EAAEA,EAAEoC,QAAQ,GAAGiC,OAAOo0B,EAAEb,UAAU,OAAO53B,EAAEk/B,MAAMvI,IAA+C,CAAC4E,QAA3Ct7B,EAAEs9B,GAAGv9B,EAAEw5B,MAAM,KAAK,CAACgE,aAAY,KAAqBjC,QAAQe,GAAGz2B,SAASu3B,GAAGn9B,EAAEs7B,OAAOt7B,EAAE4F,WAAW5F,EAAE4F,UAAgB,iBAAiB7F,EAAE,CAACu7B,OAAOe,GAAGz2B,SAASu3B,GAAGd,GAAGt8B,IAAIA,QAAG,CAAM,GAAGm/B,QAAQZ,GAAGa,mBAAmBd,GAAGe,OAAOlE,IAAI6D,GAAG,WAAW,IAAqFh/B,OAAE,KAAnFA,GAAG,EAAEoB,UAAUC,aAAQ,IAASD,UAAU,GAAGA,UAAU,GAAG,CAAC,GAAG29B,oBAAgCn7B,EAAE5D,GAAG,EAAEW,OAAOC,KAAKk6B,GAAGx1B,QAAQjE,QAAQo3B,EAAEqE,eAAer3B,GAAGgzB,EAAEX,gBAAgB0G,GAAG/D,IAAIkE,MAAM,CAACt0B,KAAKrK,GAAG,EAAE,SAASs/B,GAAGr/B,EAAED,GAAG,OAAOW,OAAOe,eAAezB,EAAE,WAAW,CAAC2C,IAAI5C,IAAIW,OAAOe,eAAezB,EAAE,OAAO,CAAC2C,IAAI,WAAW,OAAO3C,EAAEs/B,SAASh7B,IAAI42B,GAAG,IAAIx6B,OAAOe,eAAezB,EAAE,OAAO,CAAC2C,IAAI,WAAW,GAAG6C,EAAE,CAAC,IAAIzF,EAAE4D,EAAEzB,cAAc,OAAO,OAAOnC,EAAEm6B,UAAUl6B,EAAEu/B,KAAKx/B,EAAEknB,QAAQ,CAAC,IAAIjnB,CAAC,CAAC,SAASw/B,GAAGz/B,GAAG,IAAi0B0C,EAAEiB,EAAEd,EAAEe,EAAE6B,EAA3zBpF,GAAVJ,EAAED,EAAEoe,OAAUshB,KAAKt/B,EAAEH,EAAE20B,KAAKp0B,EAAER,EAAEu7B,OAAO96B,EAAET,EAAE6F,SAASnF,EAAEV,EAAE2/B,UAAUx+B,EAAEnB,EAAE4/B,OAAOr+B,EAAEvB,EAAE6/B,MAAM/9B,EAAE9B,EAAE8/B,OAAOz9B,EAAErC,EAAE+/B,QAAQz9B,EAAEtC,EAAEggC,MAAoBx9B,OAAE,KAAhBD,EAAEvC,EAAEigC,YAAwB19B,EAAgBtC,GAAdwC,EAAErC,EAAE8/B,MAAM9/B,EAAEC,GAAM8/B,MAAiB59B,GAAXvC,EAAEyC,EAAE29B,OAAS,QAAQ5/B,GAAEiC,EAAE,CAACg2B,EAAEZ,iBAAiBp3B,EAAE,GAAG4D,OAAOo0B,EAAEb,UAAU,KAAKvzB,OAAO5D,GAAG,IAAIK,QAAO,SAASd,GAAG,OAAO,IAAIsC,EAAE+9B,QAAQj+B,QAAQpC,EAAE,IAAGc,QAAO,SAASd,GAAG,MAAM,KAAKA,KAAKA,CAAC,IAAGqE,OAAO/B,EAAE+9B,SAAShF,KAAK,KAA8tB,OAAztB54B,EAAE,CAACykB,SAAS,GAAGkU,WAAW11B,EAAEA,EAAE,CAAC,EAAEpD,EAAE84B,YAAY,CAAC,EAAE,CAAC,cAAc56B,EAAE,YAAYC,EAAE6/B,MAAM79B,EAAE89B,KAAKj+B,EAAE84B,WAAWmF,MAAM,MAAMC,MAAM,6BAA6BC,QAAQ,OAAOp8B,OAAOpE,EAAE,KAAKoE,OAAOrE,MAAMA,EAAEuC,KAAKD,EAAE+9B,QAAQj+B,QAAQ,SAAS,CAAC+9B,MAAM,GAAG97B,OAAOpE,EAAED,EAAE,GAAG,MAAM,OAAO,CAAC,EAAEwC,IAAIC,EAAE24B,WAAW/Y,GAAG,IAAI9gB,IAAIkB,EAAEykB,SAASjmB,KAAK,CAAC4pB,IAAI,QAAQuQ,WAAW,CAACsF,GAAGj+B,EAAE24B,WAAW,oBAAoB,SAAS/2B,OAAOhC,GAAG62B,MAAOhS,SAAS,CAAC3lB,YAAYkB,EAAE24B,WAAWyE,OAAqB7/B,EAAE0F,EAAEA,EAAE,CAAC,EAAEjD,GAAG,CAAC,EAAE,CAAC84B,OAAO/6B,EAAEqF,SAASpF,EAAEi/B,KAAKr/B,EAAEu0B,KAAKx0B,EAAE0/B,OAAOh+B,EAAE69B,UAAUj/B,EAAEk/B,OAAOz+B,EAAEmE,OAAOI,EAAEA,EAAE,CAA
C,EAAE1F,GAAGsC,EAAEgD,UAAiJjF,GAAvID,EAAEA,EAAE8/B,OAAO7/B,EAAE6/B,MAAM7B,GAAG,uBAAuBr+B,IAAI,CAACknB,SAAS,GAAGkU,WAAW,CAAC,GAAGiD,GAAG,uBAAuBr+B,IAAI,CAACknB,SAAS,GAAGkU,WAAW,CAAC,IAAOlU,SAAS9mB,EAAEA,EAAEg7B,WAAkBp7B,EAAEknB,SAAS7mB,EAAEL,EAAEo7B,WAAWh7B,EAAEe,GAAGwC,GAAGjB,EAAE1C,GAAGu7B,OAAO14B,EAAEH,EAAEmD,SAASjC,EAAElB,EAAEwkB,SAASzhB,EAAE/C,EAAE04B,WAAW14B,GAAE,KAAMA,EAAEA,EAAEk9B,QAAQ,GAAGv7B,OAAOV,EAAE,KAAKU,OAAOo0B,EAAEb,UAAU,KAAKvzB,OAAOxB,GAAGH,EAAE,CAAC,CAACmoB,IAAI,MAAMuQ,WAAW,CAACuF,MAAM,kBAAkBzZ,SAAS,CAAC,CAAC2D,IAAI,SAASuQ,WAAW11B,EAAEA,EAAE,CAAC,EAAED,GAAG,CAAC,EAAE,CAACi7B,GAAGh+B,IAAIwkB,SAAStjB,QAAQf,GAAGc,EAAE3D,GAAGknB,SAASzhB,EAAE9B,EAAE+7B,KAAKh9B,EAAEiB,EAAEixB,KAAKhxB,EAAED,EAAEy3B,WAAWp7B,EAAE2D,EAAE2B,OAAOu0B,GAAGl2B,EAAEA,EAAEg8B,YAAYl6B,EAAEy6B,QAAQx9B,EAAEw9B,QAAQx9B,EAAE+C,EAAE06B,MAAM16B,EAAE26B,OAAO,EAAE36B,EAAE,GAAG7B,EAAE+8B,MAAMhH,GAAGj0B,EAAEA,EAAE,CAAC,EAAE1F,GAAG,CAAC,EAAE,CAAC,mBAAmB,GAAGqE,OAAO3B,EAAEiB,EAAEyxB,EAAE,GAAG,OAAO/wB,OAAOoB,EAAE9B,EAAEmsB,EAAE,GAAG,UAAU,CAAC,CAACjF,IAAI,MAAMuQ,WAAWx3B,EAAEsjB,SAASrkB,IAAI,CAAC,SAAS+9B,GAAG5gC,GAAG,IAAIC,EAAED,EAAE6gC,QAAQxgC,EAAEL,EAAEmgC,MAAM//B,EAAEJ,EAAEogC,OAAO5/B,EAAER,EAAE2/B,UAAUl/B,EAAET,EAAE6/B,MAAMn/B,EAAEV,EAAEggC,MAAM7+B,EAAEnB,EAAEigC,UAAU1+B,OAAE,IAASJ,GAAGA,EAAirB,OAA/qBnB,EAAE0F,EAAEA,EAAEA,EAAE,CAAC,EAAEhF,EAAE06B,YAAY36B,EAAE,CAACo/B,MAAMp/B,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC6/B,MAAM5/B,EAAE2/B,QAAQhF,KAAK,OAAO95B,IAAIvB,EAAEqiB,GAAG,IAAIlhB,EAAEuE,EAAE,CAAC,EAAEhF,EAAE4E,QAAQu0B,GAAGr5B,KAAKW,EAAEw+B,WAAWj/B,GAAGa,EAAE,CAACo+B,UAAUn/B,EAAEsgC,eAAc,EAAGX,MAAM9/B,EAAE+/B,OAAOhgC,IAAIu/B,UAAUn/B,EAAEe,EAAE4+B,MAAM//B,OAAE,KAAUC,EAAEkB,EAAE6+B,QAA30/B,GAAq1/B//B,EAAEkB,OAAE,KAAUlB,EAAEkB,EAAEu/B,gBAAgBzgC,EAAEA,EAAE,GAAGA,GAAGkB,GAAGuE,EAAE,aAAazB,OAAO3D,EAAE00B,EAAEwD,QAAG,IAASp4B,EAA16/B,GAA86/BA,GAAG,EAAE,QAAQ6D,OAAO3D,EAAEovB,EAAE8I,EAAEx4B,EAAE,EAAE,QAAQmB,EAAE,yBAAyB8C,OAAO3D,EAAE00B,EAAEwD,EAAE,qBAAqBv0B,OAAO3D,EAAEovB,EAAE8I,EAAE,SAAS,aAAav0B,OAAO3D,EAAE00B,EAAEwD,EAAE,QAAQv0B,OAAO3D,EAAEovB,EAAE8I,EAAE,QAAQv4B,GAAG,SAASgE,OAAO3D,EAAEo4B,KAAKF,GAAGl4B,EAAEq4B,OAAO,EAAE,GAAG,MAAM10B,OAAO3D,EAAEo4B,KAAKF,GAAGl4B,EAAEs4B,OAAO,EAAE,GAAG,MAAM34B,GAAG,UAAUgE,OAAO3D,EAAEmxB,OAAO,UAAU1wB,EAAE,qBAAqBA,EAAEw+B,WAAmB,GAARx+B,EAAEw4B,GAAGx4B,IAAOE,SAASrB,EAAE2gC,MAAMx/B,IAAGA,EAAE,IAAYF,KAAK,CAAC4pB,IAAI,OAAOuQ,WAAWp7B,EAAEknB,SAAS,CAACjnB,KAAKQ,GAAGU,EAAEF,KAAK,CAAC4pB,IAAI,OAAOuQ,WAAW,CAACkF,MAAM,WAAWpZ,SAAS,CAACzmB,KAAKU,CAAC,CAAC,IAAI4/B,GAAGjG,GAAGx1B,OAAO,SAAS07B,GAAGhhC,GAAG,IAAIC,EAAED,EAAE,GAAGK,EAAEL,EAAE,GAAwB,OAArBA,EAAEuC,EAAEvC,EAAEiF,MAAM,GAAG,GAAG,GAAS,CAACi7B,OAAM,EAAGC,MAAMlgC,EAAEmgC,OAAO//B,EAAEuF,KAAK7D,MAAM2C,QAAQ1E,GAAG,CAAC6qB,IAAI,IAAIuQ,WAAW,CAACkF,MAAM,GAAGj8B,OAAOo0B,EAAEb,UAAU,KAAKvzB,OAAO2yB,EAAEC,QAAQ/P,SAAS,CAAC,CAAC2D,IAAI,OAAOuQ,WAAW,CAACkF,MAAM,GAAGj8B,OAAOo0B,EAAEb,UAAU,KAAKvzB,OAAO2yB,EAAEI,WAAWpG,KAAK,eAAelrB,EAAE9F,EAAE,KAAK,CAAC6qB,IAAI,OAAOuQ,WAAW,CAACkF,MAAM,GAAGj8B,OAAOo0B,EAAEb,UAAU,KAAKvzB,OAAO2yB,EAAEG,SAASnG,KAAK,eAAelrB,EAAE9F,EAAE,OAAO,CAAC6qB,IAAI,OAAOuQ,WAAW,CAACpK,KAAK,eAAelrB,EAAE9F,IAAI,CAAC,IAAIihC,GAAG,CAACf,OAAM,EAAGC,MAAM,IAAIC,OAAO,KAAK,SAASc,GAAG1gC,EAAEC,GAAG,IAAIC,EAAED,EAAE,MAAM,OAAOA,GAAG,OAAOg4B,EAAEf,eAAej3B,EAAE67B,IAAI,IAAIsC,SAAQ,SAAS5+B,EAAEC,GAAG,IAAII,EAAED,EAA4B,GAA1Bi+B,GAAG,uBAA0B,OAAO39B,IAAIN,EAAEi9B,GAAG78B,IAAI,CAAC,EAAEA,EAAEJ,EAAEyF,UAAUrF,EAAEC,EAAEL,EAAEm7B,QAAQ96B,GAAGD,GAAGC,GAAGsgC,GAAGtgC,IAAIsgC,GAAGtgC,GAAGD,GAAG,OAAOR,EAAEghC,GAAGD,GAAGtgC,GAAGD,KAAKH,EAAEG,EAAEJ,EAAEK,EAAE01B,GAAGsC,EAAEH,mBAAmBj4B,GAAG8gC,
QAAQC,MAAM,mBAAmB/8B,OAAOhE,EAAE,kBAAkBgE,OAAOjE,EAAE,kBAAkBJ,EAAE0F,EAAEA,EAAE,CAAC,EAAEu7B,IAAI,CAAC,EAAE,CAACr7B,KAAK6yB,EAAEH,kBAAkB93B,GAAG69B,GAAG,wBAAwB,CAAC,IAAI,GAAE,CAAC,SAASgD,KAAK,CAAC,SAASC,GAAGthC,GAAGuhC,GAAG1L,KAAK,GAAGxxB,OAAOm9B,GAAG,KAAKn9B,OAAOrE,EAAE,UAAUuhC,GAAGzL,QAAQ,GAAGzxB,OAAOm9B,GAAG,KAAKn9B,OAAOrE,GAAG,GAAGqE,OAAOm9B,GAAG,KAAKn9B,OAAOrE,EAAE,WAAW,GAAGqE,OAAOm9B,GAAG,KAAKn9B,OAAOrE,EAAE,SAAS,CAAC,IAAIuhC,GAAG9I,EAAEJ,oBAAoB71B,GAAGA,EAAEqzB,MAAMrzB,EAAEszB,QAAQtzB,EAAE,CAACqzB,KAAKwL,GAAGvL,QAAQuL,IAAIG,GAAG,aAAaC,GAAG,CAACC,MAAM,SAAS1hC,GAAG,OAAOuhC,GAAG1L,KAAK,GAAGxxB,OAAOm9B,GAAG,KAAKn9B,OAAOrE,EAAE,YAAY,WAAW,OAAOshC,GAAGthC,EAAE,CAAC,EAAE2hC,IAAIL,IAAIM,GAAG,WAAW,EAAE,SAASC,GAAG7hC,GAAG,MAAM,iBAAiBA,EAAEy3B,aAAaz3B,EAAEy3B,aAAapV,GAAG,KAAK,CAAC,SAASyf,GAAG9hC,GAAG,OAAO4D,EAAEm+B,gBAAgB,6BAA6B/hC,EAAE,CAAC,SAASgiC,GAAGhiC,GAAG,OAAO4D,EAAEzB,cAAcnC,EAAE,CAAC,IAAIiiC,GAAG,CAACvI,QAAQ,SAAS15B,GAAG,IAAIC,EAAED,EAAE,GAAGC,EAAEiiC,aAAaliC,EAAE,GAAGsB,SAAQ,SAAStB,GAAGC,EAAEiiC,WAAW3H,aAAa,SAASt6B,EAAEI,EAAEL,GAAG,IAAiDI,OAAE,KAA/CJ,GAAG,EAAEoB,UAAUC,aAAQ,IAASrB,EAAEA,EAAE,CAAC,GAAGmiC,MAAkB,QAAQ9hC,EAAEwqB,IAAIiX,GAAGE,GAAGhiC,EAAE,GAAG,iBAAiBK,EAAE,OAAOuD,EAAEw+B,eAAe/hC,GAAG,IAAIG,EAAEJ,EAAEC,EAAEwqB,KAAK,OAAOlqB,OAAOC,KAAKP,EAAE+6B,YAAY,IAAI95B,SAAQ,SAAStB,GAAGQ,EAAE05B,aAAal6B,EAAEK,EAAE+6B,WAAWp7B,GAAG,KAAIK,EAAE6mB,UAAU,IAAI5lB,SAAQ,SAAStB,GAAGQ,EAAE6hC,YAAYpiC,EAAED,EAAE,CAACmiC,KAAK/hC,IAAI,IAAGI,CAAC,CAAtU,CAAwUR,GAAGC,EAAE,IAAG,OAAOA,EAAEw3B,aAAapV,IAAIoW,EAAEL,oBAAoBp4B,EAAE4D,EAAE0+B,eAAetiC,EAAE,IAAIqE,QAAQrE,EAAEC,GAAGsiC,UAAU,KAAKviC,EAAE,GAAGqE,OAAOrE,EAAE,mCAAmCC,EAAEiiC,WAAWM,aAAaxiC,EAAEC,IAAIA,EAAEwiC,SAAS,EAAEC,KAAK,SAAS1iC,GAAG,IAAIC,EAAED,EAAE,GAAGK,EAAEL,EAAE,GAAG,IAAIs5B,GAAGr5B,GAAGmC,QAAQq2B,EAAEZ,kBAAkB,OAAOoK,GAAGvI,QAAQ15B,GAAG,IAAII,EAAE,IAAI25B,OAAO,GAAG11B,OAAOo0B,EAAEb,UAAU,eAAev3B,EAAE,GAAG+6B,WAAWsF,GAAGrgC,EAAE,GAAG+6B,WAAWkF,QAAQtgC,EAAEK,EAAE,GAAG+6B,WAAWkF,MAAM9G,MAAM,KAAK7zB,QAAO,SAAS3F,EAAEC,GAAG,OAAOA,IAAIw4B,EAAEZ,kBAAkB53B,EAAEi/B,MAAM9+B,GAAGJ,EAAE2iC,MAAM3iC,EAAE4iC,QAAQ3hC,KAAKhB,GAAGD,CAAC,GAAE,CAAC4iC,OAAO,GAAGD,MAAM,KAAKtiC,EAAE,GAAG+6B,WAAWkF,MAAMtgC,EAAE2iC,MAAMtH,KAAK,KAAK,IAAIr7B,EAAE4iC,OAAOvhC,OAAOpB,EAAE4iC,gBAAgB,SAAS5iC,EAAEi6B,aAAa,QAAQl6B,EAAE4iC,OAAOvH,KAAK,OAAOh7B,EAAEA,EAAEkE,IAAI42B,IAAIE,KAAK,MAAMp7B,EAAEi6B,aAAa7X,EAAE,IAAIpiB,EAAEk6B,UAAU95B,CAAC,GAAG,SAASyiC,GAAG9iC,GAAGA,GAAG,CAAC,SAAS+iC,GAAG1iC,EAAEL,GAAG,IAAII,EAAE,mBAAmBJ,EAAEA,EAAE4hC,GAAG,IAAIvhC,EAAEgB,OAAOjB,KAAKq4B,EAAEN,iBAAiB/C,GAAEvyB,EAAEmgC,uBAA0BF,KAAI,WAAW,IAAI9iC,GAAE,IAAKy4B,EAAEX,gBAAgBmK,GAAGxJ,EAAEX,iBAAiBmK,GAAGvI,QAAQz5B,EAAEwhC,GAAGC,MAAM,UAAUrhC,EAAEkE,IAAIvE,GAAGC,IAAIG,GAAG,GAAE,CAAC,IAAI6iC,IAAG,EAAG,SAASC,KAAKD,IAAG,CAAE,CAAC,SAASE,KAAKF,IAAG,CAAE,CAAC,IAAIG,GAAG,KAAK,SAASC,GAAGrjC,GAAG,IAAIS,EAAEC,EAAET,EAAEkB,EAAEuB,GAAG+1B,EAAEP,mBAAmBj4B,EAAED,EAAEsjC,aAAa7iC,OAAE,IAASR,EAAE2hC,GAAG3hC,EAAEA,EAAED,EAAEujC,aAAa7iC,OAAE,IAAST,EAAE2hC,GAAG3hC,EAAEA,EAAED,EAAEwjC,uBAAuBriC,OAAE,IAASlB,EAAE2hC,GAAG3hC,EAAED,OAAE,KAAUA,EAAEA,EAAEyjC,sBAAsB7/B,EAAE5D,EAAEojC,GAAG,IAAI1gC,GAAE,SAAS1C,GAAG,IAAIQ,EAAEyiC,KAAKziC,EAAE87B,GAAGjD,GAAGr5B,GAAGsB,SAAQ,SAAStB,GAAG,IAAIC,EAAEI,EAAED,EAAE,cAAcJ,EAAE0jC,MAAM,EAAE1jC,EAAE2jC,WAAWtiC,SAASwgC,GAAG7hC,EAAE2jC,WAAW,MAAMlL,EAAER,sBAAsB92B,EAAEnB,EAAE4jC,QAAQnjC,EAAET,EAAE4jC,SAAS,eAAe5jC,EAAE0jC,MAAM1jC,EAAE4jC,OAAO1B,YAAYzJ,EAAER,sBAAsB92B,EAAEnB,EAAE4jC,OAAO1B,YAAY,eAAeliC,EAAE0jC,MAAM7B,GAAG7hC,EAAE4jC,UAAU7M,EAAE30B,QAAQpC,EAAE6jC,iBAAiB,UAAU7jC,EAAE6jC,gBAA2BzjC,GAAXC,EAAEL,EAAE4jC,QA
AWnM,aAAap3B,EAAEo3B,aAAa3H,GAAG,KAAKzvB,EAAEA,EAAEo3B,aAAap3B,EAAEo3B,aAAaxB,GAAG,KAAK71B,GAAGC,IAAIA,GAAGJ,EAAEs9B,GAAGjE,GAAGt5B,EAAE4jC,UAAUrI,OAAOt7B,EAAEA,EAAE4F,SAAS7F,EAAE4jC,OAAO1J,aAAapK,EAAEzvB,GAAGG,GAAGP,GAAGD,EAAE4jC,OAAO1J,aAAajE,EAAEh2B,KAAKA,EAAED,EAAE4jC,SAAS3jC,EAAEs5B,WAAWt5B,EAAEs5B,UAAUuK,UAAU7jC,EAAEs5B,UAAUuK,SAASrL,EAAEZ,mBAAmBn3B,EAAEV,EAAE4jC,QAAQ,IAAG,IAAGn+B,GAAG29B,GAAGW,QAAQ/jC,EAAE,CAACgkC,WAAU,EAAG5I,YAAW,EAAG6I,eAAc,EAAGC,SAAQ,IAAK,CAAic,SAASC,GAAGnkC,EAAEC,GAAG,IAAII,EAAE,EAAEe,UAAUC,aAAQ,IAASpB,EAAEA,EAAE,CAACmkC,aAAY,GAAIhkC,EAAzgB,SAAYJ,GAAG,IAAIC,EAAED,EAAEy3B,aAAa,eAAep3B,EAAEL,EAAEy3B,aAAa,aAAar3B,OAAE,IAASJ,EAAEqkC,UAAUrkC,EAAEqkC,UAAUzK,OAAO,GAAGp5B,EAAE+8B,GAAGjE,GAAGt5B,IAAI,OAAOQ,EAAE+6B,SAAS/6B,EAAE+6B,OAAOe,IAAIr8B,GAAGI,IAAIG,EAAE+6B,OAAOt7B,EAAEO,EAAEqF,SAASxF,GAAGG,EAAEqF,UAAUrF,EAAE+6B,SAAS/6B,EAAE+6B,QAAQ,EAAEn7B,EAAEiB,SAASb,EAAEqF,UAAUxF,EAAEG,EAAE+6B,OAAOn7B,EAAEJ,EAAEqkC,WAAW7H,GAAGn8B,IAAI,CAAC,GAAGD,IAAI+8B,GAAG38B,EAAE+6B,OAAOO,GAAG97B,EAAEqkC,eAAe7jC,EAAEqF,UAAU4yB,EAAEqE,cAAc98B,EAAEskC,YAAYtkC,EAAEskC,WAAWC,WAAWC,KAAKC,YAAYjkC,EAAEqF,SAAS7F,EAAEskC,WAAWI,OAAOlkC,CAAC,CAA4EmkC,CAAG3kC,GAAGQ,EAAEJ,EAAEyF,SAASpF,EAAEL,EAAEm7B,OAAO76B,EAAEN,EAAEw9B,KAAKz8B,GAAGlB,EAAEo5B,IAAIl4B,EAAEnB,GAAGo7B,YAAYz1B,QAAO,SAAS3F,EAAEC,GAAG,MAAM,UAAUD,EAAEmF,MAAM,UAAUnF,EAAEmF,OAAOnF,EAAEC,EAAEkF,MAAMlF,EAAE0B,OAAO3B,CAAC,GAAE,CAAC,GAAGI,EAAEe,EAAEs2B,aAAa,SAASt2B,EAAEA,EAAEs2B,aAAa,oBAAoBgB,EAAET,WAAW53B,EAAEH,EAAE,mBAAmB,GAAGoE,OAAOo0B,EAAEZ,iBAAiB,WAAWxzB,OAAOlD,GAAG+3B,MAAOj5B,EAAE,eAAe,OAAOA,EAAE2kC,UAAU,UAAU3kC,GAAoCsB,GAAjCtB,EAAEk+B,GAAG,sBAAsB,CAAC,EAAEn+B,GAAKK,EAAE+jC,aAAa/jC,GAAGkB,EAAEvB,GAAGy3B,aAAa,SAASl2B,EAAE,GAAGA,EAAElB,EAAEA,EAAEm5B,MAAM,KAAK7zB,QAAO,SAAS3F,EAAEC,GAAsBA,GAAfI,EAAEJ,EAAEu5B,MAAM,MAAS,GAAvB,IAA0Bn5B,EAAEA,EAAE4E,MAAM,GAAG,OAAOhF,GAAG,EAAEI,EAAEgB,SAASrB,EAAEC,GAAGI,EAAEg7B,KAAK,KAAKzB,QAAQ55B,CAAC,GAAE,CAAC,GAAGuB,GAAG,IAAG,OAAOmE,EAAE,CAACG,SAASrF,EAAEq/B,MAAM7/B,EAAEy3B,aAAa,SAASsI,QAAQ//B,EAAEy3B,aAAa,oBAAoB8D,OAAO96B,EAAEk/B,UAAU9G,EAAEjE,KAAK,CAAC/uB,SAAS,KAAK01B,OAAO,KAAKqC,KAAK,IAAIkC,OAAO,KAAKF,QAAO,EAAGI,MAAM,CAACK,QAAQ3/B,EAAE4E,OAAO/D,EAAE65B,WAAWj6B,IAAIlB,EAAE,CAAC,IAAI4kC,GAAG/J,GAAGx1B,OAAO,SAASw/B,GAAG9kC,GAAG,IAAIC,EAAE,SAASw4B,EAAEX,eAAeqM,GAAGnkC,EAAE,CAACokC,aAAY,IAAKD,GAAGnkC,GAAG,OAAOC,EAAE+/B,MAAMK,QAAQj+B,QAAQw0B,GAAGyH,GAAG,qBAAqBr+B,EAAEC,GAAGo+B,GAAG,iCAAiCr+B,EAAEC,EAAE,CAAC,IAAI8kC,GAAG,IAAIzgC,IAAI,SAAS0gC,GAAGhlC,GAAG,IAAII,EAAE,EAAEgB,UAAUC,aAAQ,IAASD,UAAU,GAAGA,UAAU,GAAG,KAAK,IAAIqE,EAAE,OAAOm5B,QAAQqG,UAAU,SAASzkC,EAAER,GAAG,OAAOC,EAAEuE,IAAI,GAAGH,OAAO6xB,EAAE,KAAK7xB,OAAOrE,GAAG,CAAC,SAASS,EAAET,GAAG,OAAOC,EAAEwiC,OAAO,GAAGp+B,OAAO6xB,EAAE,KAAK7xB,OAAOrE,GAAG,CAAC,IAAIC,EAAE2D,EAAE5B,gBAAgBu3B,UAAUl5B,EAAEo4B,EAAEqE,aAAaiI,GAAG1O,EAAE9xB,KAAI,SAASvE,GAAG,MAAM,MAAMqE,OAAOrE,EAAE,IAAGqE,OAAO1D,OAAOC,KAAKikC,KAAKxkC,EAAEo9B,SAAS,OAAOp9B,EAAEY,KAAK,MAAM,IAAIP,EAAE,CAAC,IAAI2D,OAAOuyB,EAAE,UAAUvyB,OAAOge,EAAE,OAAOhe,OAAOhE,EAAEkE,KAAI,SAASvE,GAAG,MAAM,IAAIqE,OAAOrE,EAAE,UAAUqE,OAAOge,EAAE,KAAK,KAAIgZ,KAAK,MAAM,GAAG,IAAI36B,EAAEW,OAAO,OAAOu9B,QAAQqG,UAAU5kC,EAAE,GAAG,IAAIA,EAAEg5B,GAAGr5B,EAAEklC,iBAAiBxkC,GAAG,CAAC,MAAMV,GAAG,CAAC,KAAK,EAAEK,EAAEgB,QAAQ,OAAOu9B,QAAQqG,UAAUzkC,EAAE,WAAWC,EAAE,YAAY,IAAIU,EAAEsgC,GAAGC,MAAM,UAAUngC,EAAElB,EAAEsF,QAAO,SAAS3F,EAAEC,GAAG,IAAI,IAAII,EAAEykC,GAAG7kC,GAAGI,GAAGL,EAAEiB,KAAKZ,EAAE,CAAC,MAAML,GAAGm2B,GAAG,gBAAgBn2B,EAAEmF,MAAMg8B,QAAQC,MAAMphC,EAAE,CAAC,OAAOA,CAAC,GAAE,IAAI,OAAO,IAAI4+B,SAAQ,SAAS3+B,EAAEI,GAAGu+B,QAAQuG,IAAI5jC,GAAG6jC,MAAK,SAAS
plC,GAAG+iC,GAAG/iC,GAAE,WAAWQ,EAAE,UAAUA,EAAE,YAAYC,EAAE,WAAW,mBAAmBL,GAAGA,IAAIe,IAAIlB,GAAG,GAAE,IAAGolC,OAAM,SAASrlC,GAAGmB,IAAId,EAAEL,EAAE,GAAE,GAAE,CAAC,SAASslC,GAAGtlC,GAAG,IAAIC,EAAE,EAAEmB,UAAUC,aAAQ,IAASD,UAAU,GAAGA,UAAU,GAAG,KAAK0jC,GAAG9kC,GAAGolC,MAAK,SAASplC,GAAGA,GAAG+iC,GAAG,CAAC/iC,GAAGC,EAAE,GAAE,CAA2H,SAASslC,GAAGvlC,GAAG,IAAIC,EAAE,EAAEmB,UAAUC,aAAQ,IAASD,UAAU,GAAGA,UAAU,GAAG,CAAC,EAAEf,EAAEJ,EAAE0/B,UAAUv/B,OAAE,IAASC,EAAEw4B,EAAEx4B,EAAEG,OAAE,KAAUH,EAAEJ,EAAE2/B,SAASv/B,EAAEI,OAAE,KAAUJ,EAAEJ,EAAE20B,MAAM,KAAKv0B,EAAEK,OAAE,KAAUL,EAAEJ,EAAE6/B,QAAQ,KAAKz/B,EAAEc,OAAE,KAAUd,EAAEJ,EAAE4/B,OAAO,KAAKx/B,EAAEkB,OAAE,KAAUlB,EAAEJ,EAAE8/B,SAAS,KAAK1/B,EAAEyB,OAAE,KAAUzB,EAAEJ,EAAEogC,SAAS,GAAGhgC,EAAEgC,OAAE,KAAUhC,EAAEJ,EAAEm7B,YAAY,CAAC,EAAE/6B,EAAEiC,OAAE,KAAUjC,EAAEJ,EAAEqF,QAAQ,CAAC,EAAEjF,EAAE,GAAGL,EAAE,CAAC,IAAIuC,EAAEvC,EAAEu7B,OAAO/4B,EAAExC,EAAE6F,SAASpD,EAAEzC,EAAE4F,KAAK,OAAO05B,GAAG55B,EAAE,CAACg+B,KAAK,QAAQ1jC,IAAG,WAAW,OAAOo+B,GAAG,2BAA2B,CAACoH,eAAexlC,EAAEylC,OAAOxlC,IAAIw4B,EAAET,WAAW72B,EAAEkB,EAAE,mBAAmB,GAAGgC,OAAOo0B,EAAEZ,iBAAiB,WAAWxzB,OAAO9C,GAAG23B,MAAO72B,EAAE,eAAe,OAAOA,EAAEuiC,UAAU,UAAUnF,GAAG,CAACrhB,MAAM,CAACshB,KAAKsB,GAAGv+B,GAAGmyB,KAAKn0B,EAAEugC,GAAGvgC,EAAEmF,MAAM,CAACs6B,OAAM,EAAGC,MAAM,KAAKC,OAAO,KAAKx6B,KAAK,CAAC,IAAI21B,OAAOh5B,EAAEsD,SAASrD,EAAEm9B,UAAUj6B,EAAEA,EAAE,CAAC,EAAEmzB,GAAGz4B,GAAGw/B,OAAOp/B,EAAEq/B,MAAM1+B,EAAE2+B,OAAOp/B,EAAEq/B,QAAQx+B,EAAEy+B,MAAM,CAAC5E,WAAW/4B,EAAEiD,OAAOhD,EAAE+9B,QAAQv+B,IAAI,GAAE,CAAC,CAAv8Bu0B,EAAE9xB,KAAI,SAASvE,GAAG+kC,GAAGvgC,IAAI,MAAMH,OAAOrE,GAAG,IAAGW,OAAOC,KAAK21B,EAAEH,IAAI7xB,IAAIwgC,GAAGvgC,IAAIC,KAAKsgC,KAAKpkC,OAAOC,KAAK21B,EAAE9L,IAAIlmB,IAAIwgC,GAAGvgC,IAAIC,KAAKsgC,KAAu1BA,GAAG1iC,EAAE0iC,IAAIjjC,EAAE,CAAC04B,OAAO,WAAW,MAAM,CAAC50B,MAAMxF,EAAEmlC,GAAG,SAASvlC,GAAG,IAAIC,EAAE,EAAEmB,UAAUC,aAAQ,IAASD,UAAU,GAAGA,UAAU,GAAG,CAAC,EAAEf,GAAGL,GAAG,CAAC,GAAG4F,KAAK5F,EAAEs+B,GAAGt+B,GAAG,CAAC,GAA4C,OAAzCA,GAAGA,EAAEC,EAAE20B,SAAS50B,GAAG,CAAC,GAAG4F,KAAK5F,EAAEs+B,GAAGt+B,GAAG,CAAC,IAAWI,EAAEC,EAAEqF,EAAEA,EAAE,CAAC,EAAEzF,GAAG,CAAC,EAAE,CAAC20B,KAAK50B,IAAI,IAAI,IAAII,CAAC,EAAEmF,MAAM,WAAW,MAAM,CAACmgC,0BAA0B,SAAS1lC,GAAG,OAAOA,EAAEsjC,aAAa0B,GAAGhlC,EAAEujC,aAAa+B,GAAGtlC,CAAC,EAAE,EAAE2lC,SAAS,SAAS3lC,GAAGA,EAAE2+B,MAAM,SAAS3+B,GAAG,IAAIC,EAAED,EAAEqK,KAAkB,OAAbrK,EAAEA,EAAE4lC,SAAgBZ,QAAG,IAAS/kC,EAAE2D,EAAE3D,OAAE,IAASD,EAAE,WAAW,EAAEA,EAAE,EAAEA,EAAE6lC,+BAA+B,SAASzlC,EAAEJ,GAAG,IAAIQ,EAAER,EAAE6F,SAASpF,EAAET,EAAE6/B,MAAMn/B,EAAEV,EAAE+/B,QAAQ5+B,EAAEnB,EAAEu7B,OAAOh6B,EAAEvB,EAAE2/B,UAAU79B,EAAE9B,EAAE4/B,OAAO3/B,EAAED,EAAE40B,KAAKvyB,EAAErC,EAAE8/B,OAAOx9B,EAAEtC,EAAEggC,MAAM,OAAO,IAAIpB,SAAQ,SAASv+B,EAAEL,GAAG4+B,QAAQuG,IAAI,CAACjE,GAAG1gC,EAAEW,GAAGlB,EAAE4F,SAASq7B,GAAGjhC,EAAE4F,SAAS5F,EAAEs7B,QAAQqD,QAAQqG,QAAQ,CAAC/E,OAAM,EAAGC,MAAM,IAAIC,OAAO,IAAIx6B,KAAK,CAAC,MAAMw/B,MAAK,SAASplC,GAAgBA,GAATC,EAAEsC,EAAEvC,EAAE,IAAO,GAAjB,IAAoBC,EAAEA,EAAE,GAAGI,EAAE,CAACD,EAAEq/B,GAAG,CAACrhB,MAAM,CAACshB,KAAK1/B,EAAE40B,KAAK30B,GAAGs7B,OAAOp6B,EAAE0E,SAASrF,EAAEm/B,UAAUp+B,EAAEq+B,OAAO99B,EAAEg+B,OAAOz9B,EAAEw9B,MAAMp/B,EAAEs/B,QAAQr/B,EAAEs/B,MAAM19B,EAAE29B,WAAU,KAAM,IAAGoF,MAAMrlC,EAAE,GAAE,EAAEA,EAAE8lC,qBAAqB,SAAS9lC,GAAG,IAAIC,EAAEI,EAAEL,EAAEknB,SAAS9mB,EAAEJ,EAAEo7B,WAAW56B,EAAER,EAAE0/B,KAAKj/B,EAAET,EAAE2/B,UAAyB,OAAO,GAAtB3/B,EAAE25B,GAAG35B,EAAEsF,SAAmBjE,SAASjB,EAAEugC,MAAM3gC,GAAG65B,GAAGp5B,KAAKR,EAAEo+B,GAAG,oCAAoC,CAACqB,KAAKl/B,EAAEm/B,UAAUl/B,EAAEslC,eAAevlC,EAAE2/B,MAAM6F,UAAUxlC,EAAE2/B,SAAS9/B,EAAEY,KAAKhB,GAAGO,EAAEoF,MAAM,CAACshB,SAAS7mB,EAAE+6B,WAAWh7B,EAAE,CA
AC,GAAGH,EAAE,CAACu6B,OAAO,WAAW,MAAM,CAACyL,MAAM,SAASjmC,GAAG,IAAIK,EAAE,EAAEe,UAAUC,aAAQ,IAASD,UAAU,GAAGA,UAAU,GAAG,CAAC,EAAEnB,EAAEI,EAAEggC,QAAQjgC,OAAE,IAASH,EAAE,GAAGA,EAAE,OAAOq/B,GAAG,CAACoE,KAAK,UAAS,WAAWtF,GAAG,2BAA2B,CAAC8H,UAAUlmC,EAAEylC,OAAOplC,IAAI,IAAIJ,EAAE,GAAG,OAAOD,GAAE,SAASA,GAAG+B,MAAM2C,QAAQ1E,GAAGA,EAAEuE,KAAI,SAASvE,GAAGC,EAAEA,EAAEoE,OAAOrE,EAAEu/B,SAAS,IAAGt/B,EAAEA,EAAEoE,OAAOrE,EAAEu/B,SAAS,IAAG,CAAC,CAAC1U,IAAI,OAAOuQ,WAAW,CAACkF,MAAM,CAAC,GAAGj8B,OAAOo0B,EAAEb,UAAU,YAAYvzB,OAAOhC,EAAEjC,IAAIi7B,KAAK,MAAMnU,SAASjnB,GAAG,GAAE,EAAE,GAAGqC,EAAE,CAACk4B,OAAO,WAAW,MAAM,CAAC2L,QAAQ,SAAS3lC,GAAG,IAA6JR,EAAzJS,EAAE,EAAEW,UAAUC,aAAQ,IAASD,UAAU,GAAGA,UAAU,GAAG,CAAC,EAAYV,OAAE,KAAZV,EAAES,EAAEo/B,OAAmB,KAAK7/B,EAAcmB,OAAE,KAAdnB,EAAES,EAAE4/B,SAAqB,GAAGrgC,EAAiBuB,OAAE,KAAjBvB,EAAES,EAAE26B,YAAwB,CAAC,EAAEp7B,EAAa8B,OAAE,KAAb9B,EAAES,EAAE6E,QAAoB,CAAC,EAAEtF,EAAE,OAAOs/B,GAAG,CAACoE,KAAK,UAAU7C,QAAQrgC,IAAG,WAAW,OAAO49B,GAAG,2BAA2B,CAACyC,QAAQrgC,EAAEilC,OAAOhlC,IAAmIR,GAA/HD,EAAE,CAAC6gC,QAAQrgC,EAAEuE,WAAW86B,MAAMn/B,EAAEs/B,MAAM,CAAC5E,WAAW75B,EAAE+D,OAAOxD,EAAEu+B,QAAQ,CAAC,GAAGh8B,OAAOo0B,EAAEb,UAAU,oBAAoBvzB,OAAOhC,EAAElB,OAAU0/B,QAAQxgC,EAAEL,EAAE6/B,MAAgB7/B,EAAE0F,EAAEA,EAAEA,EAAE,CAAC,GAAnBtF,EAAEJ,EAAEggC,OAAmB5E,YAAY/6B,EAAE,CAACw/B,MAAMx/B,GAAG,CAAC,GAAG,CAAC,EAAE,CAACigC,MAAMlgC,EAAEigC,QAAQhF,KAAK,OAAO,GAAGj7B,EAAEu5B,GAAGv5B,EAAEkF,SAASjE,SAASrB,EAAE2gC,MAAMvgC,IAAIA,EAAE,IAAIa,KAAK,CAAC4pB,IAAI,OAAOuQ,WAAWp7B,EAAEknB,SAAS,CAACjnB,KAAKI,GAAGD,EAAEa,KAAK,CAAC4pB,IAAI,OAAOuQ,WAAW,CAACkF,MAAM,WAAWpZ,SAAS,CAAC7mB,KAAKD,EAAE,IAAIJ,EAAEC,EAAEI,EAAED,CAAC,GAAE,EAAE,GAAGoC,EAAE,CAACg4B,OAAO,WAAW,MAAM,CAAC4L,KAAK,SAASpmC,GAAG,IAA4LK,EAAxLJ,EAAE,EAAEmB,UAAUC,aAAQ,IAASD,UAAU,GAAGA,UAAU,GAAG,CAAC,EAAgBhB,OAAE,KAAhBC,EAAEJ,EAAE0/B,WAAuB9G,EAAEx4B,EAAYG,OAAE,KAAZH,EAAEJ,EAAE4/B,OAAmB,KAAKx/B,EAAcI,OAAE,KAAdJ,EAAEJ,EAAEogC,SAAqB,GAAGhgC,EAAiBK,OAAE,KAAjBL,EAAEJ,EAAEm7B,YAAwB,CAAC,EAAE/6B,EAAac,OAAE,KAAbd,EAAEJ,EAAEqF,QAAoB,CAAC,EAAEjF,EAAE,OAAOi/B,GAAG,CAACoE,KAAK,OAAO7C,QAAQ7gC,IAAG,WAAW,OAAOo+B,GAAG,2BAA2B,CAACyC,QAAQ7gC,EAAEylC,OAAOxlC,IAAI2gC,GAAG,CAACC,QAAQ7gC,EAAE2/B,UAAUj6B,EAAEA,EAAE,CAAC,EAAEmzB,GAAGz4B,GAAGy/B,MAAMr/B,EAAEw/B,MAAM,CAAC5E,WAAW16B,EAAE4E,OAAOnE,EAAEk/B,QAAQ,CAAC,GAAGh8B,OAAOo0B,EAAEb,UAAU,iBAAiBvzB,OAAOhC,EAAE5B,MAAM,GAAE,EAAE,EAAEklC,SAAS,SAAS3lC,GAAGA,EAAEqmC,mBAAmB,SAASrmC,EAAEC,GAAG,IAAII,EAAED,EAAEH,EAAE4/B,MAAMr/B,EAAEP,EAAE0/B,UAAUl/B,EAAER,EAAE+/B,MAAMt/B,EAAE,KAAKS,EAAE,KAAK,OAAO2E,IAAIzF,EAAEimC,SAASC,iBAAiBvmC,GAAGwmC,SAAS,IAAI9lC,GAAGT,EAAED,EAAEymC,yBAAyBtG,MAAM9/B,EAAEc,EAAElB,EAAEmgC,OAAO//B,GAAGo4B,EAAET,WAAW53B,IAAIK,EAAE26B,WAAW,eAAe,QAAQwD,QAAQqG,QAAQ,CAACjlC,EAAE4gC,GAAG,CAACC,QAAQ7gC,EAAEm6B,UAAUgG,MAAMz/B,EAAE0/B,OAAOj/B,EAAEw+B,UAAUn/B,EAAEq/B,MAAMz/B,EAAE4/B,MAAMv/B,EAAEw/B,WAAU,KAAM,CAAC,GAA7wG,IAAgxGyG,GAAG,IAAI3M,OAAO,IAAI,MAAM4M,GAAG,CAAC,QAAQ,SAAS,SAASC,GAAGnkC,EAAEC,GAAG,IAAIiB,EAAE,GAAGU,OAAO0a,GAAG1a,OAAO3B,EAAEg3B,QAAQ,IAAI,MAAM,OAAO,IAAIkF,SAAQ,SAASv+B,EAAEL,GAAG,GAAG,OAAOyC,EAAEg1B,aAAa9zB,GAAG,OAAOtD,IAAI,IAAID,EAAEI,EAAEC,EAAEC,EAAET,EAAEkB,EAAEI,EAAEO,EAAEu3B,GAAG52B,EAAEykB,UAAUpmB,QAAO,SAASd,GAAG,OAAOA,EAAEy3B,aAAatY,KAAKzc,CAAC,IAAG,GAAGL,EAAEQ,EAAE0jC,iBAAiB9jC,EAAEC,GAAGJ,EAAED,EAAEwkC,iBAAiB,eAAe3H,MAAMrI,GAAGt0B,EAAEF,EAAEwkC,iBAAiB,eAAerkC,EAAEH,EAAEwkC,iBAAiB,WAAW,GAAG/kC,IAAIQ,EAAE,OAAOG,EAAEqkC,YAAYhlC,GAAGzB,IAAIiC,GAAG,SAASE,GAAG,KAAKA,GAAGrB,EAAEkB,EAAEwkC,iBAAiB,WAAW5mC,GAAG,CAAC,SAASmC,QAAQE,EAAE,IAAImoB,EAAE2L,EAAEh2B,GAAG,CAAC,QAAQ,UAAU,QAAQ,OAAO,UAAU,SAAS,OAAOgC,QAAQE,EAAE,IAAIk0B,EAAEv2B,GA
AGqC,EAAE,GAAGykC,eAAejQ,EAAE72B,GAAGsC,GAAGC,GAAMH,GAAGG,EAAErB,GAAGu4B,QAAQgN,GAAG,IAAIzmC,EAAE,EAAEkB,GAAGoB,EAAEF,GAAGhB,OAAOE,EAAE,QAAQiB,EAAED,EAAEw5B,WAAW97B,KAAKuC,GAAG,OAAOvC,EAAE,EAAEkB,GAAG,QAAQI,EAAEgB,EAAEw5B,WAAW97B,EAAE,KAAKsB,GAAG,MAAM,MAAMiB,EAAE,OAAOjB,EAAE,MAAM,MAAMiB,EAAEA,EAAEmkC,GAAG,IAAIplC,GAAGA,GAAGolC,GAAG,GAApLtkC,EAAuL,CAACV,MAAMm6B,IAAIv6B,EAAE,IAAIc,EAAEhB,QAAQgB,EAAE,KAAKA,EAAE,IAAIA,EAAE,GAAGA,GAAG2kC,YAAYxkC,GAAGjB,IAAKI,MAAMJ,EAAEc,EAAE2kC,YAAY3kC,EAAEC,EAAE,GAAG2kC,WAAW,eAAe3kC,EAAE66B,GAAG/8B,EAAEoC,GAAGhC,EAAE8B,EAAED,IAAIG,EAAEk6B,GAAGr6B,EAAEG,GAAGH,EAAE86B,GAAG,MAAM96B,IAAIA,EAAEG,IAAIH,EAAE,CAACk5B,OAAO,MAAM11B,SAASxD,GAAG,OAAO,CAACk5B,OAAO,KAAK11B,SAAS,OAAOA,UAAUxD,EAAEk5B,SAASj5B,EAAED,EAAEwD,SAASzF,EAAEiC,EAAEk5B,UAAUj5B,GAAGf,GAAGO,GAAGA,EAAE21B,aAAa3H,KAAK1vB,GAAG0B,EAAE21B,aAAaxB,KAAKz1B,EAAEH,KAAKoC,EAAEy3B,aAAav2B,EAAEnD,GAAGsB,GAAGW,EAAEqkC,YAAYhlC,IAAIpB,GAAGD,EAAE,CAACoF,SAAS,KAAKg6B,MAAM,KAAKE,QAAQ,KAAKxE,OAAO,KAAKoE,UAAU9G,EAAE+G,QAAO,EAAGhL,KAAK,CAAC/uB,SAAS,KAAK01B,OAAO,KAAKqC,KAAK,IAAIkC,OAAO,KAAKE,MAAM,CAACK,QAAQ,GAAG/6B,OAAO,CAAC,EAAE81B,WAAW,CAAC,KAAK4E,OAAO5E,WAAWjc,GAAGzc,EAAEw+B,GAAG5+B,EAAElC,GAAGglC,MAAK,SAASplC,GAAG,IAAIC,EAAEw/B,GAAG/5B,EAAEA,EAAE,CAAC,EAAEjF,GAAG,CAAC,EAAE,CAAC2d,MAAM,CAACshB,KAAK1/B,EAAE40B,KAA3imB,CAAC2G,OAAO,KAAK11B,SAAS,KAAK+3B,KAAK,KAAshmBrC,OAAOn7B,EAAEyF,SAASrF,EAAEw/B,MAAMt/B,EAAEu/B,WAAU,KAAMjgC,EAAE4D,EAAEm+B,gBAAgB,6BAA6B,OAAO,aAAar/B,EAAED,EAAE83B,aAAav6B,EAAEyC,EAAE6hC,YAAY7hC,EAAE4/B,YAAYriC,GAAGA,EAAEuiC,UAAUtiC,EAAEsE,IAAI42B,IAAIE,KAAK,MAAM54B,EAAEogC,gBAAgBl/B,GAAGtD,GAAG,IAAGglC,MAAMrlC,KAAKK,GAAG,GAAE,CAAC,SAAS6mC,GAAGlnC,GAAG,OAAO4+B,QAAQuG,IAAI,CAACyB,GAAG5mC,EAAE,YAAY4mC,GAAG5mC,EAAE,YAAY,CAAC,SAASmnC,GAAGnnC,GAAG,QAAQA,EAAEkiC,aAAa/hC,SAAS8B,OAAOmgB,EAAEhgB,QAAQpC,EAAEq6B,QAAQC,gBAAgBt6B,EAAEy3B,aAAatY,IAAInf,EAAEkiC,YAAY,QAAQliC,EAAEkiC,WAAW7H,QAAQ,CAAC,SAAS+M,GAAG5mC,GAAG,GAAGiF,EAAE,OAAO,IAAIm5B,SAAQ,SAAS5+B,EAAEC,GAAG,IAAII,EAAEg5B,GAAG74B,EAAE0kC,iBAAiB,MAAMpkC,OAAOqmC,IAAI5iC,IAAI2iC,IAAI9mC,EAAEqhC,GAAGC,MAAM,wBAAwBwB,KAAKtE,QAAQuG,IAAI9kC,GAAG+kC,MAAK,WAAWhlC,IAAI+iC,KAAKnjC,GAAG,IAAGqlC,OAAM,WAAWjlC,IAAI+iC,KAAKljC,GAAG,GAAE,GAAE,CAAC,SAASonC,GAAGrnC,GAAG,OAAOA,EAAE+mC,cAAcvN,MAAM,KAAK7zB,QAAO,SAAS3F,EAAEC,GAAG,IAAII,EAAEJ,EAAE8mC,cAAcvN,MAAM,KAAYp5B,GAAPH,EAAEI,EAAE,GAAKA,EAAE4E,MAAM,GAAGo2B,KAAK,MAAK,GAAGp7B,GAAG,MAAMG,EAAE,OAAOJ,EAAE+4B,OAAM,EAAG/4B,EAAE,GAAGC,GAAG,MAAMG,EAAE,OAAOJ,EAAEg5B,OAAM,EAAGh5B,EAAE,GAAGI,EAAEknC,WAAWlnC,GAAGmnC,MAAMnnC,GAAG,OAAOJ,EAAE,OAAOC,GAAG,IAAI,OAAOD,EAAE84B,KAAK94B,EAAE84B,KAAK14B,EAAE,MAAM,IAAI,SAASJ,EAAE84B,KAAK94B,EAAE84B,KAAK14B,EAAE,MAAM,IAAI,OAAOJ,EAAEo1B,EAAEp1B,EAAEo1B,EAAEh1B,EAAE,MAAM,IAAI,QAAQJ,EAAEo1B,EAAEp1B,EAAEo1B,EAAEh1B,EAAE,MAAM,IAAI,KAAKJ,EAAE8vB,EAAE9vB,EAAE8vB,EAAE1vB,EAAE,MAAM,IAAI,OAAOJ,EAAE8vB,EAAE9vB,EAAE8vB,EAAE1vB,EAAE,MAAM,IAAI,SAASJ,EAAE6xB,OAAO7xB,EAAE6xB,OAAOzxB,EAAE,OAAOJ,CAAC,GAAE,CAAC84B,KAAK,GAAG1D,EAAE,EAAEtF,EAAE,EAAEiJ,OAAM,EAAGC,OAAM,EAAGnH,OAAO,GAAG,CAAC,IAA2M2V,GAAvMC,IAAG,EAAGC,GAAG,CAACtS,EAAE,EAAEtF,EAAE,EAAEqQ,MAAM,OAAOC,OAAO,QAAQ,SAASuH,GAAG3nC,GAAG,OAAOA,EAAEo7B,aAAap7B,EAAEo7B,WAAWpK,QAAS,EAAE5vB,UAAUC,aAAQ,IAASD,UAAU,KAAKA,UAAU,MAAOpB,EAAEo7B,WAAWpK,KAAK,SAAShxB,CAAC,CAAQwnC,GAAchJ,GAAcj9B,EAAE,CAACkB,EAAEX,EAAE7B,EAAEqC,EAAEE,EAAE,CAAC+C,MAAM,WAAW,MAAM,CAACmgC,0BAA0B,SAAS1lC,GAAG,OAAOA,EAAEwjC,uBAAuB4D,GAAGpnC,CAAC,EAAE,EAAE2lC,SAAS,SAAS3lC,GAAGA,EAAE4nC,mBAAmB,SAAS5nC,GAAGA,EAAEA,EAAEqK,KAAKouB,EAAER,sBAAsBmP,QAAG,IAASpnC,EAAE4D,EAAE5D,EAAE,CAAC,GAAG,CAACw6B,OAAO,WAAW,MAAM,CAACC,IAAI,CAACo
N,QAAQ,WAAW3E,KAAKuE,IAAG,CAAE,GAAG,EAAEliC,MAAM,WAAW,MAAM,CAAC8K,UAAU,WAAWgzB,GAAGlF,GAAG,4BAA4B,CAAC,GAAG,EAAEM,OAAO,WAAW2E,IAAIA,GAAG0E,YAAY,EAAEhJ,MAAM,SAAS9+B,GAAGA,EAAEA,EAAEyjC,qBAAqBgE,GAAGtE,KAAKE,GAAGlF,GAAG,4BAA4B,CAACsF,qBAAqBzjC,IAAI,EAAE,GAAG,CAACw6B,OAAO,WAAW,MAAM,CAACyE,MAAM,CAACU,UAAU0H,IAAI,EAAE9hC,MAAM,WAAW,MAAM,CAACwiC,oBAAoB,SAAS/nC,EAAEC,GAAyC,OAAtCA,EAAEA,EAAEw3B,aAAa,wBAAgCz3B,EAAE2/B,UAAU0H,GAAGpnC,IAAID,CAAC,EAAE,EAAE2lC,SAAS,SAAS3lC,GAAGA,EAAEgoC,kCAAkC,SAAShoC,GAAG,IAAIC,EAAED,EAAE0/B,KAAKr/B,EAAEL,EAAE2/B,UAAUv/B,EAAEJ,EAAE+lC,eAAevlC,EAAER,EAAEgmC,UAAUvlC,EAAE,CAACk/B,UAAU,aAAat7B,OAAOjE,EAAE,EAAE,UAA0T,OAAhTJ,EAAE,aAAaqE,OAAO,GAAGhE,EAAE+0B,EAAE,MAAM/wB,OAAO,GAAGhE,EAAEyvB,EAAE,MAAM1vB,EAAE,SAASiE,OAAOhE,EAAEy4B,KAAK,IAAIz4B,EAAE04B,OAAO,EAAE,GAAG,MAAM10B,OAAOhE,EAAEy4B,KAAK,IAAIz4B,EAAE24B,OAAO,EAAE,GAAG,MAAM34B,EAAE,UAAUgE,OAAOhE,EAAEwxB,OAAO,SAA4I,CAAChH,IAAI,IAAIuQ,WAAW11B,EAAE,CAAC,GAA1JlF,EAAE,CAACynC,MAAMxnC,EAAEynC,MAAM,CAACvI,UAAU,GAAGt7B,OAAOrE,EAAE,KAAKqE,OAAOjE,EAAE,KAAKiE,OAAOhE,IAAI8nC,KAAK,CAACxI,UAAU,aAAat7B,OAAO7D,EAAE,GAAG,EAAE,aAA6CynC,OAAO/gB,SAAS,CAAC,CAAC2D,IAAI,IAAIuQ,WAAW11B,EAAE,CAAC,EAAElF,EAAE0nC,OAAOhhB,SAAS,CAAC,CAAC2D,IAAI5qB,EAAE2F,KAAKilB,IAAI3D,SAASjnB,EAAE2F,KAAKshB,SAASkU,WAAW11B,EAAEA,EAAE,CAAC,EAAEzF,EAAE2F,KAAKw1B,YAAY56B,EAAE2nC,UAAU,CAAC,GAAG,CAAC5iC,MAAM,WAAW,MAAM,CAACwiC,oBAAoB,SAAS/nC,EAAEC,GAAG,IAAqCI,GAAjCA,EAAEJ,EAAEw3B,aAAa,iBAAoB8F,GAAGl9B,EAAEm5B,MAAM,KAAKj1B,KAAI,SAASvE,GAAG,OAAOA,EAAE45B,MAAM,KAAt8rB,CAAC2B,OAAO,KAAK11B,SAAS,KAAK+3B,KAAK,IAA+6rB,OAAOv9B,EAAEk7B,SAASl7B,EAAEk7B,OAAOe,IAAIt8B,EAAE40B,KAAKv0B,EAAEL,EAAE8/B,OAAO7/B,EAAEw3B,aAAa,mBAAmBz3B,CAAC,EAAE,EAAE2lC,SAAS,SAAS3lC,GAAGA,EAAEooC,qBAAqB,SAASpoC,GAAG,IAAIC,EAAED,EAAEknB,SAAS7mB,EAAEL,EAAEo7B,WAAWh7B,EAAEJ,EAAE0/B,KAAKl/B,EAAER,EAAE40B,KAAKn0B,EAAET,EAAE8/B,OAAOp/B,EAAEV,EAAE2/B,UAAUx+B,EAAEf,EAAE+/B,MAAM5+B,EAAEnB,EAAEwF,KAAK9D,EAAEtB,EAAE2/B,MAAw+B,OAAl+BngC,EAAEQ,EAAEoF,KAAQpF,GAAGJ,EAAE,CAACu/B,UAAUj/B,EAAEqlC,eAAejkC,EAAEkkC,UAAU7kC,IAAIw+B,UAAUj/B,EAAEN,EAAE2lC,eAAejkC,EAAE1B,EAAE4lC,UAAU7kC,EAAE,CAACw+B,UAAU,aAAat7B,OAAO3D,EAAE,EAAE,UAAUN,EAAE,aAAaiE,OAAO,GAAG7D,EAAE40B,EAAE,MAAM/wB,OAAO,GAAG7D,EAAEsvB,EAAE,MAAMpvB,EAAE,SAAS2D,OAAO7D,EAAEs4B,KAAK,IAAIt4B,EAAEu4B,OAAO,EAAE,GAAG,MAAM10B,OAAO7D,EAAEs4B,KAAK,IAAIt4B,EAAEw4B,OAAO,EAAE,GAAG,MAAMx4B,EAAE,UAAU6D,OAAO7D,EAAEqxB,OAAO,SAAxTnxB,EAAiU,CAACunC,MAAM9mC,EAAE+mC,MAAM,CAACvI,UAAU,GAAGt7B,OAAOjE,EAAE,KAAKiE,OAAO3D,EAAE,KAAK2D,OAAO7D,IAAI2nC,KAAK,CAACxI,UAAU,aAAat7B,OAAOvC,EAAE,GAAG,EAAE,YAAatB,EAAE,CAACqqB,IAAI,OAAOuQ,WAAW11B,EAAEA,EAAE,CAAC,EAAEgiC,IAAI,CAAC,EAAE,CAAC1W,KAAK,WAAWlvB,EAAEP,EAAE2lB,SAAS,CAACA,SAAS3lB,EAAE2lB,SAAS3iB,IAAIojC,KAAK,CAAC,EAAE7lC,EAAE,CAAC+oB,IAAI,IAAIuQ,WAAW11B,EAAE,CAAC,EAAEhF,EAAEwnC,OAAOhhB,SAAS,CAACygB,GAAGjiC,EAAE,CAACmlB,IAAItpB,EAAEspB,IAAIuQ,WAAW11B,EAAEA,EAAE,CAAC,EAAEnE,EAAE65B,YAAY16B,EAAEynC,OAAOrmC,MAAMpB,EAAE,CAACmqB,IAAI,IAAIuQ,WAAW11B,EAAE,CAAC,EAAEhF,EAAEunC,OAAO/gB,SAAS,CAACplB,IAAIA,EAAE,QAAQuC,OAAO5D,GAAGy4B,KAAMz4B,EAAE,QAAQ4D,OAAO5D,GAAGy4B,KAAMx4B,EAAE,CAACmqB,IAAI,OAAOuQ,WAAW11B,EAAEA,EAAE,CAAC,EAAEgiC,IAAI,CAAC,EAAE,CAAChH,GAAG5+B,EAAEumC,UAAU,iBAAiBC,iBAAiB,mBAAmBphB,SAAS,CAAC1mB,EAAEE,IAAIA,EAAE,CAACmqB,IAAI,OAAO3D,SAAS,CAAC,CAAC2D,IAAI,WAAWuQ,WAAW,CAACsF,GAAGjgC,GAAGymB,SAAS,MAASlnB,EAAG6qB,IAAI7qB,EAAEknB,SAAS,CAAClnB,IAAIU,IAAWT,EAAEgB,KAAKP,EAAE,CAACmqB,IAAI,OAAOuQ,WAAW11B,EAAE,CAACsrB,KAAK,eAAe,YAAY,QAAQ3sB,OAAO5D,EAAE,KAAKm0B,KAAK,QAAQvwB,OAAOvC,EAAE,MAAM4lC,MAAM,CAACxgB,SAASjnB,EAAEm7B,WAAW/6B,EAAE,CAAC,GAAG,CAACslC,SAAS,SAAS3lC,G
AAG,IAAIS,GAAE,EAAGoC,EAAE0lC,aAAa9nC,EAAEoC,EAAE0lC,WAAW,oCAAoCC,SAASxoC,EAAEyoC,oBAAoB,WAAW,IAAIzoC,EAAE,GAAGC,EAAE,CAAC+wB,KAAK,gBAAgB3wB,EAAE,CAACqoC,cAAc,MAAMC,YAAY,aAAaC,IAAI,MAAM5oC,EAAEiB,KAAK,CAAC4pB,IAAI,OAAOuQ,WAAW11B,EAAEA,EAAE,CAAC,EAAEzF,GAAG,CAAC,EAAE,CAAC6F,EAAE,u4CAAu4C,IAAI1F,EAAEsF,EAAEA,EAAE,CAAC,EAAErF,GAAG,CAAC,EAAE,CAACwjC,cAAc,YAAYrjC,EAAE,CAACqqB,IAAI,SAASuQ,WAAW11B,EAAEA,EAAE,CAAC,EAAEzF,GAAG,CAAC,EAAE,CAAC4oC,GAAG,MAAMC,GAAG,MAAMhnC,EAAE,OAAOolB,SAAS,IAAI,OAAOzmB,GAAGD,EAAE0mB,SAASjmB,KAAK,CAAC4pB,IAAI,UAAUuQ,WAAW11B,EAAEA,EAAE,CAAC,EAAErF,GAAG,CAAC,EAAE,CAACwjC,cAAc,IAAIxH,OAAO,wBAAwB,CAACxR,IAAI,UAAUuQ,WAAW11B,EAAEA,EAAE,CAAC,EAAEtF,GAAG,CAAC,EAAE,CAACi8B,OAAO,mBAAmBr8B,EAAEiB,KAAKT,GAAGR,EAAEiB,KAAK,CAAC4pB,IAAI,OAAOuQ,WAAW11B,EAAEA,EAAE,CAAC,EAAEzF,GAAG,CAAC,EAAE,CAAC8oC,QAAQ,IAAIjjC,EAAE,ySAAySohB,SAASzmB,EAAE,GAAG,CAAC,CAACoqB,IAAI,UAAUuQ,WAAW11B,EAAEA,EAAE,CAAC,EAAEtF,GAAG,CAAC,EAAE,CAACi8B,OAAO,qBAAqB57B,GAAGT,EAAEiB,KAAK,CAAC4pB,IAAI,OAAOuQ,WAAW11B,EAAEA,EAAE,CAAC,EAAEzF,GAAG,CAAC,EAAE,CAAC8oC,QAAQ,IAAIjjC,EAAE,gJAAgJohB,SAAS,CAAC,CAAC2D,IAAI,UAAUuQ,WAAW11B,EAAEA,EAAE,CAAC,EAAEtF,GAAG,CAAC,EAAE,CAACi8B,OAAO,qBAAqB,CAACxR,IAAI,IAAIuQ,WAAW,CAACkF,MAAM,WAAWpZ,SAASlnB,EAAE,CAAC,GAAG,CAACuF,MAAM,WAAW,MAAM,CAACwiC,oBAAoB,SAAS/nC,EAAEC,GAAsC,OAAnCA,EAAEA,EAAEw3B,aAAa,kBAAyBz3B,EAAE4/B,OAAO,OAAO3/B,IAAI,KAAKA,GAAGA,GAAGD,CAAC,EAAE,IAAIg+B,GAAG,CAAC,EAAEr9B,OAAOC,KAAKq9B,IAAI38B,SAAQ,SAAStB,IAAI,IAAIk+B,GAAG97B,QAAQpC,WAAWi+B,GAAGj+B,EAAE,IAAGuB,EAAED,SAAQ,SAAStB,GAAG,IAAIC,EAAEI,EAAEL,EAAEw6B,OAAOx6B,EAAEw6B,SAAS,CAAC,EAAE75B,OAAOC,KAAKP,GAAGiB,SAAQ,SAASrB,GAAG,mBAAmBI,EAAEJ,KAAKunC,GAAGvnC,GAAGI,EAAEJ,IAAI,WAAWO,EAAEH,EAAEJ,KAAKU,OAAOC,KAAKP,EAAEJ,IAAIqB,SAAQ,SAAStB,GAAGwnC,GAAGvnC,KAAKunC,GAAGvnC,GAAG,CAAC,GAAGunC,GAAGvnC,GAAGD,GAAGK,EAAEJ,GAAGD,EAAE,GAAE,IAAGA,EAAEuF,QAAQtF,EAAED,EAAEuF,QAAQ5E,OAAOC,KAAKX,GAAGqB,SAAQ,SAAStB,GAAGg+B,GAAGh+B,KAAKg+B,GAAGh+B,GAAG,IAAIg+B,GAAGh+B,GAAGiB,KAAKhB,EAAED,GAAG,KAAIA,EAAE2lC,UAAU3lC,EAAE2lC,SAAS1H,GAAG,IAAG,SAASj+B,GAAG,IAAI,IAAI,IAAIC,EAAEmB,UAAUC,OAAOhB,EAAE,IAAI0B,MAAM,EAAE9B,EAAEA,EAAE,EAAE,GAAGG,EAAE,EAAEA,EAAEH,EAAEG,IAAIC,EAAED,EAAE,GAAGgB,UAAUhB,GAAGJ,EAAEkB,WAAM,EAAOb,EAAE,CAAC,MAAML,GAAG,IAAIm2B,EAAE,MAAMn2B,CAAC,CAAC,CAA3I,EAA6I,SAASA,GAAG2D,IAAId,EAAEmmC,cAAcnmC,EAAEmmC,YAAYxK,IAAIvD,IAAG,WAAW+D,KAAKZ,GAAG,YAAY,KAAItD,GAAGv1B,MAAMG,EAAEA,EAAE,CAAC,EAAEo1B,GAAGv1B,OAAO,CAAC,EAAE,CAACS,QAAQ,SAAShG,EAAEC,GAAG66B,GAAGx1B,OAAOtF,GAAG0F,EAAEA,EAAE,CAAC,EAAEo1B,GAAGx1B,OAAOtF,IAAI,CAAC,GAAGC,GAAG48B,KAAKmC,IAAI,EAAEiK,SAAS,SAASjpC,GAAGA,EAAEsB,SAAQ,SAAStB,GAAgBA,GAATC,EAAEsC,EAAEvC,EAAE,IAAO,GAAjB,IAAoBC,EAAEA,EAAE,GAAG66B,GAAGx1B,OAAOtF,GAAG0F,EAAEA,EAAE,CAAC,EAAEo1B,GAAGx1B,OAAOtF,IAAI,CAAC,GAAGC,EAAE,IAAG48B,KAAKmC,IAAI,EAAEkK,SAAS,SAASlpC,GAAG,IAAIC,GAAGA,EAAE66B,GAAGt1B,OAAOvE,KAAKC,MAAMjB,EAAEoC,EAAErC,IAAI68B,KAAKmC,IAAI,GAAG,GAAE,CAA/+uD,KCJvnt3CmK,EAA2B,CAAC,EAGhC,SAASC,EAAoBC,GAE5B,IAAIC,EAAeH,EAAyBE,GAC5C,QAAqBE,IAAjBD,EACH,OAAOA,EAAaE,QAGrB,IAAIC,EAASN,EAAyBE,GAAY,CAGjDG,QAAS,CAAC,GAOX,OAHAE,EAAoBL,GAAUI,EAAQA,EAAOD,QAASJ,GAG/CK,EAAOD,OACf,CCrBAJ,EAAoB7mC,EAAKknC,IACxB,IAAIE,EAASF,GAAUA,EAAOG,WAC7B,IAAOH,EAAiB,QACxB,IAAM,EAEP,OADAL,EAAoBtjC,EAAE6jC,EAAQ,CAAEvpC,EAAGupC,IAC5BA,CAAM,ECLdP,EAAoBtjC,EAAI,CAAC0jC,EAASK,KACjC,IAAI,IAAI/mB,KAAO+mB,EACXT,EAAoB1mC,EAAEmnC,EAAY/mB,KAASsmB,EAAoB1mC,EAAE8mC,EAAS1mB,IAC5EniB,OAAOe,eAAe8nC,EAAS1mB,EAAK,CAAE9hB,YAAY,EAAM4B,IAAKinC,EAAW/mB,IAE1E,ECNDsmB,EAAoB1mC,EAAI,CAAConC,EAAKC,IAAUppC,OAAOmE,UAAUklC,eAAehlC,KAAK8kC,EAAKC","sources":["webpack://pydata_sphinx_theme/./
node_modules/@fortawesome/fontawesome-free/js/all.min.js","webpack://pydata_sphinx_theme/webpack/bootstrap","webpack://pydata_sphinx_theme/webpack/runtime/compat get default export","webpack://pydata_sphinx_theme/webpack/runtime/define property getters","webpack://pydata_sphinx_theme/webpack/runtime/hasOwnProperty shorthand"],"sourcesContent":["/*!\n * Font Awesome Free 6.5.2 by @fontawesome - https://fontawesome.com\n * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)\n * Copyright 2024 Fonticons, Inc.\n */\n!function(){\"use strict\";var c={},l={};try{\"undefined\"!=typeof window&&(c=window),\"undefined\"!=typeof document&&(l=document)}catch(c){}var s=(c.navigator||{}).userAgent,a=void 0===s?\"\":s,z=c,e=l;z.document,e.documentElement&&e.head&&\"function\"==typeof e.addEventListener&&e.createElement,~a.indexOf(\"MSIE\")||a.indexOf(\"Trident/\");function H(l,c){var s,a=Object.keys(l);return Object.getOwnPropertySymbols&&(s=Object.getOwnPropertySymbols(l),c&&(s=s.filter(function(c){return Object.getOwnPropertyDescriptor(l,c).enumerable})),a.push.apply(a,s)),a}function t(l){for(var c=1;cc.length)&&(l=c.length);for(var s=0,a=new Array(l);sc.length)&&(l=c.length);for(var s=0,a=new Array(l);sc.length)&&(l=c.length);for(var s=0,a=new Array(l);sc.length)&&(l=c.length);for(var s=0,a=new Array(l);s>>0;s--;)l[s]=c[s];return l}function a1(c){return c.classList?s1(c.classList):(c.getAttribute(\"class\")||\"\").split(\" \").filter(function(c){return c})}function z1(c){return\"\".concat(c).replace(/&/g,\"&\").replace(/\"/g,\""\").replace(/'/g,\"'\").replace(//g,\">\")}function e1(s){return Object.keys(s||{}).reduce(function(c,l){return c+\"\".concat(l,\": \").concat(s[l].trim(),\";\")},\"\")}function H1(c){return c.size!==J.size||c.x!==J.x||c.y!==J.y||c.rotate!==J.rotate||c.flipX||c.flipY}function t1(){var c,l,s=b,a=Q.cssPrefix,z=Q.replacementClass,e=':host,:root{--fa-font-solid:normal 900 1em/1 \"Font Awesome 6 Solid\";--fa-font-regular:normal 400 1em/1 \"Font Awesome 6 Regular\";--fa-font-light:normal 300 1em/1 \"Font Awesome 6 Light\";--fa-font-thin:normal 100 1em/1 \"Font Awesome 6 Thin\";--fa-font-duotone:normal 900 1em/1 \"Font Awesome 6 Duotone\";--fa-font-sharp-solid:normal 900 1em/1 \"Font Awesome 6 Sharp\";--fa-font-sharp-regular:normal 400 1em/1 \"Font Awesome 6 Sharp\";--fa-font-sharp-light:normal 300 1em/1 \"Font Awesome 6 Sharp\";--fa-font-sharp-thin:normal 100 1em/1 \"Font Awesome 6 Sharp\";--fa-font-brands:normal 400 1em/1 \"Font Awesome 6 Brands\"}svg:not(:host).svg-inline--fa,svg:not(:root).svg-inline--fa{overflow:visible;box-sizing:content-box}.svg-inline--fa{display:var(--fa-display,inline-block);height:1em;overflow:visible;vertical-align:-.125em}.svg-inline--fa.fa-2xs{vertical-align:.1em}.svg-inline--fa.fa-xs{vertical-align:0}.svg-inline--fa.fa-sm{vertical-align:-.0714285705em}.svg-inline--fa.fa-lg{vertical-align:-.2em}.svg-inline--fa.fa-xl{vertical-align:-.25em}.svg-inline--fa.fa-2xl{vertical-align:-.3125em}.svg-inline--fa.fa-pull-left{margin-right:var(--fa-pull-margin,.3em);width:auto}.svg-inline--fa.fa-pull-right{margin-left:var(--fa-pull-margin,.3em);width:auto}.svg-inline--fa.fa-li{width:var(--fa-li-width,2em);top:.25em}.svg-inline--fa.fa-fw{width:var(--fa-fw-width,1.25em)}.fa-layers 
svg.svg-inline--fa{bottom:0;left:0;margin:auto;position:absolute;right:0;top:0}.fa-layers-counter,.fa-layers-text{display:inline-block;position:absolute;text-align:center}.fa-layers{display:inline-block;height:1em;position:relative;text-align:center;vertical-align:-.125em;width:1em}.fa-layers svg.svg-inline--fa{-webkit-transform-origin:center center;transform-origin:center center}.fa-layers-text{left:50%;top:50%;-webkit-transform:translate(-50%,-50%);transform:translate(-50%,-50%);-webkit-transform-origin:center center;transform-origin:center center}.fa-layers-counter{background-color:var(--fa-counter-background-color,#ff253a);border-radius:var(--fa-counter-border-radius,1em);box-sizing:border-box;color:var(--fa-inverse,#fff);line-height:var(--fa-counter-line-height,1);max-width:var(--fa-counter-max-width,5em);min-width:var(--fa-counter-min-width,1.5em);overflow:hidden;padding:var(--fa-counter-padding,.25em .5em);right:var(--fa-right,0);text-overflow:ellipsis;top:var(--fa-top,0);-webkit-transform:scale(var(--fa-counter-scale,.25));transform:scale(var(--fa-counter-scale,.25));-webkit-transform-origin:top right;transform-origin:top right}.fa-layers-bottom-right{bottom:var(--fa-bottom,0);right:var(--fa-right,0);top:auto;-webkit-transform:scale(var(--fa-layers-scale,.25));transform:scale(var(--fa-layers-scale,.25));-webkit-transform-origin:bottom right;transform-origin:bottom right}.fa-layers-bottom-left{bottom:var(--fa-bottom,0);left:var(--fa-left,0);right:auto;top:auto;-webkit-transform:scale(var(--fa-layers-scale,.25));transform:scale(var(--fa-layers-scale,.25));-webkit-transform-origin:bottom left;transform-origin:bottom left}.fa-layers-top-right{top:var(--fa-top,0);right:var(--fa-right,0);-webkit-transform:scale(var(--fa-layers-scale,.25));transform:scale(var(--fa-layers-scale,.25));-webkit-transform-origin:top right;transform-origin:top right}.fa-layers-top-left{left:var(--fa-left,0);right:auto;top:var(--fa-top,0);-webkit-transform:scale(var(--fa-layers-scale,.25));transform:scale(var(--fa-layers-scale,.25));-webkit-transform-origin:top left;transform-origin:top left}.fa-1x{font-size:1em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-6x{font-size:6em}.fa-7x{font-size:7em}.fa-8x{font-size:8em}.fa-9x{font-size:9em}.fa-10x{font-size:10em}.fa-2xs{font-size:.625em;line-height:.1em;vertical-align:.225em}.fa-xs{font-size:.75em;line-height:.0833333337em;vertical-align:.125em}.fa-sm{font-size:.875em;line-height:.0714285718em;vertical-align:.0535714295em}.fa-lg{font-size:1.25em;line-height:.05em;vertical-align:-.075em}.fa-xl{font-size:1.5em;line-height:.0416666682em;vertical-align:-.125em}.fa-2xl{font-size:2em;line-height:.03125em;vertical-align:-.1875em}.fa-fw{text-align:center;width:1.25em}.fa-ul{list-style-type:none;margin-left:var(--fa-li-margin,2.5em);padding-left:0}.fa-ul>li{position:relative}.fa-li{left:calc(var(--fa-li-width,2em) * -1);position:absolute;text-align:center;width:var(--fa-li-width,2em);line-height:inherit}.fa-border{border-color:var(--fa-border-color,#eee);border-radius:var(--fa-border-radius,.1em);border-style:var(--fa-border-style,solid);border-width:var(--fa-border-width,.08em);padding:var(--fa-border-padding,.2em .25em 
.15em)}.fa-pull-left{float:left;margin-right:var(--fa-pull-margin,.3em)}.fa-pull-right{float:right;margin-left:var(--fa-pull-margin,.3em)}.fa-beat{-webkit-animation-name:fa-beat;animation-name:fa-beat;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,ease-in-out);animation-timing-function:var(--fa-animation-timing,ease-in-out)}.fa-bounce{-webkit-animation-name:fa-bounce;animation-name:fa-bounce;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.28,.84,.42,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.28,.84,.42,1))}.fa-fade{-webkit-animation-name:fa-fade;animation-name:fa-fade;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1))}.fa-beat-fade{-webkit-animation-name:fa-beat-fade;animation-name:fa-beat-fade;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1))}.fa-flip{-webkit-animation-name:fa-flip;animation-name:fa-flip;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animat
ion-timing,ease-in-out);animation-timing-function:var(--fa-animation-timing,ease-in-out)}.fa-shake{-webkit-animation-name:fa-shake;animation-name:fa-shake;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,linear);animation-timing-function:var(--fa-animation-timing,linear)}.fa-spin{-webkit-animation-name:fa-spin;animation-name:fa-spin;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,2s);animation-duration:var(--fa-animation-duration,2s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,linear);animation-timing-function:var(--fa-animation-timing,linear)}.fa-spin-reverse{--fa-animation-direction:reverse}.fa-pulse,.fa-spin-pulse{-webkit-animation-name:fa-spin;animation-name:fa-spin;-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,steps(8));animation-timing-function:var(--fa-animation-timing,steps(8))}@media (prefers-reduced-motion:reduce){.fa-beat,.fa-beat-fade,.fa-bounce,.fa-fade,.fa-flip,.fa-pulse,.fa-shake,.fa-spin,.fa-spin-pulse{-webkit-animation-delay:-1ms;animation-delay:-1ms;-webkit-animation-duration:1ms;animation-duration:1ms;-webkit-animation-iteration-count:1;animation-iteration-count:1;-webkit-transition-delay:0s;transition-delay:0s;-webkit-transition-duration:0s;transition-duration:0s}}@-webkit-keyframes fa-beat{0%,90%{-webkit-transform:scale(1);transform:scale(1)}45%{-webkit-transform:scale(var(--fa-beat-scale,1.25));transform:scale(var(--fa-beat-scale,1.25))}}@keyframes fa-beat{0%,90%{-webkit-transform:scale(1);transform:scale(1)}45%{-webkit-transform:scale(var(--fa-beat-scale,1.25));transform:scale(var(--fa-beat-scale,1.25))}}@-webkit-keyframes fa-bounce{0%{-webkit-transform:scale(1,1) translateY(0);transform:scale(1,1) translateY(0)}10%{-webkit-transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0);transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0)}30%{-webkit-transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em));transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em))}50%{-webkit-transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) 
translateY(0);transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0)}57%{-webkit-transform:scale(1,1) translateY(var(--fa-bounce-rebound,-.125em));transform:scale(1,1) translateY(var(--fa-bounce-rebound,-.125em))}64%{-webkit-transform:scale(1,1) translateY(0);transform:scale(1,1) translateY(0)}100%{-webkit-transform:scale(1,1) translateY(0);transform:scale(1,1) translateY(0)}}@keyframes fa-bounce{0%{-webkit-transform:scale(1,1) translateY(0);transform:scale(1,1) translateY(0)}10%{-webkit-transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0);transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0)}30%{-webkit-transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em));transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em))}50%{-webkit-transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0);transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0)}57%{-webkit-transform:scale(1,1) translateY(var(--fa-bounce-rebound,-.125em));transform:scale(1,1) translateY(var(--fa-bounce-rebound,-.125em))}64%{-webkit-transform:scale(1,1) translateY(0);transform:scale(1,1) translateY(0)}100%{-webkit-transform:scale(1,1) translateY(0);transform:scale(1,1) translateY(0)}}@-webkit-keyframes fa-fade{50%{opacity:var(--fa-fade-opacity,.4)}}@keyframes fa-fade{50%{opacity:var(--fa-fade-opacity,.4)}}@-webkit-keyframes fa-beat-fade{0%,100%{opacity:var(--fa-beat-fade-opacity,.4);-webkit-transform:scale(1);transform:scale(1)}50%{opacity:1;-webkit-transform:scale(var(--fa-beat-fade-scale,1.125));transform:scale(var(--fa-beat-fade-scale,1.125))}}@keyframes fa-beat-fade{0%,100%{opacity:var(--fa-beat-fade-opacity,.4);-webkit-transform:scale(1);transform:scale(1)}50%{opacity:1;-webkit-transform:scale(var(--fa-beat-fade-scale,1.125));transform:scale(var(--fa-beat-fade-scale,1.125))}}@-webkit-keyframes fa-flip{50%{-webkit-transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg));transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg))}}@keyframes fa-flip{50%{-webkit-transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg));transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg))}}@-webkit-keyframes fa-shake{0%{-webkit-transform:rotate(-15deg);transform:rotate(-15deg)}4%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}24%,8%{-webkit-transform:rotate(-18deg);transform:rotate(-18deg)}12%,28%{-webkit-transform:rotate(18deg);transform:rotate(18deg)}16%{-webkit-transform:rotate(-22deg);transform:rotate(-22deg)}20%{-webkit-transform:rotate(22deg);transform:rotate(22deg)}32%{-webkit-transform:rotate(-12deg);transform:rotate(-12deg)}36%{-webkit-transform:rotate(12deg);transform:rotate(12deg)}100%,40%{-webkit-transform:rotate(0);transform:rotate(0)}}@keyframes 
fa-shake{0%{-webkit-transform:rotate(-15deg);transform:rotate(-15deg)}4%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}24%,8%{-webkit-transform:rotate(-18deg);transform:rotate(-18deg)}12%,28%{-webkit-transform:rotate(18deg);transform:rotate(18deg)}16%{-webkit-transform:rotate(-22deg);transform:rotate(-22deg)}20%{-webkit-transform:rotate(22deg);transform:rotate(22deg)}32%{-webkit-transform:rotate(-12deg);transform:rotate(-12deg)}36%{-webkit-transform:rotate(12deg);transform:rotate(12deg)}100%,40%{-webkit-transform:rotate(0);transform:rotate(0)}}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(360deg);transform:rotate(360deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(360deg);transform:rotate(360deg)}}.fa-rotate-90{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-webkit-transform:scale(-1,1);transform:scale(-1,1)}.fa-flip-vertical{-webkit-transform:scale(1,-1);transform:scale(1,-1)}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical{-webkit-transform:scale(-1,-1);transform:scale(-1,-1)}.fa-rotate-by{-webkit-transform:rotate(var(--fa-rotate-angle,0));transform:rotate(var(--fa-rotate-angle,0))}.fa-stack{display:inline-block;vertical-align:middle;height:2em;position:relative;width:2.5em}.fa-stack-1x,.fa-stack-2x{bottom:0;left:0;margin:auto;position:absolute;right:0;top:0;z-index:var(--fa-stack-z-index,auto)}.svg-inline--fa.fa-stack-1x{height:1em;width:1.25em}.svg-inline--fa.fa-stack-2x{height:2em;width:2.5em}.fa-inverse{color:var(--fa-inverse,#fff)}.fa-sr-only,.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border-width:0}.fa-sr-only-focusable:not(:focus),.sr-only-focusable:not(:focus){position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border-width:0}.svg-inline--fa .fa-primary{fill:var(--fa-primary-color,currentColor);opacity:var(--fa-primary-opacity,1)}.svg-inline--fa .fa-secondary{fill:var(--fa-secondary-color,currentColor);opacity:var(--fa-secondary-opacity,.4)}.svg-inline--fa.fa-swap-opacity .fa-primary{opacity:var(--fa-secondary-opacity,.4)}.svg-inline--fa.fa-swap-opacity .fa-secondary{opacity:var(--fa-primary-opacity,1)}.svg-inline--fa mask .fa-primary,.svg-inline--fa mask .fa-secondary{fill:#000}.fa-duotone.fa-inverse,.fad.fa-inverse{color:var(--fa-inverse,#fff)}';return\"fa\"===a&&z===s||(c=new RegExp(\"\\\\.\".concat(\"fa\",\"\\\\-\"),\"g\"),l=new RegExp(\"\\\\--\".concat(\"fa\",\"\\\\-\"),\"g\"),s=new RegExp(\"\\\\.\".concat(s),\"g\"),e=e.replace(c,\".\".concat(a,\"-\")).replace(l,\"--\".concat(a,\"-\")).replace(s,\".\".concat(z))),e}var V1=!1;function r1(){Q.autoAddCss&&!V1&&(function(c){if(c&&L){var l=v.createElement(\"style\");l.setAttribute(\"type\",\"text/css\"),l.innerHTML=c;for(var s=v.head.childNodes,a=null,z=s.length-1;-1\").concat(a.map(o1).join(\"\"),\"\")}function f1(c,l,s){if(c&&c[l]&&c[l][s])return{prefix:l,iconName:s,icon:c[l][s]}}L&&((i1=(v.documentElement.doScroll?/^loaded|^c/:/^loaded|^i|^c/).test(v.readyState))||v.addEventListener(\"DOMContentLoaded\",M1));function C1(c,l,s,a){for(var z,e,H=Object.keys(c),t=H.length,V=void 0!==a?v1(l,a):l,r=void 0===s?(z=1,c[H[0]]):(z=0,s);z {\n\tvar getter = module && module.__esModule ?\n\t\t() => 
(module['default']) :\n\t\t() => (module);\n\t__webpack_require__.d(getter, { a: getter });\n\treturn getter;\n};","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))"],"names":["c","l","window","document","a","s","navigator","userAgent","z","e","H","Object","keys","getOwnPropertySymbols","filter","getOwnPropertyDescriptor","enumerable","push","apply","t","arguments","length","forEach","V","getOwnPropertyDescriptors","defineProperties","defineProperty","value","configurable","writable","r","Array","documentElement","head","addEventListener","createElement","indexOf","M","h","n","i","m","o","Proxy","get","C","fa","fas","far","fal","fat","fad","fab","fak","fakd","fass","fasr","fasl","fast","f","v","solid","regular","light","thin","duotone","brands","kit","normal","concat","Set","map","add","bind","isArray","Symbol","iterator","from","prototype","toString","call","slice","constructor","name","test","TypeError","styles","hooks","shims","L","u","reduce","icon","iconName","d","skipHooks","addPack","p","monero","hooli","yelp","lastfm","shopware","aws","redhat","yoast","cloudflare","ups","pixiv","wpexplorer","dyalog","bity","stackpath","buysellads","modx","guilded","vnv","microsoft","qq","orcid","java","invision","centercode","drupal","jxl","unity","whmcs","rocketchat","vk","untappd","mailchimp","contao","deskpro","brave","sistrix","edge","threads","napster","artstation","markdown","sourcetree","diaspora","foursquare","pagelines","algolia","safari","google","atlassian","nimblr","chromecast","evernote","adversal","fonticons","weixin","shirtsinbulk","codepen","lyft","rev","windows","meetup","centos","adn","cloudsmith","opensuse","codiepie","node","mix","steam","scribd","debian","openid","instalod","expeditedssl","sellcast","delicious","freebsd","vuejs","accusoft","ioxhost","golang","kickstarter","grav","weibo","uncharted","firstdraft","wpressr","angellist","skype","joget","fedora","meta","laravel","hotjar","hips","behance","reddit","discord","chrome","wpbeginner","confluence","shoelace","mdb","dochub","ebay","amazon","unsplash","yarn","asymmetrik","gratipay","apple","hive","gitkraken","keybase","padlet","stumbleupon","fedex","shopify","neos","hackerrank","researchgate","swift","angular","speakap","angrycreative","empire","envira","studiovinari","wordpress","firefox","linode","goodreads","jsfiddle","sith","themeisle","page4","hashnode","react","squarespace","bitcoin","keycdn","opera","umbraco","ubuntu","draft2digital","stripe","houzz","gg","dhl","xing","blackberry","playstation","quinscape","less","opencart","vine","paypal","gitlab","typo3","yahoo","dailymotion","affiliatetheme","bootstrap","odnoklassniki","mintbit","ethereum","patreon","avianex","ello","gofore","bimobject","mandalorian","osi","periscope","fulcrum","cloudscale","forumbee","mizuni","schlix","bandcamp","wpforms","cloudversify","usps","megaport","magento","spotify","fly","aviato","itunes","cuttlefish","blogger","flickr","viber","soundcloud","digg","letterboxd","symfony","maxcdn","etsy","audible","bilibili","erlang","dashcube","elementor","palfed","superpowers","resolving","xbox","searchengin","tiktok","renren","linux","glide","linkedin","hubspot","deploydog","twitch","ravelry","m
ixer","vimeo","mendeley","uniregistry","figma","dropbox","instagram","cmplid","upwork","facebook","gripfire","uikit","phabricator","ussunnah","earlybirds","autoprefixer","whatsapp","slideshare","viadeo","line","servicestack","simplybuilt","bitbucket","imdb","deezer","jira","docker","screenpal","bluetooth","gitter","microblog","yandex","readme","html5","sellsy","sass","wirsindhandwerk","buromobelexperte","salesforce","medapps","ns8","apper","waze","bluesky","snapchat","rust","wix","supple","webflow","rebel","css3","staylinked","kaggle","deviantart","cpanel","trello","perbyte","grunt","weebly","connectdevelop","leanpub","themeco","python","android","bots","hornbill","js","ideal","git","dev","sketch","uber","github","php","alipay","youtube","skyatlas","replyd","suse","jenkins","twitter","rockrms","pinterest","buffer","npm","yammer","btc","dribbble","stubber","telegram","odysee","slack","medrt","usb","tumblr","vaadin","quora","reacteurope","medium","amilia","mixcloud","flipboard","viacoin","sitrox","discourse","joomla","mastodon","airbnb","gulp","strava","ember","teamspeak","pushed","nutritionix","wodu","intercom","zhihu","korvue","pix","message","comments","paste","compass","lightbulb","flag","futbol","hand","bookmark","folder","user","star","clipboard","image","lemon","handshake","gem","registered","square","snowflake","newspaper","heart","circle","eye","comment","envelope","hourglass","clock","keyboard","images","sun","bell","file","hospital","copy","copyright","building","moon","calendar","clone","at","stethoscope","info","explosion","ring","volleyball","atom","soap","icons","fingerprint","football","crop","person","laptop","menorah","bong","spoon","pager","strikethrough","k","pencil","backward","blog","w","rainbow","paw","cloud","gavel","binoculars","motorcycle","scissors","table","clover","reply","helicopter","swatchbook","bars","film","sitemap","memory","hanukiah","feather","compress","ankh","asterisk","heading","ghost","list","gamepad","egg","campground","paintbrush","lock","tree","microscope","sink","mitten","users","om","worm","plug","stopwatch","stamp","stairs","pills","tooth","bicycle","snowman","school","igloo","joint","horse","q","g","capsules","bullseye","bacon","radiation","vial","gauge","dumpster","highlighter","key","bullhorn","globe","synagogue","ban","repeat","cross","box","maximize","shapes","shuffle","spider","slash","server","venus","passport","microchip","crown","wheelchair","fire","city","unlock","headset","wifi","bath","underline","signature","stroopwafel","bold","jedi","gift","glasses","train","crow","sailboat","frog","bucket","microphone","cow","screwdriver","dolly","smoking","minimize","monument","snowplow","cannabis","tablets","ethernet","chair","icicles","neuter","marker","volcano","viruses","certificate","suitcase","scroll","spa","pause","bomb","subscript","burst","smog","crutch","palette","vest","ferry","seedling","children","chalkboard","virus","archway","couch","italic","church","democrat","locust","sort","language","question","code","leaf","road","taxi","poop","kaaba","eject","tarp","cube","elevator","recycle","trademark","basketball","wallet","burger","wrench","bugs","bridge","cat","route","panorama","tags","terminal","tape","pen","signal","bus","prescription","shop","vihara","diamond","bacterium","biohazard","phone","trash","poo","shirt","cubes","divide","headphones","republican","ruler","restroom","j","otter","child","satellite","tag","paperclip","ribbon","lungs","indent","mountain","camera","meteor","sleigh","water","braille","landmark","truck","crosshair
s","tent","cookie","dumbbell","dna","minus","chess","gear","mosque","mosquito","vials","pallet","faucet","timeline","coins","khanda","sliders","hamsa","flask","ticket","tty","calculator","ship","download","forward","mobile","outdent","house","b","utensils","skull","stop","upload","hurricane","mound","caravan","bolt","vault","mars","toilet","guitar","industry","ellipsis","toolbox","bug","car","medal","bed","podcast","superscript","droplet","eraser","dove","socks","inbox","section","dharmachakra","hotdog","drum","fax","paragraph","link","play","font","receipt","tv","shrimp","wind","y","fish","clapperboard","baseball","grip","gun","plus","expand","computer","xmark","baby","tractor","equals","blender","teeth","rocket","store","tablet","fill","bacteria","notdef","disease","genderless","retweet","radio","thermometer","percent","display","thumbtack","trophy","hammer","rotate","spinner","robot","peace","gears","warehouse","splotch","transgender","mercury","award","qrcode","shield","tents","dog","carrot","cheese","music","broom","gopuram","hashtag","hippo","infinity","voicemail","fan","trailer","bahai","dragon","tornado","anchor","rss","shower","desktop","book","check","briefcase","rug","handcuffs","database","share","dungeon","hands","dice","brain","bandage","gifts","hotel","brush","mask","syringe","magnet","jar","bone","plane","exclamation","print","x","umbrella","trowel","stapler","barcode","video","next","done","return","mark","measure","MutationObserver","performance","S","A","Z","O","P","N","E","I","T","D","Y","R","F","_","W","U","GROUP","SWAP_OPACITY","PRIMARY","SECONDARY","X","B","FontAwesomeConfig","querySelector","getAttribute","styleDefault","familyDefault","cssPrefix","replacementClass","autoReplaceSvg","autoAddCss","autoA11y","searchPseudoElements","observeMutations","mutateApproach","keepOriginalSource","measurePerformance","showMissingIcons","familyPrefix","G","Q","set","K","$","J","size","flipX","flipY","c1","l1","Math","random","s1","a1","classList","split","z1","replace","e1","trim","H1","t1","RegExp","V1","r1","setAttribute","innerHTML","childNodes","tagName","toUpperCase","insertBefore","mixout","dom","css","insertCss","beforeDOMElementCreation","beforeI2svg","h1","n1","i1","m1","setTimeout","o1","attributes","join","f1","prefix","C1","v1","doScroll","readyState","M1","removeEventListener","L1","charCodeAt","u1","d1","p1","b1","g1","values","w1","k1","y1","S1","A1","x1","q1","O1","autoFetchSvg","names","unicodes","I1","family","j1","P1","N1","T1","D1","skipLookups","includes","some","Z1","rest","this","definitions","_pullDefinitions","Y1","R1","F1","_1","W1","U1","X1","B1","G1","noAuto","config","i2svg","Promise","reject","watch","autoReplaceSvgRoot","Q1","parse","match","library","findIconDefinition","toHtml","K1","abstract","html","$1","main","transform","symbol","title","maskId","titleId","extra","watchable","found","width","height","classes","class","role","xmlns","viewBox","id","style","J1","content","startCentered","c2","l2","s2","a2","console","error","z2","e2","H2","t2","V2","begin","end","r2","M2","h2","createElementNS","n2","i2","parentNode","ceFn","createTextNode","appendChild","createComment","outerHTML","replaceChild","remove","nest","toSvg","toNode","removeAttribute","m2","o2","requestAnimationFrame","f2","C2","v2","L2","u2","treeCallback","nodeCallback","pseudoElementsCallback","observeMutationsRoot","type","addedNodes","target","attributeName","contains","observe","childList","characterData","subtree","p2","styleParser","innerText","firstChild","nodeType","Node","
TEXT_NODE","data","d2","focusable","b2","g2","w2","k2","resolve","querySelectorAll","all","then","catch","y2","S2","iconDefinition","params","mutationObserverCallbacks","provides","callback","generateSvgReplacementMutation","generateAbstractIcon","containerWidth","iconWidth","layer","assembler","counter","text","generateLayersText","parseInt","getComputedStyle","fontSize","getBoundingClientRect","A2","x2","q2","getPropertyValue","removeChild","toLowerCase","isSecondary","startsWith","Z2","O2","j2","P2","parseFloat","isNaN","T2","N2","E2","I2","pseudoElements2svg","unwatch","disconnect","parseNodeAttributes","generateAbstractTransformGrouping","outer","inner","path","generateAbstractMask","maskUnits","maskContentUnits","matchMedia","matches","missingIconAbstract","attributeType","repeatCount","dur","cx","cy","opacity","FontAwesome","addPacks","addShims","__webpack_module_cache__","__webpack_require__","moduleId","cachedModule","undefined","exports","module","__webpack_modules__","getter","__esModule","definition","obj","prop","hasOwnProperty"],"sourceRoot":""}
\ No newline at end of file
diff --git a/docs/_static/sphinx-dropdown.css b/docs/_static/sphinx-dropdown.css
new file mode 100644
index 00000000..da8a5a6c
--- /dev/null
+++ b/docs/_static/sphinx-dropdown.css
@@ -0,0 +1,94 @@
+.octicon {
+  display: inline-block;
+  vertical-align: text-top;
+  fill: currentColor;
+}
+details.dropdown .summary-title {
+  -webkit-user-select: none;
+  -moz-user-select: none;
+  -ms-user-select: none;
+  user-select: none;
+  /* don't overlap the chevron */
+  padding-right: 3em!important;
+  }
+details.dropdown:hover {
+  cursor: pointer;
+}
+details.dropdown .summary-content {
+  cursor: default;
+}
+details.dropdown summary {
+  padding: 1em;
+  /* hide the default triangle marker */
+  list-style: none;
+}
+/* chrome doesn't yet support list-style */
+details.dropdown summary::-webkit-details-marker {
+  display: none;
+}
+details.dropdown summary:focus {
+  outline: none;
+}
+details.dropdown summary:hover .summary-up svg,
+details.dropdown summary:hover .summary-down svg {
+  opacity: 1;
+}
+details.dropdown .summary-up svg,
+details.dropdown .summary-down svg {
+  opacity: 0.6;
+}
+details.dropdown .summary-up,
+details.dropdown .summary-down {
+  pointer-events: none;
+  position: absolute;
+  top: 0.75em;
+  right: 1em;
+}
+details.dropdown .summary-up svg,
+details.dropdown .summary-down svg {
+  display: block;
+}
+details.dropdown[open] .summary-down{
+  visibility: hidden;
+  /* z-index: -1; */
+}
+details.dropdown:not([open]) .summary-up{
+  visibility: hidden;
+  /* z-index: -1; */
+}
+
+/* Ellipsis added when no title */
+details.dropdown summary .octicon.no-title {
+  vertical-align: middle;
+}
+details.dropdown[open] summary .octicon.no-title{
+  visibility: hidden;
+}
+
+/* Transition animation */
+details.dropdown.fade-in[open] summary ~ * {
+  animation: fade-in .5s ease-in-out;
+  -moz-animation: fade-in .5s ease-in-out ;
+  -webkit-animation: fade-in .5s ease-in-out
+}
+details.dropdown.fade-in-slide-down[open] summary ~ * {
+  animation: fade-in .5s ease-in-out, slide-down .5s ease-in-out;
+  -moz-animation: fade-in .5s ease-in-out, slide-down .5s ease-in-out ;
+  -webkit-animation: fade-in .5s ease-in-out, slide-down .5s ease-in-out
+}
+@keyframes fade-in {
+  0% {
+    opacity: 0;
+  }
+  100% {
+    opacity: 1;
+  }
+}
+@keyframes slide-down {
+  0% {
+    transform: translate(0px, -10px)
+  }
+  100% {
+    transform: translate(0px, 0px)
+  }
+}
diff --git a/docs/_static/sphinx_highlight.js b/docs/_static/sphinx_highlight.js
index 
aae669d7..8a96c69a 100644 --- a/docs/_static/sphinx_highlight.js +++ b/docs/_static/sphinx_highlight.js @@ -29,14 +29,19 @@ const _highlight = (node, addItems, text, className) => { } span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); parent.insertBefore( span, parent.insertBefore( - document.createTextNode(val.substr(pos + text.length)), + rest, node.nextSibling ) ); node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. + */ + _highlight(rest, addItems, text, className); if (isInSVG) { const rect = document.createElementNS( @@ -140,5 +145,10 @@ const SphinxHighlight = { }, }; -_ready(SphinxHighlight.highlightSearchWords); -_ready(SphinxHighlight.initEscapeListener); +_ready(() => { + /* Do not call highlightSearchWords() when we are on the search page. + * It will highlight words from the *previous* search query. + */ + if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); + SphinxHighlight.initEscapeListener(); +}); diff --git a/docs/_static/styles/bootstrap.css.map b/docs/_static/styles/bootstrap.css.map new file mode 100644 index 00000000..6af30c41 --- /dev/null +++ b/docs/_static/styles/bootstrap.css.map @@ -0,0 +1 @@ +{"version":3,"file":"styles/bootstrap.css","mappings":"AAAA;;;;ECCE,CCDF,4BASI,qQAIA,sMAIA,iKAIA,sNAIA,iRAIA,iPAIA,iRAGF,2BACA,qBAMA,yMACA,mGACA,4EAOA,gDC2OI,wBALI,CDpOR,0BACA,0BAKA,wBACA,6BACA,kBACA,6BAEA,yBACA,8BAEA,wCACA,kCACA,0BACA,kCAEA,sCACA,iCACA,yBACA,iCAGA,2BAEA,wBACA,+BACA,+BAEA,8BACA,oCAMA,wBACA,6BACA,0BAGA,sBACA,wBACA,0BACA,+CAEA,4BACA,8BACA,6BACA,2BACA,4BACA,mDACA,8BAGA,8CACA,uDACA,gDACA,uDAIA,gCACA,0BACA,8CAIA,8BACA,qCACA,gCACA,uCEhHE,qBFyHA,wBACA,gCACA,qBACA,0BAEA,yBACA,oCAEA,2CACA,qCACA,0BACA,+BAEA,yCACA,oCACA,yBACA,8BAGE,iRAIA,iPAIA,iRAGF,2BAEA,wBACA,8BACA,gCACA,sCAEA,wBACA,6BACA,0BAEA,0BACA,kDAEA,8BACA,qCACA,gCACA,uCAlDA,iBAkDA,CGxKJ,iBAGE,sBAeE,6CANJ,MAOM,wBAcN,KASE,8BACA,0CAFA,mCAFA,2BAJA,uCF6OI,kCALI,CEtOR,uCACA,uCAJA,SAMA,oCAGA,CASF,GAGE,SACA,wCAFA,aCmnB4B,CDpnB5B,cAIA,WCynB4B,CD/mB9B,0CAOE,8BAFA,eCwjB4B,CDvjB5B,eCwjB4B,CD5jB5B,mBCwjB4B,CDzjB5B,YAMA,CAGF,OFuMQ,iCA5JJ,yBE3CJ,OF8MQ,kBEzMR,OFkMQ,gCA5JJ,yBEtCJ,OFyMQ,gBEpMR,OF6LQ,8BA5JJ,yBEjCJ,OFoMQ,mBE/LR,OFwLQ,gCA5JJ,yBE5BJ,OF+LQ,kBE1LR,OF+KM,iBALI,CErKV,OF0KM,cALI,CE1JV,EAEE,mBADA,YCyV0B,CD9U5B,YAEE,YADA,iCAEA,8BAMF,QAEE,kBACA,oBAFA,kBAEA,CAMF,MAEE,kBAGF,SAIE,mBADA,YACA,CAGF,wBAIE,gBAGF,GACE,eC6b4B,CDxb9B,GACE,oBACA,cAMF,WACE,gBAQF,SAEE,kBCsa4B,CD9Z9B,aF6EM,gBALI,CEjEV,WAGE,wCADA,gCADA,eAEA,CASF,QF0DM,eALI,CEjDR,cAFA,kBAGA,wBAGF,kBACA,cAKA,EACE,8DACA,yBCgNwC,CD9MxC,QACE,mDAWF,4DAEE,cACA,qBAOJ,kBAIE,oCCgV4B,CHlUxB,aALI,CEDV,IACE,cFKI,iBEHJ,mBADA,aAEA,aFHQ,CEQR,SAEE,cFLE,iBALI,CEWN,kBAIJ,KAGE,qBADA,2BFZI,gBEaJ,CAGA,OACE,cAIJ,IAIE,qCCy5CkC,CC9rDhC,qBFoSF,uBCy5CkC,CHj7C9B,gBALI,CE2BR,wBElSE,CFwSF,QF5BI,cE6BF,SFlCM,CE6CV,OACE,gBAMF,QAEE,sBAQF,MAEE,yBADA,mBACA,CAGF,QAGE,+BC4Z4B,CD7Z5B,oBC2X4B,CD5X5B,iBC4X4B,CDzX5B,gBAOF,GAEE,mBACA,gCAGF,2BAQE,cAAa,CAFb,oBAEA,CAQF,MACE,qBAMF,OAEE,gBAQF,iCACE,UAKF,sCAME,oBF5HI,iBALI,CEmIR,oBAHA,QAGA,CAIF,cAEE,oBAKF,cACE,eAGF,OAGE,iBAGA,gBACE,UAOJ,0IACE,uBAQF,gDAIE,0BAGE,4GACE,eAON,mBAEE,kBADA,SACA,CAKF,SACE,gBAUF,SAIE,QAAO,CADP,SAFA,YACA,SAEA,CAQF,OACE,WF9MM,gCEoNN,oBAHA,mBCmN4B,CDpN5B,UADA,UAKA,CFhXE,yBEyWJ,OFtMQ,kBE+MN,SACE,WAOJ,+OAOE,UAGF,4BACE,YASF,cACE,6BACA,oBAmBF,4BACE,wBAKF,+BACE,UAOF,uBAEE,0BADA,YACA,CAKF,OACE,qBAKF,OACE,SAOF,QAEE,eADA,iBACA,CAQ
F,SACE,wBAQF,SACE,uBGrkBF,MLmQM,iBALI,CK5PR,eFwoB4B,CEnoB5B,WLgQM,iCK5PJ,eFynBkB,CExnBlB,eFwmB0B,CHzgB1B,yBKpGF,WLuQM,gBKvQN,WLgQM,iCK5PJ,eFynBkB,CExnBlB,eFwmB0B,CHzgB1B,yBKpGF,WLuQM,kBKvQN,WLgQM,iCK5PJ,eFynBkB,CExnBlB,eFwmB0B,CHzgB1B,yBKpGF,WLuQM,gBKvQN,WLgQM,iCK5PJ,eFynBkB,CExnBlB,eFwmB0B,CHzgB1B,yBKpGF,WLuQM,kBKvQN,WLgQM,iCK5PJ,eFynBkB,CExnBlB,eFwmB0B,CHzgB1B,yBKpGF,WLuQM,gBKvQN,WLgQM,iCK5PJ,eFynBkB,CExnBlB,eFwmB0B,CHzgB1B,yBKpGF,WLuQM,kBK1OR,4BC3DE,gBADA,cACA,CD8DF,kBACE,qBAEA,mCACE,kBFsoB0B,CE5nB9B,YL8MM,gBALI,CKvMR,yBAIF,YLwMM,kBKvMJ,kBLkMQ,CK/LR,wBACE,gBAIJ,mBAIE,cL2LI,gBALI,CKxLR,kBEjGO,CFgGP,gBFnFS,CEwFT,0BACE,aG1FJ,0BCCE,YAHA,cAGA,CDDF,eAEE,kCL+jDkC,CK9jDlC,2DJGE,sCILF,cCAA,CDcF,QAEE,qBAGF,YAEE,aAAY,CADZ,mBACA,CAGF,gBAEE,gCRuPI,gBG2zC8B,COplDlC,oFCHA,qBACA,gBAKA,iBADA,kBADA,yCADA,0CADA,UAIA,CCsDE,wBF5CE,yBACE,eHlBe,EK6DnB,wBF5CE,uCACE,eHlBe,EK6DnB,wBF5CE,qDACE,eHlBe,EK6DnB,yBF5CE,mEACE,gBHlBe,EMEvB,MAEI,0HAKF,KCNA,qBACA,gBACA,aACA,eAIA,yCADA,0CADA,sCAEA,CDEE,OCOF,cAKA,8BAHA,eAEA,yCADA,0CAFA,UAIA,CA+CI,KACE,YAGF,iBApCJ,cACA,WAcA,cACE,cACA,WAFF,cACE,cACA,UAFF,cACE,cACA,mBAFF,cACE,cACA,UAFF,cACE,cACA,UAFF,cACE,cACA,mBA+BE,UAhDJ,cACA,WAqDQ,OAhEN,cACA,kBA+DM,OAhEN,cACA,mBA+DM,OAhEN,cACA,UA+DM,OAhEN,cACA,mBA+DM,OAhEN,cACA,mBA+DM,OAhEN,cACA,UA+DM,OAhEN,cACA,mBA+DM,OAhEN,cACA,mBA+DM,OAhEN,cACA,UA+DM,QAhEN,cACA,mBA+DM,QAhEN,cACA,mBA+DM,QAhEN,cACA,WAuEQ,UAxDV,wBAwDU,UAxDV,yBAwDU,UAxDV,gBAwDU,UAxDV,yBAwDU,UAxDV,yBAwDU,UAxDV,gBAwDU,UAxDV,yBAwDU,UAxDV,yBAwDU,UAxDV,gBAwDU,WAxDV,yBAwDU,WAxDV,yBAmEM,WAEE,gBAGF,WAEE,gBAPF,WAEE,sBAGF,WAEE,sBAPF,WAEE,qBAGF,WAEE,qBAPF,WAEE,mBAGF,WAEE,mBAPF,WAEE,qBAGF,WAEE,qBAPF,WAEE,mBAGF,WAEE,mBF1DN,wBEUE,QACE,YAGF,oBApCJ,cACA,WAcA,iBACE,cACA,WAFF,iBACE,cACA,UAFF,iBACE,cACA,mBAFF,iBACE,cACA,UAFF,iBACE,cACA,UAFF,iBACE,cACA,mBA+BE,aAhDJ,cACA,WAqDQ,UAhEN,cACA,kBA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,UA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,UA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,UA+DM,WAhEN,cACA,mBA+DM,WAhEN,cACA,mBA+DM,WAhEN,cACA,WAuEQ,aAxDV,cAwDU,aAxDV,wBAwDU,aAxDV,yBAwDU,aAxDV,gBAwDU,aAxDV,yBAwDU,aAxDV,yBAwDU,aAxDV,gBAwDU,aAxDV,yBAwDU,aAxDV,yBAwDU,aAxDV,gBAwDU,cAxDV,yBAwDU,cAxDV,yBAmEM,iBAEE,gBAGF,iBAEE,gBAPF,iBAEE,sBAGF,iBAEE,sBAPF,iBAEE,qBAGF,iBAEE,qBAPF,iBAEE,mBAGF,iBAEE,mBAPF,iBAEE,qBAGF,iBAEE,qBAPF,iBAEE,mBAGF,iBAEE,oBF1DN,wBEUE,QACE,YAGF,oBApCJ,cACA,WAcA,iBACE,cACA,WAFF,iBACE,cACA,UAFF,iBACE,cACA,mBAFF,iBACE,cACA,UAFF,iBACE,cACA,UAFF,iBACE,cACA,mBA+BE,aAhDJ,cACA,WAqDQ,UAhEN,cACA,kBA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,UA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,UA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,UA+DM,WAhEN,cACA,mBA+DM,WAhEN,cACA,mBA+DM,WAhEN,cACA,WAuEQ,aAxDV,cAwDU,aAxDV,wBAwDU,aAxDV,yBAwDU,aAxDV,gBAwDU,aAxDV,yBAwDU,aAxDV,yBAwDU,aAxDV,gBAwDU,aAxDV,yBAwDU,aAxDV,yBAwDU,aAxDV,gBAwDU,cAxDV,yBAwDU,cAxDV,yBAmEM,iBAEE,gBAGF,iBAEE,gBAPF,iBAEE,sBAGF,iBAEE,sBAPF,iBAEE,qBAGF,iBAEE,qBAPF,iBAEE,mBAGF,iBAEE,mBAPF,iBAEE,qBAGF,iBAEE,qBAPF,iBAEE,mBAGF,iBAEE,oBF1DN,wBEUE,QACE,YAGF,oBApCJ,cACA,WAcA,iBACE,cACA,WAFF,iBACE,cACA,UAFF,iBACE,cACA,mBAFF,iBACE,cACA,UAFF,iBACE,cACA,UAFF,iBACE,cACA,mBA+BE,aAhDJ,cACA,WAqDQ,UAhEN,cACA,kBA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,UA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,UA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,UA+DM,WAhEN,cACA,mBA+DM,WAhEN,cACA,mBA+DM,WAhEN,cACA,WAuEQ,aAxDV,cAwDU,aAxDV,wBAwDU,aAxDV,yBAwDU,aAxDV,gBAwDU,aAxDV,yBAwDU,aAxDV,yBAwDU,aAxDV,gBAwDU,aAxDV,yBAwDU,aAxDV,yBAwDU,aAxDV,gBAwDU,cAxDV,yBAwDU,cAxDV,yBAmEM,iBAEE,gBAGF,iBAEE,gBAPF,iBAEE,sBAGF,iBAEE,sBAPF,iBAEE,qBAGF,iBAEE
,qBAPF,iBAEE,mBAGF,iBAEE,mBAPF,iBAEE,qBAGF,iBAEE,qBAPF,iBAEE,mBAGF,iBAEE,oBF1DN,yBEUE,QACE,YAGF,oBApCJ,cACA,WAcA,iBACE,cACA,WAFF,iBACE,cACA,UAFF,iBACE,cACA,mBAFF,iBACE,cACA,UAFF,iBACE,cACA,UAFF,iBACE,cACA,mBA+BE,aAhDJ,cACA,WAqDQ,UAhEN,cACA,kBA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,UA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,UA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,mBA+DM,UAhEN,cACA,UA+DM,WAhEN,cACA,mBA+DM,WAhEN,cACA,mBA+DM,WAhEN,cACA,WAuEQ,aAxDV,cAwDU,aAxDV,wBAwDU,aAxDV,yBAwDU,aAxDV,gBAwDU,aAxDV,yBAwDU,aAxDV,yBAwDU,aAxDV,gBAwDU,aAxDV,yBAwDU,aAxDV,yBAwDU,aAxDV,gBAwDU,cAxDV,yBAwDU,cAxDV,yBAmEM,iBAEE,gBAGF,iBAEE,gBAPF,iBAEE,sBAGF,iBAEE,sBAPF,iBAEE,qBAGF,iBAEE,qBAPF,iBAEE,mBAGF,iBAEE,mBAPF,iBAEE,qBAGF,iBAEE,qBAPF,iBAEE,mBAGF,iBAEE,oBCrHV,OAEE,8BACA,2BACA,+BACA,4BAEA,0CACA,gCACA,+CACA,iCACA,kDACA,8DACA,iDACA,4DACA,gDACA,6DAKA,0CAFA,kBRtBO,CQuBP,kBZusB4B,CYzsB5B,UAGA,CAOA,yBAIE,oCACA,0CZ+sB0B,CY9sB1B,yGAHA,mFAFA,aAKA,CAGF,aACE,uBAGF,aACE,sBAIJ,qBACE,gDAOF,aACE,iBAUA,4BACE,eAeF,gCACE,sCAGA,kCACE,sCAOJ,oCACE,sBAGF,qCACE,mBAkBF,kGACE,oDACA,8CAQJ,cACE,oDACA,8CAQA,8BACE,mDACA,6CC5IF,eAOE,sBACA,sBACA,gCACA,8BACA,8BACA,6BACA,6BACA,4BACA,2BAGA,CAlBF,gCAkBE,0CADA,2BACA,CAlBF,iBAOE,sBACA,sBACA,gCACA,8BACA,8BACA,6BACA,6BACA,4BACA,2BAGA,CAlBF,eAOE,sBACA,sBACA,gCACA,8BACA,8BACA,6BACA,6BACA,4BACA,2BAGA,CAlBF,2BAkBE,0CADA,2BACA,CAlBF,YAOE,sBACA,sBACA,gCACA,8BACA,8BACA,6BACA,6BACA,4BACA,2BAGA,CAlBF,eAOE,sBACA,sBACA,gCACA,8BACA,8BACA,6BACA,6BACA,4BACA,2BAGA,CAlBF,6BAkBE,0CADA,2BACA,CAlBF,cAOE,sBACA,sBACA,gCACA,8BACA,8BACA,6BACA,6BACA,4BACA,2BAGA,CAlBF,aAOE,sBACA,sBACA,gCACA,8BACA,8BACA,6BACA,6BACA,4BACA,2BAGA,CAlBF,yBAkBE,0CADA,2BACA,CAlBF,YAOE,sBACA,sBACA,gCACA,8BACA,8BACA,6BACA,6BACA,4BACA,2BAGA,CDiJA,kBAEE,iCADA,eACA,CH3FF,2BGyFA,qBAEE,iCADA,eACA,EH3FF,2BGyFA,qBAEE,iCADA,eACA,EH3FF,2BGyFA,qBAEE,iCADA,eACA,EH3FF,4BGyFA,qBAEE,iCADA,eACA,EEnKN,YACE,mBdu2BsC,Cc91BxC,gBjBiRM,iBALI,CiBrQR,gBAJA,gBADA,sDADA,kDdqmB4B,Cc3lB9B,mBjBsQM,kBiBpQJ,oDADA,gDjBgQQ,CiB3PV,mBjBgQM,kBiB9PJ,qDADA,iDjB0PQ,CkBtRV,WAKE,gClBsRI,gBALI,CkBrRR,iBfm2BsC,CgBp2BxC,cASE,gBAEA,4BADA,kChBq3BsC,CgBn3BtC,2DfGE,sCePF,0BhB43BsC,CgBn4BtC,cnB0RI,cALI,CmBhRR,ehBkmB4B,CgBjmB5B,ehBymB4B,CgB7mB5B,uBCSI,qEDVJ,UAgBA,CCFI,sCDhBN,cCiBQ,iBDGN,yBACE,gBAEA,wDACE,eAKJ,oBAEE,kChBg2BoC,CgB/1BpC,oBhB82BoC,CgBx2BlC,kDARF,0BhBs2BoC,CgBn2BpC,SZXoB,CYoBtB,2CAYE,aAKA,QAAO,CAXP,cAWA,CAKF,qCACE,cACA,UAIF,2BACE,+BhB40BoC,CgB10BpC,UAQF,uBAEE,uChB8yBoC,CgB3yBpC,UAIF,oCE1FA,sClBqiCgC,CgBl8B9B,eAFA,qBAGA,8ChBgsB0B,CgB/rB1B,gBAPA,0BhBsyBoC,CgBxyBpC,wBACA,wBhBorB0B,CgBtrB1B,uBAKA,oBCpFE,6HD0FF,CCtFE,sCD0EJ,oCCzEM,iBDwFN,yEACE,uChB47B8B,CgBn7BlC,wBAOE,6BACA,yBACA,sCAHA,0BhB2xBsC,CgBhyBtC,cAIA,ehBwf4B,CgBzf5B,gBADA,kBADA,UAOA,CAEA,8BACE,UAGF,gFAGE,cAAa,CADb,eACA,CAWJ,iBfjII,yCJ4QE,iBALI,CmBrIR,yDhB4wBsC,CgB3wBtC,oBfnIE,CeuIF,uCAEE,sBACA,wBAFA,oBhBsoB0B,CgBhoB9B,iBf9II,yCJ4QE,iBALI,CmBxHR,wDhBgwBsC,CgB/vBtC,kBfhJE,CeoJF,uCAEE,oBACA,uBAFA,kBhB6nB0B,CgBnnB5B,sBACE,0DhB6uBoC,CgB1uBtC,yBACE,yDhB0uBoC,CgBvuBtC,yBACE,wDhBuuBoC,CgBluBxC,oBAEE,sDhB8tBsC,CgB7tBtC,gBAFA,UhBmlB4B,CgB/kB5B,mDACE,eAGF,uCACE,mBfvLA,sCe2LF,0CACE,mBf5LA,sCegMF,yFhB8sBsC,CgB7sBtC,wFhB8sBsC,CmB75BxC,aACE,sQAUA,gBACA,kCnBk3BsC,CmBj3BtC,iFAEA,uCnB+9BkC,CmBh+BlC,4BAEA,yBnB+9BkC,CmB99BlC,2DlBHE,sCkBJF,0BnBy3BsC,CmBh4BtC,ctBuRI,cALI,CsB7QR,enB+lB4B,CmB9lB5B,enBsmB4B,CmB1mB5B,uCFMI,qEEPJ,UAgBA,CFLI,sCEfN,aFgBQ,iBEMN,mBACE,oBnBs3BoC,CmBh3BlC,kDALF,SnBs+B8B,CmB79BhC,0DAGE,sBADA,oBACA,CAGF,sBAEE,uCnBu1BoC,CmBl1BtC,4BACE,kBACA,uCAIJ,gBlBtCI,yCJ4QE,iBALI,CsB/NR,qBnBquB4B,CmBpuB5B,kBnBquB4B,CmBvuB5B,kBlBvCE,CkB8CJ,gBlB9CI,yCJ4QE,i
BALI,CsBvNR,oBnBiuB4B,CmBhuB5B,iBnBiuB4B,CmBnuB5B,iBlB/CE,CkBwDA,kCACE,sQCxEN,YACE,cAGA,sBAFA,iBpBq6BwC,CoBp6BxC,kBpBs6BwC,CoBn6BxC,8BACE,WACA,mBAIJ,oBAEE,eADA,mBpB25BwC,CoBz5BxC,iBAEA,sCACE,YAEA,aAAY,CADZ,mBACA,CAIJ,kBACE,qCAOA,gBACA,yCACA,+CAEA,wBADA,4BAEA,wBACA,0DpB24BwC,CoBt5BxC,cAEA,UpBy4BwC,CoBx4BxC,iBASA,yBARA,mBAHA,SAWA,CAGA,iCnB3BE,oBmB+BF,8BAEE,iBpBm4BsC,CoBh4BxC,yBACE,sBpB03BsC,CoBv3BxC,wBACE,oBpBs1BoC,CoBp1BpC,kDADA,ShBnCoB,CgBuCtB,0BACE,wBpB5BM,CoB6BN,oBpB7BM,CoB+BN,yCAII,qQAIJ,sCAII,6KAKN,+CAOI,+PANF,wBpBjDM,CoBkDN,oBAKE,CAIJ,2BAEE,YACA,WAFA,mBpBo2BuC,CoB31BvC,2FACE,eACA,UpBy1BqC,CoB30B3C,aACE,kBpBo1BgC,CoBl1BhC,+BACE,qLAIA,0CACA,sBnBjHA,kBmB+GA,mBHlHE,gDGiHF,SAKA,CHlHE,sCG0GJ,+BHzGM,iBGmHJ,qCACE,2KAGF,uCAMI,wKALF,wBAKE,CAKN,gCAEE,cAAa,CADb,mBACA,CAEA,kDAEE,aAAY,CADZ,mBACA,CAKN,mBACE,qBACA,iBpBsyBgC,CoBnyBlC,WAEE,mBACA,oBAFA,iBAEA,CAIE,mDAEE,YACA,YAFA,mBpBwpBwB,CoB/oB1B,8EACE,2LCnLN,YAIE,gBACA,6BAHA,gBACA,UAFA,UAIA,CAEA,kBACE,UAIA,wGrB8gCuC,CqB7gCvC,oGrB6gCuC,CqB1gCzC,8BACE,SAGF,kCAIE,gBH1BF,wBlBkCQ,CqBNN,QrB6/BuC,CC1gCvC,mBoBSA,WrB8/BuC,CqB7/BvC,mBJbE,uGIWF,UAQA,CJfE,sCIMJ,kCJLM,iBIgBJ,yCHjCF,wBlB8hCyC,CqBx/BzC,2CAKE,uCrBu+B8B,CqBt+B9B,yBpB7BA,mBoB0BA,kBACA,crBu+B8B,CqBz+B9B,YrBw+B8B,CqBz+B9B,UpBxBA,CoBkCF,8BAGE,gBHpDF,wBlBkCQ,CqBoBN,QrBm+BuC,CC1gCvC,mBoBoCA,WrBm+BuC,CiB1gCrC,uGIsCF,UAOA,CJzCE,sCIiCJ,8BJhCM,iBI0CJ,qCH3DF,wBlB8hCyC,CqB99BzC,8BAKE,uCrB68B8B,CqB58B9B,yBpBvDA,mBoBoDA,kBACA,crB68B8B,CqB/8B9B,YrB88B8B,CqB/8B9B,UpBlDA,CoB4DF,qBACE,oBAEA,2CACE,0CrBg9BqC,CqB78BvC,uCACE,0CrB48BqC,CsBniC3C,eACE,kBAEA,gGAGE,8CtBwiCoC,CsBtiCpC,iBADA,kDtBwiCoC,CsBpiCtC,qBAYE,gDAPA,YAFA,OAIA,gBADA,oBAKA,oBAVA,kBAOA,iBACA,uBAPA,MAWA,qBLRE,6DKKF,mBANA,SAUA,CLLE,sCKTJ,qBLUM,iBKON,oEAEE,oBAEA,8FACE,kBAGF,oMAGE,uBADA,oBtB6gCkC,CsBzgCpC,sGAEE,uBADA,oBtBwgCkC,CsBngCtC,4BAEE,uBADA,oBtBkgCoC,CsB1/BpC,mLACE,yCACA,0DtB2/BkC,CsBz/BlC,2MAME,kCtBg0BgC,CCh3BpC,sCqB+CI,WADA,YtBm/BgC,CsBr/BhC,mBADA,kBAEA,UrB7CJ,CqBuDA,oDACE,yCACA,0DtB0+BkC,CsBr+BpC,6CACE,sCAIJ,2EAEE,atB1EO,CsB4EP,uFACE,uCtB0yBkC,CuBj4BxC,aAIE,oBAFA,aACA,eAFA,kBAIA,WAEA,iFAIE,cAEA,WAAU,CAHV,kBAEA,QACA,CAIF,0GAGE,UAMF,kBACE,kBACA,UAEA,wBACE,UAWN,kBAEE,mBAQA,sCvB06BsC,CuBz6BtC,2DtBtCE,sCsBkCF,0BvBm1BsC,CuBz1BtC,a1BgPI,cALI,C0BvOR,evByjB4B,CuBxjB5B,evBgkB4B,CuBnkB5B,uBAKA,kBACA,kBtBpCE,CsBgDJ,kHtBhDI,yCJ4QE,iBALI,C0BnNR,kBtBpDE,CsByDJ,kHtBzDI,yCJ4QE,iBALI,C0B1MR,oBtB7DE,CsBkEJ,0DAEE,mBAsBE,iqBtBzEA,4BAA2B,CAD3B,yBACA,CsBsFF,0ItBxEE,2BAA0B,CAD1B,yBsB0EA,2CtBzEA,CsB6EF,uHtB7EE,2BAA0B,CAD1B,wBACA,CuBxBF,gBAME,iCALA,a3BoQE,gBALI,C2B7PN,iBxBu0BoC,CwBx0BpC,UxBsjCqB,CwB/iCvB,eAWE,kCxBoiCqB,CC/jCrB,sCuB0BA,UxBqiCqB,CwB3iCrB,a3BwPE,iBALI,C2BhPN,iBAFA,eACA,qBALA,kBACA,SACA,SvBnBA,CuBgCA,8HAEE,cA/CF,0DAyDI,yQAEA,yDADA,4BAEA,4DAPF,8CxBuhCmB,CwBphCjB,kCAIA,CAGF,sEACE,8CxB4gCiB,CwBvgCf,uDxBugCe,CwB5kCrB,0EAgFI,8EADA,kCACA,CAhFJ,wDAuFE,8CxBq/BmB,CwBl/BjB,4NAEE,iRAEA,6DACA,sEAFA,sBAEA,CAIJ,oEACE,8CxBw+BiB,CwBn+Bf,uDxBm+Be,CwB5kCrB,sEAkHI,4BAlHJ,kEAyHE,8CxBm9BmB,CwBj9BnB,kFACE,2CxBg9BiB,CwB78BnB,8EACE,uDxB48BiB,CwBz8BnB,sGACE,gCxBw8BiB,CwBn8BrB,qDACE,iBA1IF,kVAoJM,UAhIR,kBAME,mCALA,a3BoQE,gBALI,C2B7PN,iBxBu0BoC,CwBx0BpC,UxBsjCqB,CwB/iCvB,iBAWE,iCxBoiCqB,CC/jCrB,sCuB0BA,UxBqiCqB,CwB3iCrB,a3BwPE,iBALI,C2BhPN,iBAFA,eACA,qBALA,kBACA,SACA,SvBnBA,CuBgCA,8IAEE,cA/CF,8DAyDI,sUAEA,yDADA,4BAEA,4DAPF,gDxBuhCmB,CwBphCjB,kCAIA,CAGF,0EACE,gDxB4gCiB,CwBvgCf,sDxBugCe,CwB5kCrB,8EAgFI,8EADA,kCACA,CAhFJ,4DAuFE,gDxBq/BmB,CwBl/BjB,oOAEE,8UAEA,6DACA,sEAFA,sBAEA,CAIJ,wEACE,gDxBw+BiB,CwBn+Bf,sDxBm+Be,CwB5kCrB,0EAkHI,4BAlHJ,sEAyHE,gDxBm9BmB,CwBj9BnB,sFACE,6CxBg9BiB,CwB78BnB,kFACE,sDxB4
8BiB,CwBz8BnB,0GACE,kCxBw8BiB,CwBn8BrB,uDACE,iBA1IF,8VAsJM,UCxJV,KAEE,2BACA,4BACA,uB5BuRI,uBALI,C4BhRR,yBACA,yBACA,oCACA,wBACA,6CACA,kCACA,+CACA,wCACA,iFACA,+BACA,kFPhBA,iCOkCqB,CAFrB,mExBjBE,0CwBUF,0BAKA,eAXA,qBAEA,sC5BsQI,iCALI,C4B/PR,sCACA,sCAJA,wDAMA,kBACA,qBRfI,8HQmBJ,iBAFA,qBAOA,CRpBI,sCQhBN,KRiBQ,iBQqBN,WAGE,wCACA,8CAHA,+BAGA,CAGF,sBAGE,kCACA,wCAFA,yBAEA,CAGF,mBPpDA,uCOsDuB,CACrB,8CAME,0CARF,gCAGA,SAKE,CAIJ,8BACE,8CAME,0CALF,SAKE,CAIJ,mGAME,yCAGA,+CAJA,gCAIA,CAGA,yKAKI,0CAKN,sCAKI,0CAIJ,mDAKE,2CAEA,iDAJA,mCAKA,uCAJA,mBAIA,CAYF,aC/GA,oBACA,oBACA,8BACA,0BACA,0BACA,oCACA,qCACA,2BACA,2BACA,qCACA,wDACA,6BACA,6BACA,uCDkGA,eC/GA,oBACA,oBACA,8BACA,0BACA,0BACA,oCACA,sCACA,2BACA,2BACA,qCACA,wDACA,6BACA,6BACA,uCDkGA,aC/GA,oBACA,oBACA,8BACA,0BACA,0BACA,oCACA,qCACA,2BACA,2BACA,qCACA,wDACA,6BACA,6BACA,uCDkGA,UC/GA,oBACA,oBACA,8BACA,0BACA,0BACA,oCACA,qCACA,2BACA,2BACA,qCACA,wDACA,6BACA,6BACA,uCDkGA,aC/GA,oBACA,oBACA,8BACA,0BACA,0BACA,oCACA,oCACA,2BACA,2BACA,qCACA,wDACA,6BACA,6BACA,uCDkGA,YC/GA,oBACA,oBACA,8BACA,0BACA,0BACA,oCACA,oCACA,2BACA,2BACA,qCACA,wDACA,6BACA,6BACA,uCDkGA,WC/GA,oBACA,oBACA,8BACA,0BACA,0BACA,oCACA,sCACA,2BACA,2BACA,qCACA,wDACA,6BACA,6BACA,uCDkGA,UC/GA,oBACA,oBACA,8BACA,0BACA,0BACA,oCACA,mCACA,2BACA,2BACA,qCACA,wDACA,6BACA,6BACA,uCD4HA,qBChHA,uBACA,8BACA,0BACA,0BACA,oCACA,qCACA,2BACA,2BACA,qCACA,wDACA,gCACA,iCACA,uCACA,mBDmGA,uBChHA,uBACA,8BACA,0BACA,0BACA,oCACA,sCACA,2BACA,2BACA,qCACA,wDACA,gCACA,iCACA,uCACA,mBDmGA,qBChHA,uBACA,8BACA,0BACA,0BACA,oCACA,oCACA,2BACA,2BACA,qCACA,wDACA,gCACA,iCACA,uCACA,mBDmGA,kBChHA,uBACA,8BACA,0BACA,0BACA,oCACA,qCACA,2BACA,2BACA,qCACA,wDACA,gCACA,iCACA,uCACA,mBDmGA,qBChHA,uBACA,8BACA,0BACA,0BACA,oCACA,oCACA,2BACA,2BACA,qCACA,wDACA,gCACA,iCACA,uCACA,mBDmGA,oBChHA,uBACA,8BACA,0BACA,0BACA,oCACA,oCACA,2BACA,2BACA,qCACA,wDACA,gCACA,iCACA,uCACA,mBDmGA,mBChHA,uBACA,8BACA,0BACA,0BACA,oCACA,sCACA,2BACA,2BACA,qCACA,wDACA,gCACA,iCACA,uCACA,mBDmGA,kBChHA,uBACA,8BACA,0BACA,0BACA,oCACA,mCACA,2BACA,2BACA,qCACA,wDACA,gCACA,iCACA,uCACA,mBD+GF,UACE,yBACA,oCACA,wBACA,kCACA,gDACA,wCACA,iDACA,yCACA,gCACA,2CACA,+BACA,qCAEA,yBzB8QwC,CyBpQxC,wBACE,0BAGF,gBACE,gCAWJ,2BCjJE,0BACA,wB7B8NI,0BALI,C6BvNR,kDDkJF,2BCrJE,2BACA,0B7B8NI,2BALI,C6BvNR,kDCnEF,MVgBM,8BUfJ,CVmBI,sCUpBN,MVqBQ,iBUlBN,iBACE,UAMF,qBACE,aAIJ,YACE,SACA,gBVDI,2BUEJ,CVEI,sCULN,YVMQ,iBUDN,gCAEE,YVNE,2BUKF,OAEA,CVHE,sEACE,iBWpBR,sEAME,kBAGF,iBACE,mBCwBE,uBA/BF,gBACA,mCAFA,oCADA,sBAqCI,WAHA,qBACA,kB7B6hBwB,C6B5hBxB,qBAjCJ,CA0DE,6BACE,cD9CN,eAEE,0BACA,8BACA,0BACA,+BACA,8B/BuQI,4BALI,C+BhQR,yCACA,mCACA,8DACA,oDACA,kDACA,yFACA,4DACA,sCACA,8CACA,8CACA,oDACA,qDACA,qCACA,sDACA,2DACA,kCACA,qCACA,mCACA,oCACA,sCAcA,4BADA,uCAEA,6E3BzCE,+C2BoCF,+BALA,a/B6OI,sCALI,C+BjOR,gBAJA,SAFA,uCACA,kEAJA,kBAQA,gBAPA,iC3B9BE,C2B6CF,+BAEE,OACA,qCAFA,QAEA,CAwBA,qBACE,mBAAoB,CAEpB,qCAEE,MAAK,CADL,UACA,CAIJ,mBACE,iBAAkB,CAElB,mCAEE,UADA,OACA,CnB1CJ,wBmB4BA,wBACE,mBAAoB,CAEpB,wCAEE,MAAK,CADL,UACA,CAIJ,sBACE,iBAAkB,CAElB,sCAEE,UADA,OACA,EnB1CJ,wBmB4BA,wBACE,mBAAoB,CAEpB,wCAEE,MAAK,CADL,UACA,CAIJ,sBACE,iBAAkB,CAElB,sCAEE,UADA,OACA,EnB1CJ,wBmB4BA,wBACE,mBAAoB,CAEpB,wCAEE,MAAK,CADL,UACA,CAIJ,sBACE,iBAAkB,CAElB,sCAEE,UADA,OACA,EnB1CJ,yBmB4BA,wBACE,mBAAoB,CAEpB,wCAEE,MAAK,CADL,UACA,CAIJ,sBACE,iBAAkB,CAElB,sCAEE,UADA,OACA,EAUN,uCAEE,YAEA,wCADA,aAFA,QAGA,CCpFA,+BAxBF,yBACA,mCAFA,oCADA,aA8BI,WAHA,qBACA,kB7B6hBwB,C6B5hBxB,qBA1BJ,CAmDE,qCACE,cDgEJ,wCAGE,UAEA,sCADA,aAFA,WADA,KAIA,CClGA,gCAjBF,qCACA,uBAFA,eADA,kCAuBI,WAHA,qBACA,kB7B6hBwB,C6B5hBxB,qBAnBJ,CA4CE,sCACE,cD0EF,gCACE,iBAMJ,0CAGE,UAEA,uCADA,aAFA,WADA,KAIA,CCnHA,kCAIE,WAHA,qBAeE,aAdF,kB7B6hBwB,C6B
5hBxB,qBACA,CAeA,mCA7BJ,qCADA,wBADA,kCAmCM,WAHA,qBACA,mB7B0gBsB,C6BzgBtB,qBAhCN,CAsCE,wCACE,cD2FF,mCACE,iBAON,kBAIE,mDAHA,SACA,6CAGA,SAAQ,CAFR,eAEA,CAMF,eAUE,6BACA,S3BtKE,sD2B+JF,WAEA,oCALA,cAIA,e5Byb4B,C4B3b5B,4EAIA,mBACA,qBACA,mBAPA,U3B7JE,C2ByKF,0CVxLA,kDU0LE,yCAEqB,CAGvB,4CV/LA,mDUiME,2CACA,oBACqB,CAGvB,gDAIE,6BAFA,6CACA,mBACA,CAMJ,oBACE,cAIF,iBAKE,sCAJA,c/BqEI,iBALI,C+B9DR,gBADA,gFAIA,mBAIF,oBAGE,oCAFA,cACA,2EACA,CAIF,oBAEE,4BACA,yBACA,8DACA,2BACA,iCACA,oCACA,4DACA,qDACA,qCACA,sDACA,0CACA,mCEtPF,+BAGE,oBADA,kBAEA,sBAEA,yCAEE,cADA,iBACA,CAKF,kXAME,UAKJ,aACE,aACA,eACA,2BAEA,0BACE,WAIJ,W7BhBI,sC6BoBF,qFAEE,4CAIF,qJ7BTE,4BAA2B,CAD3B,yBACA,C6BmBF,6G7BLE,2BAA0B,CAD1B,wBACA,C6BwBJ,uBAEE,sBADA,sBACA,CAEA,wGAGE,cAGF,yCACE,eAIJ,yEAEE,qBADA,qBACA,CAGF,yEAEE,oBADA,oBACA,CAoBF,oBAEE,uBADA,sBAEA,uBAEA,wDAEE,WAGF,4FAEE,2CAIF,qH7BzFE,2BAA0B,CAD1B,4BACA,C6B8FF,oF7B7GE,yBACA,0B8BxBJ,KAEE,6BACA,+BAEA,4BACA,yCACA,qDACA,uDAGA,aACA,eAGA,gBADA,gBADA,cAEA,CAGF,UAOE,gBACA,SAHA,+BAJA,clCuQI,sCALI,CkC/PR,2CAFA,kEAIA,qBdbI,iGcgBJ,CdZI,sCcGN,UdFQ,iBcaN,gCAEE,qCAIF,wBAEE,kDADA,S3BfoB,C2BoBtB,sCAEE,wCAEA,eADA,mBACA,CAQJ,UAEE,kDACA,kDACA,oDACA,2GACA,yDACA,+CACA,uGAGA,oFAEA,oBAEE,yD9B7CA,wDACA,yD8B2CA,sD9B3CA,C8B+CA,oDAIE,wDADA,iBACA,CAIJ,8DAGE,mDACA,yDAFA,0CAEA,CAGF,yB9B/DE,yBACA,yBAAwB,C8BgExB,mD9BhEA,C8B2EJ,WAEE,qDACA,sCACA,sCAGA,qB9B5FE,gD8BgGF,uDb/GA,oDaiHE,2CACqB,CASzB,eAEE,4BACA,yCACA,8DAGA,gCAEA,yBAGE,qEADA,eADA,eAEA,CAEA,8DAEE,iCAIJ,+DAIE,iCADA,gDADA,eAEA,CAUF,wCAEE,cACA,kBAKF,kDAEE,aACA,YACA,kBAMF,iEACE,WAUF,uBACE,aAEF,qBACE,cC7LJ,QAEE,wBACA,6BACA,0DACA,+DACA,kEACA,8DACA,sCACA,kCACA,oCACA,6DACA,mEACA,sCACA,sCACA,sCACA,sCACA,qRACA,yEACA,0DACA,0CACA,4DAMA,mBAFA,aACA,eAEA,8BACA,8DALA,iBAKA,CAMA,oIAGE,mBAFA,aACA,kBAEA,8BAoBJ,cAKE,mCnC0NI,0CALI,CmCvNR,+CADA,gDADA,6CAKA,qBACA,mBAEA,wCAEE,yCAUJ,YAEE,0BACA,+BAEA,4BACA,2CACA,uDACA,6DAGA,aACA,sBAGA,gBADA,gBADA,cAEA,CAGE,wDAEE,oCAIJ,2BACE,gBASJ,aAGE,6BADA,oBhC6gCkC,CgC9gClC,iBAEA,CAEA,yDAGE,oCAaJ,iBAKE,mBAJA,gBACA,WAGA,CAIF,gBAKE,6BACA,0E/BxIE,qD+BsIF,6BnCsII,4CALI,CmClIR,cAFA,8EftII,8Ce6IJ,CfzII,sCeiIN,gBfhIQ,iBe0IN,sBACE,qBAGF,sBAGE,sDADA,UADA,oBAEA,CAMJ,qBAKE,kDAEA,wBADA,4BAEA,qBAPA,qBAEA,aACA,sBAFA,WAMA,CAGF,mBACE,wCACA,gBvB1HE,wBuBsIA,kBAEI,iBACA,2BAEA,8BACE,mBAEA,6CACE,kBAGF,wCAEE,iDADA,iDACA,CAIJ,qCACE,iBAGF,mCACE,uBACA,gBAGF,kCACE,aAGF,6BAQE,uCACA,mBALA,YAEA,sBAJA,gBAQA,yBf9NJ,gBe2NI,6BAFA,qBAFA,YASA,CAGA,+CACE,aAGF,6CACE,aACA,YAEA,mBADA,SACA,EvB5LR,wBuBsIA,kBAEI,iBACA,2BAEA,8BACE,mBAEA,6CACE,kBAGF,wCAEE,iDADA,iDACA,CAIJ,qCACE,iBAGF,mCACE,uBACA,gBAGF,kCACE,aAGF,6BAQE,uCACA,mBALA,YAEA,sBAJA,gBAQA,yBf9NJ,gBe2NI,6BAFA,qBAFA,YASA,CAGA,+CACE,aAGF,6CACE,aACA,YAEA,mBADA,SACA,EvB5LR,wBuBsIA,kBAEI,iBACA,2BAEA,8BACE,mBAEA,6CACE,kBAGF,wCAEE,iDADA,iDACA,CAIJ,qCACE,iBAGF,mCACE,uBACA,gBAGF,kCACE,aAGF,6BAQE,uCACA,mBALA,YAEA,sBAJA,gBAQA,yBf9NJ,gBe2NI,6BAFA,qBAFA,YASA,CAGA,+CACE,aAGF,6CACE,aACA,YAEA,mBADA,SACA,EvB5LR,yBuBsIA,kBAEI,iBACA,2BAEA,8BACE,mBAEA,6CACE,kBAGF,wCAEE,iDADA,iDACA,CAIJ,qCACE,iBAGF,mCACE,uBACA,gBAGF,kCACE,aAGF,6BAQE,uCACA,mBALA,YAEA,sBAJA,gBAQA,yBf9NJ,gBe2NI,6BAFA,qBAFA,YASA,CAGA,+CACE,aAGF,6CACE,aACA,YAEA,mBADA,SACA,EAtDR,eAEI,iBACA,2BAEA,2BACE,mBAEA,0CACE,kBAGF,qCAEE,iDADA,iDACA,CAIJ,kCACE,iBAGF,gCACE,uBACA,gBAGF,+BACE,aAGF,0BAQE,uCACA,mBALA,YAEA,sBAJA,gBAQA,yBf9NJ,gBe2NI,6BAFA,qBAFA,YASA,CAGA,4CACE,aAGF,0CACE,aACA,YAEA,mBADA,SACA,CAiBZ,yCAGE,sCACA,4CACA,+CACA,8BACA,6BACA,mCACA,mDACA,CAME,mFANF,uRAOI,CCzRN,MAEE,wBACA,wBACA,gCACA,wBACA,2BACA,8CACA,0DACA,gDACA,uBACA,qFACA,+BACA,6BACA,qDACA,sBACA,mBACA,kBACA,+BACA,mCACA,+BASA,qBAEA,2BADA,mCAEA,qEhCjBE,2CgCaF,2
BAJA,aACA,sBAEA,6BADA,YAHA,iBhCRE,CgCqBF,SAEE,aAAY,CADZ,cACA,CAGF,kBAEE,sBADA,kBACA,CAEA,8BhCrBA,0DACA,2DgCqBE,kBhCrBF,CgCyBA,6BhCXA,6DADA,8DgCaE,qBhCZF,CgCmBF,8DAEE,aAIJ,WAKE,2BAFA,cACA,uDACA,CAGF,YAEE,iCADA,2CACA,CAGF,eAGE,oCAFA,kDAEA,CAGF,qCAJE,eAKA,CAQA,sBACE,oCAQJ,aAIE,uCACA,4EAFA,+BADA,gBADA,iEAIA,CAEA,yBhC7FE,wFgCkGJ,aAGE,uCACA,yEAFA,+BADA,iEAGA,CAEA,wBhCxGE,wFgCkHJ,kBAIE,eAAc,CAFd,oDACA,mDAFA,mDAGA,CAEA,mCACE,mCACA,sCAIJ,mBAEE,mDADA,mDACA,CAIF,kBhCpII,iDgCwIF,SACA,OACA,2CALA,kBAEA,QADA,KhCtIE,CgC8IJ,yCAGE,WAGF,wBhC3II,0DACA,2DgC+IJ,2BhCjII,6DADA,6DACA,CgC8IF,kBACE,0CxB3HA,wBwBuHJ,YAQI,aACA,mBAGA,kBAEE,YACA,gBAEA,wBAEE,aAAY,CADZ,aACA,CAKA,mChC1KJ,4BAA2B,CAD3B,yBACA,CgC6KM,iGAGE,0BAEF,oGAGE,6BAIJ,oChC3KJ,2BAA0B,CAD1B,wBACA,CgC8KM,mGAGE,yBAEF,sGAGE,6BCpOZ,WAEE,0CACA,oCACA,0KACA,mDACA,mDACA,qDACA,0FACA,qCACA,kCACA,8CACA,6CACA,qPACA,sCACA,kDACA,8DACA,4PACA,4EACA,sCACA,mCACA,4DACA,qDAIF,kBAGE,mBAMA,4CACA,SjCrBE,gBiCkBF,oCALA,arC+PI,cALI,CqChPR,qBAPA,4EAJA,kBAOA,gBjBtBI,0CiBkBJ,UASA,CjBvBI,sCiBUN,kBjBTQ,iBiBwBN,kCAEE,+CACA,gGAFA,sCAEA,CAEA,wCACE,qDACA,iDAKJ,wBAME,8CACA,4BACA,mDAHA,WAJA,cAEA,0CACA,iBjB7CE,mDiB2CF,wCAOA,CjB9CE,sCiBqCJ,wBjBpCM,iBiBgDN,wBACE,UAGF,wBAGE,oDADA,UADA,SAEA,CAIJ,kBACE,gBAGF,gBAEE,wCACA,+EAFA,+BAEA,CAEA,8BjC7DE,yDACA,0DiC+DA,kEjChEA,+DACA,gEiCoEF,oCACE,aAIF,6BjC3DE,4DADA,4DACA,CiC+DE,2EjC/DF,kEADA,kEACA,CiCoEA,iDjCpEA,4DADA,4DACA,CiC0EJ,gBACE,8EASA,iCAEE,cjC9GA,eAAc,CiC6Gd,cjC7GA,CiCiHA,0DACA,4DAWA,yMjC7HA,gBiCqIA,6CACE,sTACA,6TC1JN,YAEE,4BACA,4BACA,mCAEA,qBACA,gCACA,wDACA,sCACA,4DASA,0FANA,aACA,etCiRI,wCALI,CsCxQR,gBAFA,iDADA,qEAIA,CAMA,kCACE,iDAEA,yCAGE,yCACA,yCAHA,WACA,iDAEA,CAIJ,wBACE,6CCrCJ,YAEE,kCACA,mCvC4RI,8BALI,CuCrRR,2CACA,qCACA,oDACA,oDACA,sDACA,uDACA,+CACA,0DACA,uDACA,gDACA,yEACA,kCACA,kCACA,4CACA,yDACA,mDACA,6DAGA,ajCnBA,gBADA,cACA,CiCuBF,WAOE,yCACA,iFAHA,iCAHA,cvCiQI,wCALI,CuC3PR,sEAFA,kBAKA,qBnBlBI,6HmBqBJ,CnBjBI,sCmBQN,WnBPQ,iBmBkBN,iBAIE,+CACA,qDAHA,uCADA,SAIA,CAGF,iBAGE,+CAEA,iDAHA,uCAEA,SpC2uCgC,CoC9uChC,SAIA,CAGF,qClBnDA,+CkBuDuB,CACrB,sDAFA,wCADA,SAGA,CAGF,yCAIE,kDACA,wDAHA,0CACA,mBAEA,CAKF,wCACE,2CpC8sCgC,CoCzsC9B,kCnC7BF,6DADA,yDACA,CmCmCE,iCnCjDF,8DADA,0DACA,CmCkEJ,eClGE,iCACA,kCxC0RI,iCALI,CwCnRR,yDDmGF,eCtGE,iCACA,kCxC0RI,kCALI,CwCnRR,yDCFF,OAEE,4BACA,4BzCuRI,2BALI,CyChRR,2BACA,sBACA,iDrCOE,4CqCCF,4BALA,qBzCgRI,mCALI,CyCxQR,wCACA,cAHA,4DAKA,kBAEA,wBADA,kBrCHE,CqCSF,aACE,aAKJ,YACE,kBACA,SChCF,OAEE,0BACA,0BACA,0BACA,8BACA,yBACA,oCACA,4EACA,iDACA,8BAOA,oCACA,8BtCHE,4CsCCF,4BADA,4CADA,4DADA,iBtCEE,CsCQJ,eAEE,cAIF,YAEE,iCADA,eACA,CAQF,mBACE,kBvCs+C8B,CuCn+C9B,8BAKE,qBAJA,kBAEA,QADA,MAEA,SACA,CAQF,eACE,iDACA,0CACA,wDACA,sDAJF,iBACE,mDACA,4CACA,0DACA,wDAJF,eACE,iDACA,0CACA,wDACA,sDAJF,YACE,8CACA,uCACA,qDACA,mDAJF,eACE,iDACA,0CACA,wDACA,sDAJF,cACE,gDACA,yCACA,uDACA,qDAJF,aACE,+CACA,wCACA,sDACA,oDAJF,YACE,8CACA,uCACA,qDACA,mDC5DF,gCACE,6BxCyhDgC,EwCphDpC,4BAGE,0B3CkRI,+BALI,C2C3QR,wCACA,oDACA,oDACA,6BACA,6BACA,6CAOA,uCvCRE,+CuCIF,a3CwQI,sCALI,C2ClQR,iCACA,evCNE,CuCaJ,cAQE,2CAHA,mCAJA,aACA,sBACA,uBACA,gBAEA,kBvBtBI,6CuBuBJ,kBAEA,CvBrBI,sCuBYN,cvBXQ,iBuBuBR,4LAEE,oEAGF,4BACE,iBAGF,0CACE,WAIA,uBACE,kDAGE,sCAJJ,uBAKM,gBC3DR,YAEE,2CACA,qCACA,oDACA,oDACA,sDACA,oCACA,sCACA,uDACA,4DACA,sDACA,yDACA,wDACA,yDACA,8CACA,kCACA,kCACA,4CxCHE,iDwCMF,aACA,sBAIA,gBADA,cxCVE,CwCeJ,qBAEE,sBADA,oBACA,CAEA,6CAEE,mCACA,0BASJ,wBAEE,wCACA,mBAFA,UAEA,CAGA,4DAKE,sDAFA,8CACA,qBAFA,SAGA,CAGF,+BAEE,uDADA,8CACA,CAQJ,iBAME,yCACA,iFAHA,iCAFA,cACA,gFAFA,kBAIA,oBAEA,CAEA,6BxCvDE,+BACA,gCwC0DF,4BxC5CE,kCADA,kCACA,CwCgDF,oDAIE,kDAFA,0CACA,mBACA,CAIF,wBAGE,gDACA,sDAFA,wCADA,SAGA,CAIF,kCAC
E,mBAEA,yCAEE,mDADA,qDACA,CAaF,uBACE,mBAGE,qExCvDJ,6DAZA,0BwCwEI,qExC5DJ,2BAA0B,CAZ1B,0DAYA,CwCiEI,+CACE,aAGF,yDAEE,mBAAkB,CADlB,kDACA,CAEA,gEAEE,oDADA,sDACA,ChCtFR,wBgC8DA,0BACE,mBAGE,wExCvDJ,6DAZA,0BwCwEI,wExC5DJ,2BAA0B,CAZ1B,0DAYA,CwCiEI,kDACE,aAGF,4DAEE,mBAAkB,CADlB,kDACA,CAEA,mEAEE,oDADA,sDACA,EhCtFR,wBgC8DA,0BACE,mBAGE,wExCvDJ,6DAZA,0BwCwEI,wExC5DJ,2BAA0B,CAZ1B,0DAYA,CwCiEI,kDACE,aAGF,4DAEE,mBAAkB,CADlB,kDACA,CAEA,mEAEE,oDADA,sDACA,EhCtFR,wBgC8DA,0BACE,mBAGE,wExCvDJ,6DAZA,0BwCwEI,wExC5DJ,2BAA0B,CAZ1B,0DAYA,CwCiEI,kDACE,aAGF,4DAEE,mBAAkB,CADlB,kDACA,CAEA,mEAEE,oDADA,sDACA,EhCtFR,yBgC8DA,0BACE,mBAGE,wExCvDJ,6DAZA,0BwCwEI,wExC5DJ,2BAA0B,CAZ1B,0DAYA,CwCiEI,kDACE,aAGF,4DAEE,mBAAkB,CADlB,kDACA,CAEA,mEAEE,oDADA,sDACA,EAcZ,kBxChJI,gBwCmJF,mCACE,mDAEA,8CACE,sBAaJ,yBACE,sDACA,+CACA,6DACA,4DACA,gEACA,6DACA,iEACA,yDACA,0DACA,oEAVF,2BACE,wDACA,iDACA,+DACA,4DACA,kEACA,6DACA,mEACA,2DACA,4DACA,sEAVF,yBACE,sDACA,+CACA,6DACA,4DACA,gEACA,6DACA,iEACA,yDACA,0DACA,oEAVF,sBACE,mDACA,4CACA,0DACA,4DACA,6DACA,6DACA,8DACA,sDACA,uDACA,iEAVF,yBACE,sDACA,+CACA,6DACA,4DACA,gEACA,6DACA,iEACA,yDACA,0DACA,oEAVF,wBACE,qDACA,8CACA,4DACA,4DACA,+DACA,6DACA,gEACA,wDACA,yDACA,mEAVF,uBACE,oDACA,6CACA,2DACA,4DACA,8DACA,6DACA,+DACA,uDACA,wDACA,kEAVF,sBACE,mDACA,4CACA,0DACA,4DACA,6DACA,6DACA,8DACA,sDACA,uDACA,iEC5LJ,WAEE,0BACA,oVACA,2BACA,kCACA,oEACA,+BACA,qCACA,uEAQA,wEACA,SzCJE,sByCFF,uBAEA,U1CopD2B,C0C9oD3B,oCALA,cAFA,SAOA,CAGA,4BAPA,+BAUE,CAHF,iBAGE,0CADA,oBACA,CAGF,iBAEE,4CACA,0CAFA,SAEA,CAGF,wCAIE,6CAFA,oBACA,gBACA,CAcA,iDATF,wCCjDF,OAEE,uBACA,6BACA,4BACA,0BACA,2B9CyRI,6BALI,C8ClRR,mBACA,+CACA,+CACA,2DACA,iDACA,2CACA,kDACA,sDACA,kEASA,4BADA,oCAEA,uE1CPE,4C0CQF,sCALA,4B9CyQI,mCALI,C8CtQR,eAGA,oBAJA,+B1CAE,C0CWF,eACE,UAGF,kBACE,aAIJ,iBACE,uBAKA,eACA,oBAJA,kBAEA,kBADA,8BAGA,CAEA,mCACE,sCAIJ,cAEE,mBAIA,4BADA,2CAEA,qF1ChCE,0FACA,2F0C4BF,mCAHA,aAEA,2D1C3BE,C0CkCF,yBAEE,sCADA,gDACA,CAIJ,YAEE,qBADA,iCACA,CC9DF,OAEE,uBACA,uBACA,wBACA,yBACA,mBACA,gCACA,2DACA,+CACA,oDACA,8CACA,yFACA,iCACA,iCACA,oCACA,sDACA,sDACA,iCACA,6BACA,uBACA,sDACA,sDAOA,aAEA,YAJA,OASA,SAAQ,CAJR,kBACA,gBARA,eACA,MAIA,WAFA,8BAQA,CAOF,cAGE,8BAEA,oBAJA,kBACA,UAGA,CAGA,0BAEE,4B3B9CE,iCjBg/C8B,CiB5+C9B,sC2BwCJ,0B3BvCM,iB2B2CN,0BACE,c5Cg8CgC,C4C57ClC,kCACE,qB5C67CgC,C4Cz7CpC,yBACE,6CAEA,wCACE,gBACA,gBAGF,qCACE,gBAIJ,uBAEE,mBADA,aAEA,iDAIF,eASE,4BADA,oCAEA,uE3CrFE,4C2CiFF,4BAJA,aACA,sBAWA,SAAQ,CAPR,oBANA,kBAGA,UAUA,CAIF,gBAEE,0BACA,sBACA,0BC5GA,uCADA,aAHA,OAFA,eACA,MAGA,YADA,iCDkH4D,CC5G5D,+BACA,uDD2G0F,CAK5F,cAGE,mBAEA,4F3CrGE,2DACA,4D2CgGF,aACA,cAEA,sC3CnGE,C2CuGF,yBAEE,6IADA,2FACA,CAKJ,aAEE,8CADA,eACA,CAKF,YAIE,cACA,gCAJA,iBAIA,CAIF,cAIE,mBAGA,2C3CvHE,8DADA,+D2CyHF,yFAPA,aACA,cACA,eAEA,yBACA,qE3CtHE,C2C8HF,gBACE,2CnC3GA,wBmCiHF,OACE,0BACA,2CAIF,cAGE,iBADA,kBADA,+BAEA,CAGF,UACE,wBnC9HA,wBmCmIF,oBAEE,wBnCrIA,yBmC0IF,UACE,yBAUA,kBAGE,YACA,QAAO,CAFP,eADA,WAGA,CAEA,iCAEE,S3CzMJ,eAAc,C2CwMV,W3CxMJ,C2C6ME,gE3C7MF,gB2CkNE,8BACE,gBnC1JJ,2BmCwIA,0BAGE,YACA,QAAO,CAFP,eADA,WAGA,CAEA,yCAEE,S3CzMJ,eAAc,C2CwMV,W3CxMJ,C2C6ME,gF3C7MF,gB2CkNE,sCACE,iBnC1JJ,2BmCwIA,0BAGE,YACA,QAAO,CAFP,eADA,WAGA,CAEA,yCAEE,S3CzMJ,eAAc,C2CwMV,W3CxMJ,C2C6ME,gF3C7MF,gB2CkNE,sCACE,iBnC1JJ,2BmCwIA,0BAGE,YACA,QAAO,CAFP,eADA,WAGA,CAEA,yCAEE,S3CzMJ,eAAc,C2CwMV,W3CxMJ,C2C6ME,gF3C7MF,gB2CkNE,sCACE,iBnC1JJ,4BmCwIA,0BAGE,YACA,QAAO,CAFP,eADA,WAGA,CAEA,yCAEE,S3CzMJ,eAAc,C2CwMV,W3CxMJ,C2C6ME,gF3C7MF,gB2CkNE,sCACE,iBErOR,SAEE,yBACA,6BACA,8BACA,+BACA,sBjDwRI,+BALI,CiDjRR,qCACA,yCACA,mDACA,yBACA,gCACA,iCAYA,qBARA,cCjBA,qC/C+lB4B,CHjUxB,qCALI,CkDvRR,kBACA,e/CwmB4B,C+CjmB5B,sBAIA,gBAVA,e/C+mB4B,C8CjmB5B,gCAQA,SAAQ,CCrBR,
gBACA,iBACA,qBACA,iBACA,oBAGA,mBADA,kBAEA,oBDGA,gCAUA,CAEA,gDAEA,wBACE,cAEA,sCADA,mCACA,CAEA,+BAGE,yBACA,mBAFA,WADA,iBAGA,CAKN,2FACE,+CAEA,yGAGE,sCADA,qFADA,QAEA,CAKJ,6FAGE,qCAFA,6CACA,oCACA,CAEA,2GAGE,wCADA,4HADA,UAEA,CAMJ,iGACE,4CAEA,+GAGE,yCADA,qFADA,WAEA,CAKJ,8FAGE,qCAFA,8CACA,oCACA,CAEA,4GAGE,uCADA,4HADA,SAEA,CAsBJ,eAKE,sC7CjGE,8C6C+FF,8BAFA,sCACA,gEAEA,iB7ChGE,C+CnBJ,SAEE,yBACA,6BnD4RI,+BALI,CmDrRR,kCACA,iDACA,6DACA,sDACA,2FACA,6CACA,mCACA,qCnDmRI,kCALI,CmD5QR,kCACA,8CACA,iCACA,iCACA,6CACA,8BACA,iCACA,yDAWA,qBAEA,4BADA,sCAEA,2E/ChBE,8C+CMF,cDxBA,qC/C+lB4B,CHjUxB,qCALI,CkDvRR,kBACA,e/CwmB4B,C+CjmB5B,sBAIA,gBAVA,e/C+mB4B,CgD1lB5B,sCDpBA,gBACA,iBACA,qBACA,iBACA,oBAGA,mBADA,kBAEA,oBCUA,gC/CLE,C+CoBF,wBACE,cAEA,sCADA,mCACA,CAEA,6DAOE,0BAAa,CAHb,WADA,cADA,iBAKA,CAMJ,2FACE,kFAEA,gNAEE,qFAGF,yGAEE,gDADA,QACA,CAGF,uGAEE,sCADA,qCACA,CAOJ,6FAGE,qCAFA,gFACA,oCACA,CAEA,oNAEE,4HAGF,2GAEE,kDADA,MACA,CAGF,yGAEE,wCADA,mCACA,CAQJ,iGACE,+EAEA,4NAEE,qFAGF,+GAEE,mDADA,KACA,CAGF,6GAEE,yCADA,kCACA,CAKJ,iHAQE,+EADA,WAHA,cADA,SAGA,oDALA,kBACA,MAGA,mCAGA,CAMF,8FAGE,qCAFA,iFACA,oCACA,CAEA,sNAEE,4HAGF,4GAEE,iDADA,OACA,CAGF,0GAEE,uCADA,oCACA,CAuBN,gBAKE,6CACA,kF/C5JE,6DACA,8D+CyJF,qCnDyGI,4CALI,CmDtGR,gBADA,6E/CtJE,C+C8JF,sBACE,aAIJ,cAEE,mCADA,yEACA,CCrLF,UACE,kBAGF,wBACE,mBAGF,gBAGE,gBAFA,kBACA,UACA,CCtBA,sBAEE,WACA,WAFA,aAEA,CDuBJ,eAME,2BAJA,aACA,WAEA,mBAJA,kBhCbI,qCgCgBJ,UAGA,ChCfI,sCgCQN,ehCPQ,iBgCiBR,8DAGE,cAGF,wEAEE,2BAGF,wEAEE,4BASA,8BACE,UAEA,eADA,2BACA,CAGF,iJAIE,SAAQ,CADR,SACA,CAGF,oFAGE,UhC5DE,0BgC2DF,SAEA,ChCzDE,sCgCqDJ,oFhCpDM,iBgCiER,8CAQE,mBAMA,gBACA,SAXA,SAQA,UjD1FS,CiDqFT,aAEA,uBAOA,UjD6gDmC,CiDlhDnC,UATA,kBAWA,kBAVA,MhCzEI,6BgCgFJ,SjDkhDmC,CiDvhDnC,SAYA,ChCnFI,sCgCkEN,8ChCjEQ,iBgCqFN,oHAEE,UjDpGO,CiDuGP,WADA,UADA,oBjDugDiC,CiDlgDrC,uBACE,OAGF,uBACE,QAKF,wDAME,wBADA,4BAEA,0BALA,qBAEA,WjDqgDmC,CiDtgDnC,UAIA,CAGF,4BACE,uRAEF,4BACE,wRAQF,qBAGE,SAGA,aACA,uBAHA,OAOA,mBACA,gBAFA,gBjDs9CmC,CiDx9CnC,UAPA,kBACA,QAGA,SjD29CmC,CiDl9CnC,sCAWE,4BADA,qBjDlKO,CiDoKP,SAGA,qCADA,kCAbA,uBAQA,eAPA,cAEA,UjDo9CiC,CiDj9CjC,ejDm9CiC,CiDp9CjC,gBjDo9CiC,CiD18CjC,UjD28CiC,CiDt9CjC,UAGA,mBhCxJE,4BgCmJF,UAcA,ChC7JE,sCgC4IJ,sChC3IM,iBgC+JN,6BACE,SjDw8CiC,CiD/7CrC,kBAGE,cjDk8CmC,CiD97CnC,UjD7LS,CiD0LT,SAEA,sBjD87CmC,CiD/7CnC,mBjD+7CmC,CiDn8CnC,kBACA,UAMA,kBAMA,sFAEE,+BjDm8CiC,CiDh8CnC,qDACE,qBjDhMO,CiDmMT,iCACE,UjDpMO,CiD0LT,0OAEE,+BjDm8CiC,CiDh8CnC,yIACE,qBjDhMO,CiDmMT,iGACE,UjDpMO,CmDdX,8BAQE,6FADA,kBALA,qBAEA,gCACA,gDAFA,6BAKA,CAIF,0BACE,4BAIF,gBAEE,wBACA,yBACA,qCACA,iCACA,mCACA,2CAGA,gCACA,0GAGF,mBAEE,wBACA,yBACA,gCASF,wBACE,GACE,mBAEF,IACE,UACA,gBAKJ,cAEE,wBACA,yBACA,qCACA,mCACA,yCAGA,8BACA,UAGF,iBACE,wBACA,yBAIA,sCACE,8BAEE,mCC/EN,mEAEE,2BACA,2BACA,2BACA,8BACA,8BACA,0CACA,oCACA,mDACA,+DACA,kDACA,qDACA,qC3C6DE,2B2C5CF,cAWI,4BADA,wCAPA,SAKA,gCAHA,aACA,sBACA,eAKA,UAVA,enClBA,0CmCyBA,kBALA,kCAUA,EnC1BA,8DmCYJ,cnCXM,iBRuDJ,2B2C5BE,8BAIE,qFAFA,OADA,MAIA,4BAFA,+BAEA,CAGF,4BAIE,oFAFA,QADA,MAIA,2BAFA,+BAEA,CAGF,4BAME,sFALA,MAMA,4BAGF,2DANE,kCADA,OAEA,gBAHA,OAcA,CANF,+BAKE,mFACA,2BAGF,sDAEE,eAGF,8DAGE,oB3C5BJ,wB2C/BF,cAiEM,2BACA,8BACA,uCAEA,gCACE,aAGF,8BAME,uCALA,aACA,YAEA,mBADA,SAGA,E3CnCN,2B2C5CF,cAWI,4BADA,wCAPA,SAKA,gCAHA,aACA,sBACA,eAKA,UAVA,enClBA,0CmCyBA,kBALA,kCAUA,EnC1BA,8DmCYJ,cnCXM,iBRuDJ,2B2C5BE,8BAIE,qFAFA,OADA,MAIA,4BAFA,+BAEA,CAGF,4BAIE,oFAFA,QADA,MAIA,2BAFA,+BAEA,CAGF,4BAME,sFALA,MAMA,4BAGF,2DANE,kCADA,OAEA,gBAHA,OAcA,CANF,+BAKE,mFACA,2BAGF,sDAEE,eAGF,8DAGE,oB3C5BJ,wB2C/BF,cAiEM,2BACA,8BACA,uCAEA,gCACE,aAGF,8BAME,uCALA,aACA,YAEA,mBADA,SAGA,E3CnCN,2B2C5CF,cAWI,4BADA,wCAPA,SAKA,gCAHA,aACA,sBACA,eAKA,UAVA,enClBA,0CmC
yBA,kBALA,kCAUA,EnC1BA,8DmCYJ,cnCXM,iBRuDJ,2B2C5BE,8BAIE,qFAFA,OADA,MAIA,4BAFA,+BAEA,CAGF,4BAIE,oFAFA,QADA,MAIA,2BAFA,+BAEA,CAGF,4BAME,sFALA,MAMA,4BAGF,2DANE,kCADA,OAEA,gBAHA,OAcA,CANF,+BAKE,mFACA,2BAGF,sDAEE,eAGF,8DAGE,oB3C5BJ,wB2C/BF,cAiEM,2BACA,8BACA,uCAEA,gCACE,aAGF,8BAME,uCALA,aACA,YAEA,mBADA,SAGA,E3CnCN,4B2C5CF,cAWI,4BADA,wCAPA,SAKA,gCAHA,aACA,sBACA,eAKA,UAVA,enClBA,0CmCyBA,kBALA,kCAUA,EnC1BA,+DmCYJ,cnCXM,iBRuDJ,4B2C5BE,8BAIE,qFAFA,OADA,MAIA,4BAFA,+BAEA,CAGF,4BAIE,oFAFA,QADA,MAIA,2BAFA,+BAEA,CAGF,4BAME,sFALA,MAMA,4BAGF,2DANE,kCADA,OAEA,gBAHA,OAcA,CANF,+BAKE,mFACA,2BAGF,sDAEE,eAGF,8DAGE,oB3C5BJ,yB2C/BF,cAiEM,2BACA,8BACA,uCAEA,gCACE,aAGF,8BAME,uCALA,aACA,YAEA,mBADA,SAGA,EA/ER,WAWI,4BADA,wCAPA,SAKA,gCAHA,aACA,sBACA,eAKA,UAVA,enClBA,0CmCyBA,kBALA,kCAUA,CnC1BA,sCmCYJ,WnCXM,iBmC2BF,2BAIE,qFAFA,OADA,MAIA,4BAFA,+BAEA,CAGF,yBAIE,oFAFA,QADA,MAIA,2BAFA,+BAEA,CAGF,yBAME,sFALA,MAMA,4BAGF,qDANE,kCADA,OAEA,gBAHA,OAcA,CANF,4BAKE,mFACA,2BAGF,gDAEE,eAGF,qDAGE,mBA2BR,oBP9GE,sBADA,aAHA,OAFA,eACA,MAGA,YADA,Y7CaS,C6CPT,mCACA,mC7Cm+CkC,CoDr3CpC,kBAEE,mBADA,aAEA,oEAEA,6BAEE,oIADA,qFACA,CAIJ,iBAEE,kDADA,eACA,CAGF,gBACE,YAEA,gBADA,mEACA,CC7IF,aAKE,8BADA,YAHA,qBACA,eAIA,WAHA,qBrDmzCkC,CqD9yClC,wBAEE,WADA,oBACA,CAKJ,gBACE,gBAGF,gBACE,gBAGF,gBACE,iBAKA,+BACE,mDAIJ,4BACE,IACE,UrDmxCgC,EqD/wCpC,kBAGE,8CAFA,wEACA,mBACA,CAGF,4BACE,GACE,uBH9CF,gBAEE,WACA,WAFA,aAEA,CIHF,iBAEE,8EADA,oBACA,CAFF,mBAEE,gFADA,oBACA,CAFF,iBAEE,8EADA,oBACA,CAFF,cAEE,2EADA,oBACA,CAFF,iBAEE,8EADA,oBACA,CAFF,gBAEE,6EADA,oBACA,CAFF,eAEE,4EADA,oBACA,CAFF,cAEE,2EADA,oBACA,CCFF,cACE,qEACA,+FAGE,wCAGE,yDACA,mFATN,gBACE,uEACA,iGAGE,4CAGE,yDACA,mFATN,cACE,qEACA,+FAGE,wCAGE,yDACA,mFATN,WACE,kEACA,4FAGE,kCAGE,0DACA,oFATN,cACE,qEACA,+FAGE,wCAGE,0DACA,oFATN,aACE,oEACA,8FAGE,sCAGE,yDACA,mFATN,YACE,mEACA,6FAGE,oCAGE,2DACA,qFATN,WACE,kEACA,4FAGE,kCAGE,wDACA,kFAOR,oBACE,4EACA,sGAGE,oDAEE,8EACA,wGC1BN,kBAGE,+IAFA,SAEA,CCHF,WAGE,mBAGA,2BALA,oBACA,WzD6c4B,CyD3c5B,+EACA,2BACA,CAEA,eAIE,kBAHA,cAEA,UzDsc0B,CiBjcxB,qCwCNF,SAGA,CxCOE,sCwCZJ,exCaM,iBwCDJ,8DACE,+DCnBN,OACE,kBACA,WAEA,cAGE,WAFA,cACA,kCACA,CAGF,SAKE,YAFA,OAFA,kBACA,MAEA,UACA,CAKF,WACE,uBADF,WACE,sBADF,YACE,yBADF,YACE,iCCrBJ,WAEE,K3D0mCkC,C2DpmCpC,yBAJE,OAHA,eAEA,QAEA,Y3DumCkC,C2DpmCpC,cAGE,Q3DimCkC,C2DvlChC,YAEE,K3DolC8B,C2DhlChC,2BALE,gBAEA,Y3DmlC8B,C2DhlChC,eAEE,Q3D8kC8B,CS9iChC,wBkDxCA,eACE,gBACA,MACA,Y3DmlC8B,C2DhlChC,kBAEE,SADA,gBAEA,Y3D6kC8B,ES9iChC,wBkDxCA,eACE,gBACA,MACA,Y3DmlC8B,C2DhlChC,kBAEE,SADA,gBAEA,Y3D6kC8B,ES9iChC,wBkDxCA,eACE,gBACA,MACA,Y3DmlC8B,C2DhlChC,kBAEE,SADA,gBAEA,Y3D6kC8B,ES9iChC,yBkDxCA,eACE,gBACA,MACA,Y3DmlC8B,C2DhlChC,kBAEE,SADA,gBAEA,Y3D6kC8B,E4D5mCpC,QAGE,mBADA,kBAEA,CAGF,gBAHE,mBAHA,YAUA,CAJF,QAEE,cACA,qBACA,CCRF,2ECSE,6BAEA,mBANA,qBAEA,sBACA,0BAFA,oBAIA,6BANA,mBAOA,CAGA,qGACE,4BCdF,sBAIE,SAGA,WAFA,OAJA,kBAEA,QADA,MAIA,SACA,CCRJ,+BCCE,uBACA,mBCNF,IAEE,mBAGA,8BAJA,qBAGA,eAEA,YAHA,4BlE8rB4B,CmE/nBtB,gBAOI,kCAPJ,WAOI,6BAPJ,cAOI,gCAPJ,cAOI,gCAPJ,mBAOI,qCAPJ,gBAOI,kCAPJ,aAOI,qBAPJ,WAOI,sBAPJ,YAOI,qBAPJ,oBAOI,6BAPJ,kBAOI,2BAPJ,iBAOI,0BAPJ,kBAOI,gCAPJ,iBAOI,0BAPJ,WAOI,oBAPJ,YAOI,sBAPJ,YAOI,qBAPJ,YAOI,sBAPJ,aAOI,oBAPJ,eAOI,wBAPJ,iBAOI,0BAPJ,kBAOI,2BAPJ,iBAOI,0BAPJ,iBAOI,0BAPJ,mBAOI,4BAPJ,oBAOI,6BAPJ,mBAOI,4BAPJ,iBAOI,0BAPJ,mBAOI,4BAPJ,oBAOI,6BAPJ,mBAOI,4BAPJ,UAOI,yBAPJ,gBAOI,+BAPJ,SAOI,wBAPJ,QAOI,uBAPJ,eAOI,8BAPJ,SAOI,wBAPJ,aAOI,4BAPJ,cAOI,6BAPJ,QAOI,uBAPJ,eAOI,8BAPJ,QAOI,uBAPJ,QAOI,0CAPJ,WAOI,6CAPJ,WAOI,6CAPJ,aAOI,0BAjBJ,oBACE,+EADF,sBACE,iFADF,oBACE,+EADF,iBACE,4EADF,oBACE,+EADF,mBACE,8EADF,kBACE,6EADF,iBACE,4EASF,iBAOI,0BAPJ,mBAOI,4BAPJ,mBAOI,4BAPJ,gBAOI,yBAPJ,iBAOI,0BAPJ,OAOI,gB
APJ,QAOI,kBAPJ,SAOI,mBAPJ,UAOI,mBAPJ,WAOI,qBAPJ,YAOI,sBAPJ,SAOI,iBAPJ,UAOI,mBAPJ,WAOI,oBAPJ,OAOI,kBAPJ,QAOI,oBAPJ,SAOI,qBAPJ,kBAOI,yCAPJ,oBAOI,qCAPJ,oBAOI,qCAPJ,QAOI,sFAPJ,UAOI,mBAPJ,YAOI,0FAPJ,cAOI,uBAPJ,YAOI,4FAPJ,cAOI,yBAPJ,eAOI,6FAPJ,iBAOI,0BAPJ,cAOI,2FAPJ,gBAOI,wBAPJ,gBAIQ,sBAGJ,4EAPJ,kBAIQ,sBAGJ,8EAPJ,gBAIQ,sBAGJ,4EAPJ,aAIQ,sBAGJ,yEAPJ,gBAIQ,sBAGJ,4EAPJ,eAIQ,sBAGJ,2EAPJ,cAIQ,sBAGJ,0EAPJ,aAIQ,sBAGJ,yEAPJ,cAIQ,sBAGJ,0EAPJ,cAIQ,sBAGJ,0EAPJ,uBAOI,uDAPJ,yBAOI,yDAPJ,uBAOI,uDAPJ,oBAOI,oDAPJ,uBAOI,uDAPJ,sBAOI,sDAPJ,qBAOI,qDAPJ,oBAOI,oDAPJ,UAOI,2BAPJ,UAOI,2BAPJ,UAOI,2BAPJ,UAOI,2BAPJ,UAOI,2BAjBJ,mBACE,wBADF,mBACE,yBADF,mBACE,wBADF,mBACE,yBADF,oBACE,sBASF,MAOI,oBAPJ,MAOI,oBAPJ,MAOI,oBAPJ,OAOI,qBAPJ,QAOI,qBAPJ,QAOI,yBAPJ,QAOI,sBAPJ,YAOI,0BAPJ,MAOI,qBAPJ,MAOI,qBAPJ,MAOI,qBAPJ,OAOI,sBAPJ,QAOI,sBAPJ,QAOI,0BAPJ,QAOI,uBAPJ,YAOI,2BAPJ,WAOI,wBAPJ,UAOI,6BAPJ,aAOI,gCAPJ,kBAOI,qCAPJ,qBAOI,wCAPJ,aAOI,sBAPJ,aAOI,sBAPJ,eAOI,wBAPJ,eAOI,wBAPJ,WAOI,yBAPJ,aAOI,2BAPJ,mBAOI,iCAPJ,uBAOI,qCAPJ,qBAOI,mCAPJ,wBAOI,iCAPJ,yBAOI,wCAPJ,wBAOI,uCAPJ,wBAOI,uCAPJ,mBAOI,iCAPJ,iBAOI,+BAPJ,oBAOI,6BAPJ,sBAOI,+BAPJ,qBAOI,8BAPJ,qBAOI,mCAPJ,mBAOI,iCAPJ,sBAOI,+BAPJ,uBAOI,sCAPJ,sBAOI,qCAPJ,uBAOI,gCAPJ,iBAOI,0BAPJ,kBAOI,gCAPJ,gBAOI,8BAPJ,mBAOI,4BAPJ,qBAOI,8BAPJ,oBAOI,6BAPJ,aAOI,mBAPJ,SAOI,kBAPJ,SAOI,kBAPJ,SAOI,kBAPJ,SAOI,kBAPJ,SAOI,kBAPJ,SAOI,kBAPJ,YAOI,kBAPJ,KAOI,mBAPJ,KAOI,wBAPJ,KAOI,uBAPJ,KAOI,sBAPJ,KAOI,wBAPJ,KAOI,sBAPJ,QAOI,sBAPJ,MAOI,iDAPJ,MAOI,2DAPJ,MAOI,yDAPJ,MAOI,uDAPJ,MAOI,2DAPJ,MAOI,uDAPJ,SAOI,uDAPJ,MAOI,iDAPJ,MAOI,2DAPJ,MAOI,yDAPJ,MAOI,uDAPJ,MAOI,2DAPJ,MAOI,uDAPJ,SAOI,uDAPJ,MAOI,uBAPJ,MAOI,4BAPJ,MAOI,2BAPJ,MAOI,0BAPJ,MAOI,4BAPJ,MAOI,0BAPJ,SAOI,0BAPJ,MAOI,yBAPJ,MAOI,8BAPJ,MAOI,6BAPJ,MAOI,4BAPJ,MAOI,8BAPJ,MAOI,4BAPJ,SAOI,4BAPJ,MAOI,0BAPJ,MAOI,+BAPJ,MAOI,8BAPJ,MAOI,6BAPJ,MAOI,+BAPJ,MAOI,6BAPJ,SAOI,6BAPJ,MAOI,wBAPJ,MAOI,6BAPJ,MAOI,4BAPJ,MAOI,2BAPJ,MAOI,6BAPJ,MAOI,2BAPJ,SAOI,2BAPJ,KAOI,oBAPJ,KAOI,yBAPJ,KAOI,wBAPJ,KAOI,uBAPJ,KAOI,yBAPJ,KAOI,uBAPJ,MAOI,mDAPJ,MAOI,6DAPJ,MAOI,2DAPJ,MAOI,yDAPJ,MAOI,6DAPJ,MAOI,yDAPJ,MAOI,mDAPJ,MAOI,6DAPJ,MAOI,2DAPJ,MAOI,yDAPJ,MAOI,6DAPJ,MAOI,yDAPJ,MAOI,wBAPJ,MAOI,6BAPJ,MAOI,4BAPJ,MAOI,2BAPJ,MAOI,6BAPJ,MAOI,2BAPJ,MAOI,0BAPJ,MAOI,+BAPJ,MAOI,8BAPJ,MAOI,6BAPJ,MAOI,+BAPJ,MAOI,6BAPJ,MAOI,2BAPJ,MAOI,gCAPJ,MAOI,+BAPJ,MAOI,8BAPJ,MAOI,gCAPJ,MAOI,8BAPJ,MAOI,yBAPJ,MAOI,8BAPJ,MAOI,6BAPJ,MAOI,4BAPJ,MAOI,8BAPJ,MAOI,4BAPJ,OAOI,gBAPJ,OAOI,qBAPJ,OAOI,oBAPJ,OAOI,mBAPJ,OAOI,qBAPJ,OAOI,mBAPJ,WAOI,oBAPJ,WAOI,yBAPJ,WAOI,wBAPJ,WAOI,uBAPJ,WAOI,yBAPJ,WAOI,uBAPJ,cAOI,uBAPJ,cAOI,4BAPJ,cAOI,2BAPJ,cAOI,0BAPJ,cAOI,4BAPJ,cAOI,0BAPJ,gBAOI,+CAPJ,MAOI,2CAPJ,MAOI,0CAPJ,MAOI,wCAPJ,MAOI,0CAPJ,MAOI,4BAPJ,MAOI,yBAPJ,YAOI,4BAPJ,YAOI,4BAPJ,YAOI,8BAPJ,UAOI,0BAPJ,WAOI,0BAPJ,WAOI,0BAPJ,aAOI,0BAPJ,SAOI,0BAPJ,WAOI,6BAPJ,MAOI,wBAPJ,OAOI,2BAPJ,SAOI,0BAPJ,OAOI,wBAPJ,YAOI,0BAPJ,UAOI,2BAPJ,aAOI,4BAPJ,sBAOI,+BAPJ,2BAOI,oCAPJ,8BAOI,uCAPJ,gBAOI,mCAPJ,gBAOI,mCAPJ,iBAOI,oCAPJ,WAOI,6BAPJ,aAOI,6BAPJ,YAOI,+DAPJ,cAIQ,oBAGJ,mEAPJ,gBAIQ,oBAGJ,qEAPJ,cAIQ,oBAGJ,mEAPJ,WAIQ,oBAGJ,gEAPJ,cAIQ,oBAGJ,mEAPJ,aAIQ,oBAGJ,kEAPJ,YAIQ,oBAGJ,iEAPJ,WAIQ,oBAGJ,gEAPJ,YAIQ,oBAGJ,iEAPJ,YAIQ,oBAGJ,iEAPJ,WAIQ,oBAGJ,sEAPJ,YAIQ,oBAGJ,0CAPJ,eAIQ,oBAGJ,+BAPJ,eAIQ,oBAGJ,mCAPJ,qBAIQ,oBAGJ,0CAPJ,oBAIQ,oBAGJ,yCAPJ,oBAIQ,oBAGJ,yCAPJ,YAIQ,oBAGJ,wBAjBJ,iBACE,uBADF,iBACE,sBADF,iBACE,uBADF,kBACE,oBASF,uBAOI,gDAPJ,yBAOI,kDAPJ,uBAOI,gDAPJ,oBAOI,6CAPJ,uBAOI,gDAPJ,sBAOI,+CAPJ,qBAOI,8CAPJ,oBAOI,6CAZF,8CACE,sBADF,8CACE,uBADF,8CACE,sBADF,8CACE,uBADF,gDACE,oBAgBF,0CAOI,uCAPJ,0CAOI,sCAPJ,0CAOI,uCAnBN,wBAIQ,8BAGJ,6FAPJ,0BAIQ,8BAGJ,+FAPJ,wBAIQ,8BAGJ,6FAPJ,qBAIQ,8BAGJ,0FAPJ,wBAIQ,8BAGJ,6FAPJ,uBAIQ,8BAGJ,4FAPJ,sB
AIQ,8BAGJ,2FAPJ,qBAIQ,8BAGJ,0FAPJ,gBAIQ,8BAGJ,kGAZF,gEACE,8BADF,kEACE,gCADF,kEACE,iCADF,kEACE,gCADF,kEACE,iCADF,oEACE,8BAIJ,YAIQ,kBAGJ,4EAPJ,cAIQ,kBAGJ,8EAPJ,YAIQ,kBAGJ,4EAPJ,SAIQ,kBAGJ,yEAPJ,YAIQ,kBAGJ,4EAPJ,WAIQ,kBAGJ,2EAPJ,UAIQ,kBAGJ,0EAPJ,SAIQ,kBAGJ,yEAPJ,UAIQ,kBAGJ,0EAPJ,UAIQ,kBAGJ,0EAPJ,SAIQ,kBAGJ,4EAPJ,gBAIQ,kBAGJ,uCAPJ,mBAIQ,kBAGJ,iFAPJ,kBAIQ,kBAGJ,gFAjBJ,eACE,oBADF,eACE,qBADF,eACE,oBADF,eACE,qBADF,gBACE,kBASF,mBAOI,uDAPJ,qBAOI,yDAPJ,mBAOI,uDAPJ,gBAOI,oDAPJ,mBAOI,uDAPJ,kBAOI,sDAPJ,iBAOI,qDAPJ,gBAOI,oDAPJ,aAOI,8CAPJ,iBAOI,0BAPJ,kBAOI,2BAPJ,kBAOI,2BAPJ,SAOI,8BAPJ,SAOI,8BAPJ,SAOI,gDAPJ,WAOI,0BAPJ,WAOI,mDAPJ,WAOI,gDAPJ,WAOI,mDAPJ,WAOI,mDAPJ,WAOI,oDAPJ,gBAOI,4BAPJ,cAOI,qDAPJ,aAOI,mHAPJ,eAOI,uEAPJ,eAOI,yHAPJ,eAOI,mHAPJ,eAOI,yHAPJ,eAOI,yHAPJ,eAOI,2HAPJ,oBAOI,2EAPJ,kBAOI,6HAPJ,aAOI,uHAPJ,eAOI,2EAPJ,eAOI,6HAPJ,eAOI,uHAPJ,eAOI,6HAPJ,eAOI,6HAPJ,eAOI,+HAPJ,oBAOI,+EAPJ,kBAOI,iIAPJ,gBAOI,yHAPJ,kBAOI,6EAPJ,kBAOI,+HAPJ,kBAOI,yHAPJ,kBAOI,+HAPJ,kBAOI,+HAPJ,kBAOI,iIAPJ,uBAOI,iFAPJ,qBAOI,mIAPJ,eAOI,qHAPJ,iBAOI,yEAPJ,iBAOI,2HAPJ,iBAOI,qHAPJ,iBAOI,2HAPJ,iBAOI,2HAPJ,iBAOI,6HAPJ,sBAOI,6EAPJ,oBAOI,+HAPJ,SAOI,6BAPJ,WAOI,4BAPJ,MAOI,qBAPJ,KAOI,oBAPJ,KAOI,oBAPJ,KAOI,oBAPJ,KAOI,oB1DVR,wB0DGI,gBAOI,qBAPJ,cAOI,sBAPJ,eAOI,qBAPJ,uBAOI,6BAPJ,qBAOI,2BAPJ,oBAOI,0BAPJ,qBAOI,gCAPJ,oBAOI,0BAPJ,aAOI,yBAPJ,mBAOI,+BAPJ,YAOI,wBAPJ,WAOI,uBAPJ,kBAOI,8BAPJ,YAOI,wBAPJ,gBAOI,4BAPJ,iBAOI,6BAPJ,WAOI,uBAPJ,kBAOI,8BAPJ,WAOI,uBAPJ,cAOI,wBAPJ,aAOI,6BAPJ,gBAOI,gCAPJ,qBAOI,qCAPJ,wBAOI,wCAPJ,gBAOI,sBAPJ,gBAOI,sBAPJ,kBAOI,wBAPJ,kBAOI,wBAPJ,cAOI,yBAPJ,gBAOI,2BAPJ,sBAOI,iCAPJ,0BAOI,qCAPJ,wBAOI,mCAPJ,2BAOI,iCAPJ,4BAOI,wCAPJ,2BAOI,uCAPJ,2BAOI,uCAPJ,sBAOI,iCAPJ,oBAOI,+BAPJ,uBAOI,6BAPJ,yBAOI,+BAPJ,wBAOI,8BAPJ,wBAOI,mCAPJ,sBAOI,iCAPJ,yBAOI,+BAPJ,0BAOI,sCAPJ,yBAOI,qCAPJ,0BAOI,gCAPJ,oBAOI,0BAPJ,qBAOI,gCAPJ,mBAOI,8BAPJ,sBAOI,4BAPJ,wBAOI,8BAPJ,uBAOI,6BAPJ,gBAOI,mBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,eAOI,kBAPJ,QAOI,mBAPJ,QAOI,wBAPJ,QAOI,uBAPJ,QAOI,sBAPJ,QAOI,wBAPJ,QAOI,sBAPJ,WAOI,sBAPJ,SAOI,iDAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,uDAPJ,SAOI,2DAPJ,SAOI,uDAPJ,YAOI,uDAPJ,SAOI,iDAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,uDAPJ,SAOI,2DAPJ,SAOI,uDAPJ,YAOI,uDAPJ,SAOI,uBAPJ,SAOI,4BAPJ,SAOI,2BAPJ,SAOI,0BAPJ,SAOI,4BAPJ,SAOI,0BAPJ,YAOI,0BAPJ,SAOI,yBAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,8BAPJ,SAOI,4BAPJ,YAOI,4BAPJ,SAOI,0BAPJ,SAOI,+BAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,+BAPJ,SAOI,6BAPJ,YAOI,6BAPJ,SAOI,wBAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,2BAPJ,SAOI,6BAPJ,SAOI,2BAPJ,YAOI,2BAPJ,QAOI,oBAPJ,QAOI,yBAPJ,QAOI,wBAPJ,QAOI,uBAPJ,QAOI,yBAPJ,QAOI,uBAPJ,SAOI,mDAPJ,SAOI,6DAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,6DAPJ,SAOI,yDAPJ,SAOI,mDAPJ,SAOI,6DAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,6DAPJ,SAOI,yDAPJ,SAOI,wBAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,2BAPJ,SAOI,6BAPJ,SAOI,2BAPJ,SAOI,0BAPJ,SAOI,+BAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,+BAPJ,SAOI,6BAPJ,SAOI,2BAPJ,SAOI,gCAPJ,SAOI,+BAPJ,SAOI,8BAPJ,SAOI,gCAPJ,SAOI,8BAPJ,SAOI,yBAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,8BAPJ,SAOI,4BAPJ,UAOI,gBAPJ,UAOI,qBAPJ,UAOI,oBAPJ,UAOI,mBAPJ,UAOI,qBAPJ,UAOI,mBAPJ,cAOI,oBAPJ,cAOI,yBAPJ,cAOI,wBAPJ,cAOI,uBAPJ,cAOI,yBAPJ,cAOI,uBAPJ,iBAOI,uBAPJ,iBAOI,4BAPJ,iBAOI,2BAPJ,iBAOI,0BAPJ,iBAOI,4BAPJ,iBAOI,0BAPJ,eAOI,0BAPJ,aAOI,2BAPJ,gBAOI,6B1DVR,wB0DGI,gBAOI,qBAPJ,cAOI,sBAPJ,eAOI,qBAPJ,uBAOI,6BAPJ,qBAOI,2BAPJ,oBAOI,0BAPJ,qBAOI,gCAPJ,oBAOI,0BAPJ,aAOI,yBAPJ,mBAOI,+BAPJ,YAOI,wBAPJ,WAOI,uBAPJ,kBAOI,8BAPJ,YAOI,wBAPJ,gBAOI,4BAPJ,iBAOI,6BAPJ,WAOI,uBAPJ,kBAOI,8BAPJ,WAOI,uBAPJ,cAOI,wBAPJ,aAOI,6BAPJ,gBAOI,gCAPJ,qBAOI,qCAPJ,wBAOI,wCAPJ,gBAOI,sBAPJ,gBAOI,sBAPJ,kBAOI,wBAPJ,kBAOI,wBAPJ,cAOI,yBAPJ,gBAOI,2BAPJ,sBAOI,iCAPJ,0BAOI,qCAPJ,wBAOI,mCAPJ,2BAOI,iC
APJ,4BAOI,wCAPJ,2BAOI,uCAPJ,2BAOI,uCAPJ,sBAOI,iCAPJ,oBAOI,+BAPJ,uBAOI,6BAPJ,yBAOI,+BAPJ,wBAOI,8BAPJ,wBAOI,mCAPJ,sBAOI,iCAPJ,yBAOI,+BAPJ,0BAOI,sCAPJ,yBAOI,qCAPJ,0BAOI,gCAPJ,oBAOI,0BAPJ,qBAOI,gCAPJ,mBAOI,8BAPJ,sBAOI,4BAPJ,wBAOI,8BAPJ,uBAOI,6BAPJ,gBAOI,mBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,eAOI,kBAPJ,QAOI,mBAPJ,QAOI,wBAPJ,QAOI,uBAPJ,QAOI,sBAPJ,QAOI,wBAPJ,QAOI,sBAPJ,WAOI,sBAPJ,SAOI,iDAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,uDAPJ,SAOI,2DAPJ,SAOI,uDAPJ,YAOI,uDAPJ,SAOI,iDAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,uDAPJ,SAOI,2DAPJ,SAOI,uDAPJ,YAOI,uDAPJ,SAOI,uBAPJ,SAOI,4BAPJ,SAOI,2BAPJ,SAOI,0BAPJ,SAOI,4BAPJ,SAOI,0BAPJ,YAOI,0BAPJ,SAOI,yBAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,8BAPJ,SAOI,4BAPJ,YAOI,4BAPJ,SAOI,0BAPJ,SAOI,+BAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,+BAPJ,SAOI,6BAPJ,YAOI,6BAPJ,SAOI,wBAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,2BAPJ,SAOI,6BAPJ,SAOI,2BAPJ,YAOI,2BAPJ,QAOI,oBAPJ,QAOI,yBAPJ,QAOI,wBAPJ,QAOI,uBAPJ,QAOI,yBAPJ,QAOI,uBAPJ,SAOI,mDAPJ,SAOI,6DAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,6DAPJ,SAOI,yDAPJ,SAOI,mDAPJ,SAOI,6DAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,6DAPJ,SAOI,yDAPJ,SAOI,wBAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,2BAPJ,SAOI,6BAPJ,SAOI,2BAPJ,SAOI,0BAPJ,SAOI,+BAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,+BAPJ,SAOI,6BAPJ,SAOI,2BAPJ,SAOI,gCAPJ,SAOI,+BAPJ,SAOI,8BAPJ,SAOI,gCAPJ,SAOI,8BAPJ,SAOI,yBAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,8BAPJ,SAOI,4BAPJ,UAOI,gBAPJ,UAOI,qBAPJ,UAOI,oBAPJ,UAOI,mBAPJ,UAOI,qBAPJ,UAOI,mBAPJ,cAOI,oBAPJ,cAOI,yBAPJ,cAOI,wBAPJ,cAOI,uBAPJ,cAOI,yBAPJ,cAOI,uBAPJ,iBAOI,uBAPJ,iBAOI,4BAPJ,iBAOI,2BAPJ,iBAOI,0BAPJ,iBAOI,4BAPJ,iBAOI,0BAPJ,eAOI,0BAPJ,aAOI,2BAPJ,gBAOI,6B1DVR,wB0DGI,gBAOI,qBAPJ,cAOI,sBAPJ,eAOI,qBAPJ,uBAOI,6BAPJ,qBAOI,2BAPJ,oBAOI,0BAPJ,qBAOI,gCAPJ,oBAOI,0BAPJ,aAOI,yBAPJ,mBAOI,+BAPJ,YAOI,wBAPJ,WAOI,uBAPJ,kBAOI,8BAPJ,YAOI,wBAPJ,gBAOI,4BAPJ,iBAOI,6BAPJ,WAOI,uBAPJ,kBAOI,8BAPJ,WAOI,uBAPJ,cAOI,wBAPJ,aAOI,6BAPJ,gBAOI,gCAPJ,qBAOI,qCAPJ,wBAOI,wCAPJ,gBAOI,sBAPJ,gBAOI,sBAPJ,kBAOI,wBAPJ,kBAOI,wBAPJ,cAOI,yBAPJ,gBAOI,2BAPJ,sBAOI,iCAPJ,0BAOI,qCAPJ,wBAOI,mCAPJ,2BAOI,iCAPJ,4BAOI,wCAPJ,2BAOI,uCAPJ,2BAOI,uCAPJ,sBAOI,iCAPJ,oBAOI,+BAPJ,uBAOI,6BAPJ,yBAOI,+BAPJ,wBAOI,8BAPJ,wBAOI,mCAPJ,sBAOI,iCAPJ,yBAOI,+BAPJ,0BAOI,sCAPJ,yBAOI,qCAPJ,0BAOI,gCAPJ,oBAOI,0BAPJ,qBAOI,gCAPJ,mBAOI,8BAPJ,sBAOI,4BAPJ,wBAOI,8BAPJ,uBAOI,6BAPJ,gBAOI,mBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,eAOI,kBAPJ,QAOI,mBAPJ,QAOI,wBAPJ,QAOI,uBAPJ,QAOI,sBAPJ,QAOI,wBAPJ,QAOI,sBAPJ,WAOI,sBAPJ,SAOI,iDAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,uDAPJ,SAOI,2DAPJ,SAOI,uDAPJ,YAOI,uDAPJ,SAOI,iDAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,uDAPJ,SAOI,2DAPJ,SAOI,uDAPJ,YAOI,uDAPJ,SAOI,uBAPJ,SAOI,4BAPJ,SAOI,2BAPJ,SAOI,0BAPJ,SAOI,4BAPJ,SAOI,0BAPJ,YAOI,0BAPJ,SAOI,yBAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,8BAPJ,SAOI,4BAPJ,YAOI,4BAPJ,SAOI,0BAPJ,SAOI,+BAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,+BAPJ,SAOI,6BAPJ,YAOI,6BAPJ,SAOI,wBAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,2BAPJ,SAOI,6BAPJ,SAOI,2BAPJ,YAOI,2BAPJ,QAOI,oBAPJ,QAOI,yBAPJ,QAOI,wBAPJ,QAOI,uBAPJ,QAOI,yBAPJ,QAOI,uBAPJ,SAOI,mDAPJ,SAOI,6DAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,6DAPJ,SAOI,yDAPJ,SAOI,mDAPJ,SAOI,6DAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,6DAPJ,SAOI,yDAPJ,SAOI,wBAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,2BAPJ,SAOI,6BAPJ,SAOI,2BAPJ,SAOI,0BAPJ,SAOI,+BAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,+BAPJ,SAOI,6BAPJ,SAOI,2BAPJ,SAOI,gCAPJ,SAOI,+BAPJ,SAOI,8BAPJ,SAOI,gCAPJ,SAOI,8BAPJ,SAOI,yBAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,8BAPJ,SAOI,4BAPJ,UAOI,gBAPJ,UAOI,qBAPJ,UAOI,oBAPJ,UAOI,mBAPJ,UAOI,qBAPJ,UAOI,mBAPJ,cAOI,oBAPJ,cAOI,yBAPJ,cAOI,wBAPJ,cAOI,uBAPJ,cAOI,yBAPJ,cAOI,uBAPJ,iBAOI,uBAPJ,iBAOI,4BAPJ,iBAOI,2BAPJ,iBAOI,0BAPJ,iBAOI,4BAPJ,iBAOI,0BAPJ,eAOI,0BAPJ,aAOI,2BAPJ,gBAOI,6B1DVR,yB0DGI,gB
AOI,qBAPJ,cAOI,sBAPJ,eAOI,qBAPJ,uBAOI,6BAPJ,qBAOI,2BAPJ,oBAOI,0BAPJ,qBAOI,gCAPJ,oBAOI,0BAPJ,aAOI,yBAPJ,mBAOI,+BAPJ,YAOI,wBAPJ,WAOI,uBAPJ,kBAOI,8BAPJ,YAOI,wBAPJ,gBAOI,4BAPJ,iBAOI,6BAPJ,WAOI,uBAPJ,kBAOI,8BAPJ,WAOI,uBAPJ,cAOI,wBAPJ,aAOI,6BAPJ,gBAOI,gCAPJ,qBAOI,qCAPJ,wBAOI,wCAPJ,gBAOI,sBAPJ,gBAOI,sBAPJ,kBAOI,wBAPJ,kBAOI,wBAPJ,cAOI,yBAPJ,gBAOI,2BAPJ,sBAOI,iCAPJ,0BAOI,qCAPJ,wBAOI,mCAPJ,2BAOI,iCAPJ,4BAOI,wCAPJ,2BAOI,uCAPJ,2BAOI,uCAPJ,sBAOI,iCAPJ,oBAOI,+BAPJ,uBAOI,6BAPJ,yBAOI,+BAPJ,wBAOI,8BAPJ,wBAOI,mCAPJ,sBAOI,iCAPJ,yBAOI,+BAPJ,0BAOI,sCAPJ,yBAOI,qCAPJ,0BAOI,gCAPJ,oBAOI,0BAPJ,qBAOI,gCAPJ,mBAOI,8BAPJ,sBAOI,4BAPJ,wBAOI,8BAPJ,uBAOI,6BAPJ,gBAOI,mBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,YAOI,kBAPJ,eAOI,kBAPJ,QAOI,mBAPJ,QAOI,wBAPJ,QAOI,uBAPJ,QAOI,sBAPJ,QAOI,wBAPJ,QAOI,sBAPJ,WAOI,sBAPJ,SAOI,iDAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,uDAPJ,SAOI,2DAPJ,SAOI,uDAPJ,YAOI,uDAPJ,SAOI,iDAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,uDAPJ,SAOI,2DAPJ,SAOI,uDAPJ,YAOI,uDAPJ,SAOI,uBAPJ,SAOI,4BAPJ,SAOI,2BAPJ,SAOI,0BAPJ,SAOI,4BAPJ,SAOI,0BAPJ,YAOI,0BAPJ,SAOI,yBAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,8BAPJ,SAOI,4BAPJ,YAOI,4BAPJ,SAOI,0BAPJ,SAOI,+BAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,+BAPJ,SAOI,6BAPJ,YAOI,6BAPJ,SAOI,wBAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,2BAPJ,SAOI,6BAPJ,SAOI,2BAPJ,YAOI,2BAPJ,QAOI,oBAPJ,QAOI,yBAPJ,QAOI,wBAPJ,QAOI,uBAPJ,QAOI,yBAPJ,QAOI,uBAPJ,SAOI,mDAPJ,SAOI,6DAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,6DAPJ,SAOI,yDAPJ,SAOI,mDAPJ,SAOI,6DAPJ,SAOI,2DAPJ,SAOI,yDAPJ,SAOI,6DAPJ,SAOI,yDAPJ,SAOI,wBAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,2BAPJ,SAOI,6BAPJ,SAOI,2BAPJ,SAOI,0BAPJ,SAOI,+BAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,+BAPJ,SAOI,6BAPJ,SAOI,2BAPJ,SAOI,gCAPJ,SAOI,+BAPJ,SAOI,8BAPJ,SAOI,gCAPJ,SAOI,8BAPJ,SAOI,yBAPJ,SAOI,8BAPJ,SAOI,6BAPJ,SAOI,4BAPJ,SAOI,8BAPJ,SAOI,4BAPJ,UAOI,gBAPJ,UAOI,qBAPJ,UAOI,oBAPJ,UAOI,mBAPJ,UAOI,qBAPJ,UAOI,mBAPJ,cAOI,oBAPJ,cAOI,yBAPJ,cAOI,wBAPJ,cAOI,uBAPJ,cAOI,yBAPJ,cAOI,uBAPJ,iBAOI,uBAPJ,iBAOI,4BAPJ,iBAOI,2BAPJ,iBAOI,0BAPJ,iBAOI,4BAPJ,iBAOI,0BAPJ,eAOI,0BAPJ,aAOI,2BAPJ,gBAOI,4BAPJ,MAOI,2BAPJ,MAOI,yBAPJ,MAOI,4BAPJ,MAOI,4BCnCZ,aD4BQ,gBAOI,yBAPJ,sBAOI,+BAPJ,eAOI,wBAPJ,cAOI,uBAPJ,qBAOI,8BAPJ,eAOI,wBAPJ,mBAOI,4BAPJ,oBAOI,6BAPJ,cAOI,uBAPJ,qBAOI,8BAPJ,cAOI","sources":["webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/styles/bootstrap.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_banner.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_root.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/vendor/_rfs.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_color-mode.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_reboot.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_variables.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_border-radius.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_type.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_lists.scss","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/styles/variables/_bootstrap.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_images.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_image.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_containers.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_container.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_breakpoints.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_grid.scss","webpack://pydata_sphinx_th
eme/./node_modules/bootstrap/scss/mixins/_grid.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_tables.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_table-variants.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/forms/_labels.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/forms/_form-text.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/forms/_form-control.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_transition.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_gradients.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/forms/_form-select.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/forms/_form-check.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/forms/_form-range.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/forms/_floating-labels.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/forms/_input-group.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_forms.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_buttons.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_buttons.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_transitions.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_dropdown.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_caret.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_button-group.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_nav.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_navbar.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_card.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_accordion.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_breadcrumb.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_pagination.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_pagination.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_badge.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_alert.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_progress.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_list-group.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_close.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_toasts.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_modal.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_backdrop.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_tooltip.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_reset-text.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_popover.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_carousel.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_clearfix.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_spinners.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_offcanvas.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/_placeholders.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/helpers/_color-bg.scss","webpack://pydata_sphinx_theme/./node_modules/boot
strap/scss/helpers/_colored-links.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/helpers/_focus-ring.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/helpers/_icon-link.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/helpers/_ratio.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/helpers/_position.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/helpers/_stacks.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/helpers/_visually-hidden.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_visually-hidden.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/helpers/_stretched-link.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/helpers/_text-truncation.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_text-truncate.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/helpers/_vr.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/mixins/_utilities.scss","webpack://pydata_sphinx_theme/./node_modules/bootstrap/scss/utilities/_api.scss"],"sourcesContent":["/*!\n * Bootstrap v5.3.3 (https://getbootstrap.com/)\n * Copyright 2011-2024 The Bootstrap Authors\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n */:root,[data-bs-theme=light]{--bs-blue: #0d6efd;--bs-indigo: #6610f2;--bs-purple: #6f42c1;--bs-pink: #d63384;--bs-red: #dc3545;--bs-orange: #fd7e14;--bs-yellow: #ffc107;--bs-green: #198754;--bs-teal: #20c997;--bs-cyan: #0dcaf0;--bs-black: #000;--bs-white: #fff;--bs-gray: #6c757d;--bs-gray-dark: #343a40;--bs-gray-100: #f8f9fa;--bs-gray-200: #e9ecef;--bs-gray-300: #dee2e6;--bs-gray-400: #ced4da;--bs-gray-500: #adb5bd;--bs-gray-600: #6c757d;--bs-gray-700: #495057;--bs-gray-800: #343a40;--bs-gray-900: #212529;--bs-primary: #0d6efd;--bs-secondary: #6c757d;--bs-success: #198754;--bs-info: #0dcaf0;--bs-warning: #ffc107;--bs-danger: #dc3545;--bs-light: #f8f9fa;--bs-dark: #212529;--bs-primary-rgb: 13, 110, 253;--bs-secondary-rgb: 108, 117, 125;--bs-success-rgb: 25, 135, 84;--bs-info-rgb: 13, 202, 240;--bs-warning-rgb: 255, 193, 7;--bs-danger-rgb: 220, 53, 69;--bs-light-rgb: 248, 249, 250;--bs-dark-rgb: 33, 37, 41;--bs-primary-text-emphasis: #052c65;--bs-secondary-text-emphasis: #2b2f32;--bs-success-text-emphasis: #0a3622;--bs-info-text-emphasis: #055160;--bs-warning-text-emphasis: #664d03;--bs-danger-text-emphasis: #58151c;--bs-light-text-emphasis: #495057;--bs-dark-text-emphasis: #495057;--bs-primary-bg-subtle: #cfe2ff;--bs-secondary-bg-subtle: #e2e3e5;--bs-success-bg-subtle: #d1e7dd;--bs-info-bg-subtle: #cff4fc;--bs-warning-bg-subtle: #fff3cd;--bs-danger-bg-subtle: #f8d7da;--bs-light-bg-subtle: #fcfcfd;--bs-dark-bg-subtle: #ced4da;--bs-primary-border-subtle: #9ec5fe;--bs-secondary-border-subtle: #c4c8cb;--bs-success-border-subtle: #a3cfbb;--bs-info-border-subtle: #9eeaf9;--bs-warning-border-subtle: #ffe69c;--bs-danger-border-subtle: #f1aeb5;--bs-light-border-subtle: #e9ecef;--bs-dark-border-subtle: #adb5bd;--bs-white-rgb: 255, 255, 255;--bs-black-rgb: 0, 0, 0;--bs-font-sans-serif: system-ui, -apple-system, \"Segoe UI\", Roboto, \"Helvetica Neue\", \"Noto Sans\", \"Liberation Sans\", Arial, sans-serif, \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI Symbol\", \"Noto Color Emoji\";--bs-font-monospace: SFMono-Regular, Menlo, Monaco, Consolas, \"Liberation Mono\", \"Courier New\", monospace;--bs-gradient: linear-gradient(180deg, rgba(255, 255, 255, 0.15), 
rgba(255, 255, 255, 0));--bs-body-font-family: var(--bs-font-sans-serif);--bs-body-font-size:1rem;--bs-body-font-weight: 400;--bs-body-line-height: 1.5;--bs-body-color: #212529;--bs-body-color-rgb: 33, 37, 41;--bs-body-bg: #fff;--bs-body-bg-rgb: 255, 255, 255;--bs-emphasis-color: #000;--bs-emphasis-color-rgb: 0, 0, 0;--bs-secondary-color: rgba(33, 37, 41, 0.75);--bs-secondary-color-rgb: 33, 37, 41;--bs-secondary-bg: #e9ecef;--bs-secondary-bg-rgb: 233, 236, 239;--bs-tertiary-color: rgba(33, 37, 41, 0.5);--bs-tertiary-color-rgb: 33, 37, 41;--bs-tertiary-bg: #f8f9fa;--bs-tertiary-bg-rgb: 248, 249, 250;--bs-heading-color: inherit;--bs-link-color: #0d6efd;--bs-link-color-rgb: 13, 110, 253;--bs-link-decoration: underline;--bs-link-hover-color: #0a58ca;--bs-link-hover-color-rgb: 10, 88, 202;--bs-code-color: #d63384;--bs-highlight-color: #212529;--bs-highlight-bg: #fff3cd;--bs-border-width: 1px;--bs-border-style: solid;--bs-border-color: #dee2e6;--bs-border-color-translucent: rgba(0, 0, 0, 0.175);--bs-border-radius: 0.375rem;--bs-border-radius-sm: 0.25rem;--bs-border-radius-lg: 0.5rem;--bs-border-radius-xl: 1rem;--bs-border-radius-xxl: 2rem;--bs-border-radius-2xl: var(--bs-border-radius-xxl);--bs-border-radius-pill: 50rem;--bs-box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15);--bs-box-shadow-sm: 0 0.125rem 0.25rem rgba(0, 0, 0, 0.075);--bs-box-shadow-lg: 0 1rem 3rem rgba(0, 0, 0, 0.175);--bs-box-shadow-inset: inset 0 1px 2px rgba(0, 0, 0, 0.075);--bs-focus-ring-width: 0.1875rem;--bs-focus-ring-opacity: 1;--bs-focus-ring-color: var(--pst-color-accent);--bs-form-valid-color: #198754;--bs-form-valid-border-color: #198754;--bs-form-invalid-color: #dc3545;--bs-form-invalid-border-color: #dc3545}[data-bs-theme=dark]{color-scheme:dark;--bs-body-color: #dee2e6;--bs-body-color-rgb: 222, 226, 230;--bs-body-bg: #212529;--bs-body-bg-rgb: 33, 37, 41;--bs-emphasis-color: #fff;--bs-emphasis-color-rgb: 255, 255, 255;--bs-secondary-color: rgba(222, 226, 230, 0.75);--bs-secondary-color-rgb: 222, 226, 230;--bs-secondary-bg: #343a40;--bs-secondary-bg-rgb: 52, 58, 64;--bs-tertiary-color: rgba(222, 226, 230, 0.5);--bs-tertiary-color-rgb: 222, 226, 230;--bs-tertiary-bg: #2b3035;--bs-tertiary-bg-rgb: 43, 48, 53;--bs-primary-text-emphasis: #6ea8fe;--bs-secondary-text-emphasis: #a7acb1;--bs-success-text-emphasis: #75b798;--bs-info-text-emphasis: #6edff6;--bs-warning-text-emphasis: #ffda6a;--bs-danger-text-emphasis: #ea868f;--bs-light-text-emphasis: #f8f9fa;--bs-dark-text-emphasis: #dee2e6;--bs-primary-bg-subtle: #031633;--bs-secondary-bg-subtle: #161719;--bs-success-bg-subtle: #051b11;--bs-info-bg-subtle: #032830;--bs-warning-bg-subtle: #332701;--bs-danger-bg-subtle: #2c0b0e;--bs-light-bg-subtle: #343a40;--bs-dark-bg-subtle: #1a1d20;--bs-primary-border-subtle: #084298;--bs-secondary-border-subtle: #41464b;--bs-success-border-subtle: #0f5132;--bs-info-border-subtle: #087990;--bs-warning-border-subtle: #997404;--bs-danger-border-subtle: #842029;--bs-light-border-subtle: #495057;--bs-dark-border-subtle: #343a40;--bs-heading-color: inherit;--bs-link-color: #6ea8fe;--bs-link-hover-color: #8bb9fe;--bs-link-color-rgb: 110, 168, 254;--bs-link-hover-color-rgb: 139, 185, 254;--bs-code-color: #e685b5;--bs-highlight-color: #dee2e6;--bs-highlight-bg: #664d03;--bs-border-color: #495057;--bs-border-color-translucent: rgba(255, 255, 255, 0.15);--bs-form-valid-color: #75b798;--bs-form-valid-border-color: #75b798;--bs-form-invalid-color: #ea868f;--bs-form-invalid-border-color: 
#ea868f}*,*::before,*::after{box-sizing:border-box}@media(prefers-reduced-motion: no-preference){:root{scroll-behavior:smooth}}body{margin:0;font-family:var(--bs-body-font-family);font-size:var(--bs-body-font-size);font-weight:var(--bs-body-font-weight);line-height:var(--bs-body-line-height);color:var(--bs-body-color);text-align:var(--bs-body-text-align);background-color:var(--bs-body-bg);-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:rgba(0,0,0,0)}hr{margin:1rem 0;color:inherit;border:0;border-top:var(--bs-border-width) solid;opacity:.25}h6,.h6,h5,.h5,h4,.h4,h3,.h3,h2,.h2,h1,.h1{margin-top:0;margin-bottom:.5rem;font-weight:500;line-height:1.2;color:var(--bs-heading-color)}h1,.h1{font-size:calc(1.375rem + 1.5vw)}@media(min-width: 1200px){h1,.h1{font-size:2.5rem}}h2,.h2{font-size:calc(1.325rem + 0.9vw)}@media(min-width: 1200px){h2,.h2{font-size:2rem}}h3,.h3{font-size:calc(1.3rem + 0.6vw)}@media(min-width: 1200px){h3,.h3{font-size:1.75rem}}h4,.h4{font-size:calc(1.275rem + 0.3vw)}@media(min-width: 1200px){h4,.h4{font-size:1.5rem}}h5,.h5{font-size:1.25rem}h6,.h6{font-size:1rem}p{margin-top:0;margin-bottom:1rem}abbr[title]{text-decoration:underline dotted;cursor:help;text-decoration-skip-ink:none}address{margin-bottom:1rem;font-style:normal;line-height:inherit}ol,ul{padding-left:2rem}ol,ul,dl{margin-top:0;margin-bottom:1rem}ol ol,ul ul,ol ul,ul ol{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}small,.small{font-size:0.875em}mark,.mark{padding:.1875em;color:var(--bs-highlight-color);background-color:var(--bs-highlight-bg)}sub,sup{position:relative;font-size:0.75em;line-height:0;vertical-align:baseline}sub{bottom:-0.25em}sup{top:-0.5em}a{color:rgba(var(--bs-link-color-rgb), var(--bs-link-opacity, 1));text-decoration:underline}a:hover{--bs-link-color-rgb: var(--bs-link-hover-color-rgb)}a:not([href]):not([class]),a:not([href]):not([class]):hover{color:inherit;text-decoration:none}pre,code,kbd,samp{font-family:var(--bs-font-monospace);font-size:1em}pre{display:block;margin-top:0;margin-bottom:1rem;overflow:auto;font-size:0.875em}pre code{font-size:inherit;color:inherit;word-break:normal}code{font-size:0.875em;color:var(--bs-code-color);word-wrap:break-word}a>code{color:inherit}kbd{padding:.1875rem .375rem;font-size:0.875em;color:var(--bs-body-bg);background-color:var(--bs-body-color);border-radius:.25rem}kbd kbd{padding:0;font-size:1em}figure{margin:0 0 1rem}img,svg{vertical-align:middle}table{caption-side:bottom;border-collapse:collapse}caption{padding-top:.5rem;padding-bottom:.5rem;color:var(--bs-secondary-color);text-align:left}th{text-align:inherit;text-align:-webkit-match-parent}thead,tbody,tfoot,tr,td,th{border-color:inherit;border-style:solid;border-width:0}label{display:inline-block}button{border-radius:0}button:focus:not(:focus-visible){outline:0}input,button,select,optgroup,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,select{text-transform:none}[role=button]{cursor:pointer}select{word-wrap:normal}select:disabled{opacity:1}[list]:not([type=date]):not([type=datetime-local]):not([type=month]):not([type=week]):not([type=time])::-webkit-calendar-picker-indicator{display:none 
!important}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button}button:not(:disabled),[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled){cursor:pointer}::-moz-focus-inner{padding:0;border-style:none}textarea{resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{float:left;width:100%;padding:0;margin-bottom:.5rem;font-size:calc(1.275rem + 0.3vw);line-height:inherit}@media(min-width: 1200px){legend{font-size:1.5rem}}legend+*{clear:left}::-webkit-datetime-edit-fields-wrapper,::-webkit-datetime-edit-text,::-webkit-datetime-edit-minute,::-webkit-datetime-edit-hour-field,::-webkit-datetime-edit-day-field,::-webkit-datetime-edit-month-field,::-webkit-datetime-edit-year-field{padding:0}::-webkit-inner-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-color-swatch-wrapper{padding:0}::file-selector-button{font:inherit;-webkit-appearance:button}output{display:inline-block}iframe{border:0}summary{display:list-item;cursor:pointer}progress{vertical-align:baseline}[hidden]{display:none !important}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:calc(1.625rem + 4.5vw);font-weight:300;line-height:1.2}@media(min-width: 1200px){.display-1{font-size:5rem}}.display-2{font-size:calc(1.575rem + 3.9vw);font-weight:300;line-height:1.2}@media(min-width: 1200px){.display-2{font-size:4.5rem}}.display-3{font-size:calc(1.525rem + 3.3vw);font-weight:300;line-height:1.2}@media(min-width: 1200px){.display-3{font-size:4rem}}.display-4{font-size:calc(1.475rem + 2.7vw);font-weight:300;line-height:1.2}@media(min-width: 1200px){.display-4{font-size:3.5rem}}.display-5{font-size:calc(1.425rem + 2.1vw);font-weight:300;line-height:1.2}@media(min-width: 1200px){.display-5{font-size:3rem}}.display-6{font-size:calc(1.375rem + 1.5vw);font-weight:300;line-height:1.2}@media(min-width: 1200px){.display-6{font-size:2.5rem}}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:0.875em;text-transform:uppercase}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote>:last-child{margin-bottom:0}.blockquote-footer{margin-top:-1rem;margin-bottom:1rem;font-size:0.875em;color:#6c757d}.blockquote-footer::before{content:\"— \"}.img-fluid{max-width:100%;height:auto}.img-thumbnail{padding:.25rem;background-color:var(--bs-body-bg);border:var(--bs-border-width) solid var(--bs-border-color);border-radius:var(--bs-border-radius);max-width:100%;height:auto}.figure{display:inline-block}.figure-img{margin-bottom:.5rem;line-height:1}.figure-caption{font-size:0.875em;color:var(--bs-secondary-color)}.container,.container-fluid,.container-xl,.container-lg,.container-md,.container-sm{--bs-gutter-x: 1.5rem;--bs-gutter-y: 0;width:100%;padding-right:calc(var(--bs-gutter-x)*.5);padding-left:calc(var(--bs-gutter-x)*.5);margin-right:auto;margin-left:auto}@media(min-width: 540px){.container-sm,.container{max-width:540px}}@media(min-width: 720px){.container-md,.container-sm,.container{max-width:720px}}@media(min-width: 960px){.container-lg,.container-md,.container-sm,.container{max-width:960px}}@media(min-width: 1200px){.container-xl,.container-lg,.container-md,.container-sm,.container{max-width:1400px}}:root{--bs-breakpoint-xs: 0;--bs-breakpoint-sm: 540px;--bs-breakpoint-md: 720px;--bs-breakpoint-lg: 960px;--bs-breakpoint-xl: 
1200px}.row{--bs-gutter-x: 1.5rem;--bs-gutter-y: 0;display:flex;flex-wrap:wrap;margin-top:calc(-1*var(--bs-gutter-y));margin-right:calc(-0.5*var(--bs-gutter-x));margin-left:calc(-0.5*var(--bs-gutter-x))}.row>*{flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--bs-gutter-x)*.5);padding-left:calc(var(--bs-gutter-x)*.5);margin-top:var(--bs-gutter-y)}.col{flex:1 0 0%}.row-cols-auto>*{flex:0 0 auto;width:auto}.row-cols-1>*{flex:0 0 auto;width:100%}.row-cols-2>*{flex:0 0 auto;width:50%}.row-cols-3>*{flex:0 0 auto;width:33.33333333%}.row-cols-4>*{flex:0 0 auto;width:25%}.row-cols-5>*{flex:0 0 auto;width:20%}.row-cols-6>*{flex:0 0 auto;width:16.66666667%}.col-auto{flex:0 0 auto;width:auto}.col-1{flex:0 0 auto;width:8.33333333%}.col-2{flex:0 0 auto;width:16.66666667%}.col-3{flex:0 0 auto;width:25%}.col-4{flex:0 0 auto;width:33.33333333%}.col-5{flex:0 0 auto;width:41.66666667%}.col-6{flex:0 0 auto;width:50%}.col-7{flex:0 0 auto;width:58.33333333%}.col-8{flex:0 0 auto;width:66.66666667%}.col-9{flex:0 0 auto;width:75%}.col-10{flex:0 0 auto;width:83.33333333%}.col-11{flex:0 0 auto;width:91.66666667%}.col-12{flex:0 0 auto;width:100%}.offset-1{margin-left:8.33333333%}.offset-2{margin-left:16.66666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.33333333%}.offset-5{margin-left:41.66666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.33333333%}.offset-8{margin-left:66.66666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.33333333%}.offset-11{margin-left:91.66666667%}.g-0,.gx-0{--bs-gutter-x: 0}.g-0,.gy-0{--bs-gutter-y: 0}.g-1,.gx-1{--bs-gutter-x: 0.25rem}.g-1,.gy-1{--bs-gutter-y: 0.25rem}.g-2,.gx-2{--bs-gutter-x: 0.5rem}.g-2,.gy-2{--bs-gutter-y: 0.5rem}.g-3,.gx-3{--bs-gutter-x: 1rem}.g-3,.gy-3{--bs-gutter-y: 1rem}.g-4,.gx-4{--bs-gutter-x: 1.5rem}.g-4,.gy-4{--bs-gutter-y: 1.5rem}.g-5,.gx-5{--bs-gutter-x: 3rem}.g-5,.gy-5{--bs-gutter-y: 3rem}@media(min-width: 540px){.col-sm{flex:1 0 0%}.row-cols-sm-auto>*{flex:0 0 auto;width:auto}.row-cols-sm-1>*{flex:0 0 auto;width:100%}.row-cols-sm-2>*{flex:0 0 auto;width:50%}.row-cols-sm-3>*{flex:0 0 auto;width:33.33333333%}.row-cols-sm-4>*{flex:0 0 auto;width:25%}.row-cols-sm-5>*{flex:0 0 auto;width:20%}.row-cols-sm-6>*{flex:0 0 auto;width:16.66666667%}.col-sm-auto{flex:0 0 auto;width:auto}.col-sm-1{flex:0 0 auto;width:8.33333333%}.col-sm-2{flex:0 0 auto;width:16.66666667%}.col-sm-3{flex:0 0 auto;width:25%}.col-sm-4{flex:0 0 auto;width:33.33333333%}.col-sm-5{flex:0 0 auto;width:41.66666667%}.col-sm-6{flex:0 0 auto;width:50%}.col-sm-7{flex:0 0 auto;width:58.33333333%}.col-sm-8{flex:0 0 auto;width:66.66666667%}.col-sm-9{flex:0 0 auto;width:75%}.col-sm-10{flex:0 0 auto;width:83.33333333%}.col-sm-11{flex:0 0 auto;width:91.66666667%}.col-sm-12{flex:0 0 auto;width:100%}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.33333333%}.offset-sm-2{margin-left:16.66666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.33333333%}.offset-sm-5{margin-left:41.66666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.33333333%}.offset-sm-8{margin-left:66.66666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.33333333%}.offset-sm-11{margin-left:91.66666667%}.g-sm-0,.gx-sm-0{--bs-gutter-x: 0}.g-sm-0,.gy-sm-0{--bs-gutter-y: 0}.g-sm-1,.gx-sm-1{--bs-gutter-x: 0.25rem}.g-sm-1,.gy-sm-1{--bs-gutter-y: 0.25rem}.g-sm-2,.gx-sm-2{--bs-gutter-x: 0.5rem}.g-sm-2,.gy-sm-2{--bs-gutter-y: 0.5rem}.g-sm-3,.gx-sm-3{--bs-gutter-x: 1rem}.g-sm-3,.gy-sm-3{--bs-gutter-y: 1rem}.g-sm-4,.gx-sm-4{--bs-gutter-x: 
1.5rem}.g-sm-4,.gy-sm-4{--bs-gutter-y: 1.5rem}.g-sm-5,.gx-sm-5{--bs-gutter-x: 3rem}.g-sm-5,.gy-sm-5{--bs-gutter-y: 3rem}}@media(min-width: 720px){.col-md{flex:1 0 0%}.row-cols-md-auto>*{flex:0 0 auto;width:auto}.row-cols-md-1>*{flex:0 0 auto;width:100%}.row-cols-md-2>*{flex:0 0 auto;width:50%}.row-cols-md-3>*{flex:0 0 auto;width:33.33333333%}.row-cols-md-4>*{flex:0 0 auto;width:25%}.row-cols-md-5>*{flex:0 0 auto;width:20%}.row-cols-md-6>*{flex:0 0 auto;width:16.66666667%}.col-md-auto{flex:0 0 auto;width:auto}.col-md-1{flex:0 0 auto;width:8.33333333%}.col-md-2{flex:0 0 auto;width:16.66666667%}.col-md-3{flex:0 0 auto;width:25%}.col-md-4{flex:0 0 auto;width:33.33333333%}.col-md-5{flex:0 0 auto;width:41.66666667%}.col-md-6{flex:0 0 auto;width:50%}.col-md-7{flex:0 0 auto;width:58.33333333%}.col-md-8{flex:0 0 auto;width:66.66666667%}.col-md-9{flex:0 0 auto;width:75%}.col-md-10{flex:0 0 auto;width:83.33333333%}.col-md-11{flex:0 0 auto;width:91.66666667%}.col-md-12{flex:0 0 auto;width:100%}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.33333333%}.offset-md-2{margin-left:16.66666667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.33333333%}.offset-md-5{margin-left:41.66666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.33333333%}.offset-md-8{margin-left:66.66666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.33333333%}.offset-md-11{margin-left:91.66666667%}.g-md-0,.gx-md-0{--bs-gutter-x: 0}.g-md-0,.gy-md-0{--bs-gutter-y: 0}.g-md-1,.gx-md-1{--bs-gutter-x: 0.25rem}.g-md-1,.gy-md-1{--bs-gutter-y: 0.25rem}.g-md-2,.gx-md-2{--bs-gutter-x: 0.5rem}.g-md-2,.gy-md-2{--bs-gutter-y: 0.5rem}.g-md-3,.gx-md-3{--bs-gutter-x: 1rem}.g-md-3,.gy-md-3{--bs-gutter-y: 1rem}.g-md-4,.gx-md-4{--bs-gutter-x: 1.5rem}.g-md-4,.gy-md-4{--bs-gutter-y: 1.5rem}.g-md-5,.gx-md-5{--bs-gutter-x: 3rem}.g-md-5,.gy-md-5{--bs-gutter-y: 3rem}}@media(min-width: 960px){.col-lg{flex:1 0 0%}.row-cols-lg-auto>*{flex:0 0 auto;width:auto}.row-cols-lg-1>*{flex:0 0 auto;width:100%}.row-cols-lg-2>*{flex:0 0 auto;width:50%}.row-cols-lg-3>*{flex:0 0 auto;width:33.33333333%}.row-cols-lg-4>*{flex:0 0 auto;width:25%}.row-cols-lg-5>*{flex:0 0 auto;width:20%}.row-cols-lg-6>*{flex:0 0 auto;width:16.66666667%}.col-lg-auto{flex:0 0 auto;width:auto}.col-lg-1{flex:0 0 auto;width:8.33333333%}.col-lg-2{flex:0 0 auto;width:16.66666667%}.col-lg-3{flex:0 0 auto;width:25%}.col-lg-4{flex:0 0 auto;width:33.33333333%}.col-lg-5{flex:0 0 auto;width:41.66666667%}.col-lg-6{flex:0 0 auto;width:50%}.col-lg-7{flex:0 0 auto;width:58.33333333%}.col-lg-8{flex:0 0 auto;width:66.66666667%}.col-lg-9{flex:0 0 auto;width:75%}.col-lg-10{flex:0 0 auto;width:83.33333333%}.col-lg-11{flex:0 0 auto;width:91.66666667%}.col-lg-12{flex:0 0 auto;width:100%}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.33333333%}.offset-lg-2{margin-left:16.66666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.33333333%}.offset-lg-5{margin-left:41.66666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.33333333%}.offset-lg-8{margin-left:66.66666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.33333333%}.offset-lg-11{margin-left:91.66666667%}.g-lg-0,.gx-lg-0{--bs-gutter-x: 0}.g-lg-0,.gy-lg-0{--bs-gutter-y: 0}.g-lg-1,.gx-lg-1{--bs-gutter-x: 0.25rem}.g-lg-1,.gy-lg-1{--bs-gutter-y: 0.25rem}.g-lg-2,.gx-lg-2{--bs-gutter-x: 0.5rem}.g-lg-2,.gy-lg-2{--bs-gutter-y: 0.5rem}.g-lg-3,.gx-lg-3{--bs-gutter-x: 1rem}.g-lg-3,.gy-lg-3{--bs-gutter-y: 1rem}.g-lg-4,.gx-lg-4{--bs-gutter-x: 1.5rem}.g-lg-4,.gy-lg-4{--bs-gutter-y: 
1.5rem}.g-lg-5,.gx-lg-5{--bs-gutter-x: 3rem}.g-lg-5,.gy-lg-5{--bs-gutter-y: 3rem}}@media(min-width: 1200px){.col-xl{flex:1 0 0%}.row-cols-xl-auto>*{flex:0 0 auto;width:auto}.row-cols-xl-1>*{flex:0 0 auto;width:100%}.row-cols-xl-2>*{flex:0 0 auto;width:50%}.row-cols-xl-3>*{flex:0 0 auto;width:33.33333333%}.row-cols-xl-4>*{flex:0 0 auto;width:25%}.row-cols-xl-5>*{flex:0 0 auto;width:20%}.row-cols-xl-6>*{flex:0 0 auto;width:16.66666667%}.col-xl-auto{flex:0 0 auto;width:auto}.col-xl-1{flex:0 0 auto;width:8.33333333%}.col-xl-2{flex:0 0 auto;width:16.66666667%}.col-xl-3{flex:0 0 auto;width:25%}.col-xl-4{flex:0 0 auto;width:33.33333333%}.col-xl-5{flex:0 0 auto;width:41.66666667%}.col-xl-6{flex:0 0 auto;width:50%}.col-xl-7{flex:0 0 auto;width:58.33333333%}.col-xl-8{flex:0 0 auto;width:66.66666667%}.col-xl-9{flex:0 0 auto;width:75%}.col-xl-10{flex:0 0 auto;width:83.33333333%}.col-xl-11{flex:0 0 auto;width:91.66666667%}.col-xl-12{flex:0 0 auto;width:100%}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.33333333%}.offset-xl-2{margin-left:16.66666667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.33333333%}.offset-xl-5{margin-left:41.66666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.33333333%}.offset-xl-8{margin-left:66.66666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.33333333%}.offset-xl-11{margin-left:91.66666667%}.g-xl-0,.gx-xl-0{--bs-gutter-x: 0}.g-xl-0,.gy-xl-0{--bs-gutter-y: 0}.g-xl-1,.gx-xl-1{--bs-gutter-x: 0.25rem}.g-xl-1,.gy-xl-1{--bs-gutter-y: 0.25rem}.g-xl-2,.gx-xl-2{--bs-gutter-x: 0.5rem}.g-xl-2,.gy-xl-2{--bs-gutter-y: 0.5rem}.g-xl-3,.gx-xl-3{--bs-gutter-x: 1rem}.g-xl-3,.gy-xl-3{--bs-gutter-y: 1rem}.g-xl-4,.gx-xl-4{--bs-gutter-x: 1.5rem}.g-xl-4,.gy-xl-4{--bs-gutter-y: 1.5rem}.g-xl-5,.gx-xl-5{--bs-gutter-x: 3rem}.g-xl-5,.gy-xl-5{--bs-gutter-y: 3rem}}.table{--bs-table-color-type: initial;--bs-table-bg-type: initial;--bs-table-color-state: initial;--bs-table-bg-state: initial;--bs-table-color: var(--bs-emphasis-color);--bs-table-bg: var(--bs-body-bg);--bs-table-border-color: var(--bs-border-color);--bs-table-accent-bg: transparent;--bs-table-striped-color: var(--bs-emphasis-color);--bs-table-striped-bg: rgba(var(--bs-emphasis-color-rgb), 0.05);--bs-table-active-color: var(--bs-emphasis-color);--bs-table-active-bg: rgba(var(--bs-emphasis-color-rgb), 0.1);--bs-table-hover-color: var(--bs-emphasis-color);--bs-table-hover-bg: rgba(var(--bs-emphasis-color-rgb), 0.075);width:100%;margin-bottom:1rem;vertical-align:top;border-color:var(--bs-table-border-color)}.table>:not(caption)>*>*{padding:.5rem .5rem;color:var(--bs-table-color-state, var(--bs-table-color-type, var(--bs-table-color)));background-color:var(--bs-table-bg);border-bottom-width:var(--bs-border-width);box-shadow:inset 0 0 0 9999px var(--bs-table-bg-state, var(--bs-table-bg-type, var(--bs-table-accent-bg)))}.table>tbody{vertical-align:inherit}.table>thead{vertical-align:bottom}.table-group-divider{border-top:calc(var(--bs-border-width)*2) solid currentcolor}.caption-top{caption-side:top}.table-sm>:not(caption)>*>*{padding:.25rem .25rem}.table-bordered>:not(caption)>*{border-width:var(--bs-border-width) 0}.table-bordered>:not(caption)>*>*{border-width:0 var(--bs-border-width)}.table-borderless>:not(caption)>*>*{border-bottom-width:0}.table-borderless>:not(:first-child){border-top-width:0}.table-striped>tbody>tr:nth-of-type(odd)>*{--bs-table-color-type: var(--bs-table-striped-color);--bs-table-bg-type: 
var(--bs-table-striped-bg)}.table-striped-columns>:not(caption)>tr>:nth-child(even){--bs-table-color-type: var(--bs-table-striped-color);--bs-table-bg-type: var(--bs-table-striped-bg)}.table-active{--bs-table-color-state: var(--bs-table-active-color);--bs-table-bg-state: var(--bs-table-active-bg)}.table-hover>tbody>tr:hover>*{--bs-table-color-state: var(--bs-table-hover-color);--bs-table-bg-state: var(--bs-table-hover-bg)}.table-primary{--bs-table-color: #000;--bs-table-bg: #cfe2ff;--bs-table-border-color: #a6b5cc;--bs-table-striped-bg: #c5d7f2;--bs-table-striped-color: #000;--bs-table-active-bg: #bacbe6;--bs-table-active-color: #000;--bs-table-hover-bg: #bfd1ec;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-secondary{--bs-table-color: #000;--bs-table-bg: #e2e3e5;--bs-table-border-color: #b5b6b7;--bs-table-striped-bg: #d7d8da;--bs-table-striped-color: #000;--bs-table-active-bg: #cbccce;--bs-table-active-color: #000;--bs-table-hover-bg: #d1d2d4;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-success{--bs-table-color: #000;--bs-table-bg: #d1e7dd;--bs-table-border-color: #a7b9b1;--bs-table-striped-bg: #c7dbd2;--bs-table-striped-color: #000;--bs-table-active-bg: #bcd0c7;--bs-table-active-color: #000;--bs-table-hover-bg: #c1d6cc;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-info{--bs-table-color: #000;--bs-table-bg: #cff4fc;--bs-table-border-color: #a6c3ca;--bs-table-striped-bg: #c5e8ef;--bs-table-striped-color: #000;--bs-table-active-bg: #badce3;--bs-table-active-color: #000;--bs-table-hover-bg: #bfe2e9;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-warning{--bs-table-color: #000;--bs-table-bg: #fff3cd;--bs-table-border-color: #ccc2a4;--bs-table-striped-bg: #f2e7c3;--bs-table-striped-color: #000;--bs-table-active-bg: #e6dbb9;--bs-table-active-color: #000;--bs-table-hover-bg: #ece1be;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-danger{--bs-table-color: #000;--bs-table-bg: #f8d7da;--bs-table-border-color: #c6acae;--bs-table-striped-bg: #eccccf;--bs-table-striped-color: #000;--bs-table-active-bg: #dfc2c4;--bs-table-active-color: #000;--bs-table-hover-bg: #e5c7ca;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-light{--bs-table-color: #000;--bs-table-bg: #f8f9fa;--bs-table-border-color: #c6c7c8;--bs-table-striped-bg: #ecedee;--bs-table-striped-color: #000;--bs-table-active-bg: #dfe0e1;--bs-table-active-color: #000;--bs-table-hover-bg: #e5e6e7;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-dark{--bs-table-color: #fff;--bs-table-bg: #212529;--bs-table-border-color: #4d5154;--bs-table-striped-bg: #2c3034;--bs-table-striped-color: #fff;--bs-table-active-bg: #373b3e;--bs-table-active-color: #fff;--bs-table-hover-bg: #323539;--bs-table-hover-color: #fff;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-responsive{overflow-x:auto;-webkit-overflow-scrolling:touch}@media(max-width: 539.98px){.table-responsive-sm{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media(max-width: 719.98px){.table-responsive-md{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media(max-width: 959.98px){.table-responsive-lg{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media(max-width: 
1199.98px){.table-responsive-xl{overflow-x:auto;-webkit-overflow-scrolling:touch}}.form-label{margin-bottom:.5rem}.col-form-label{padding-top:calc(0.375rem + var(--bs-border-width));padding-bottom:calc(0.375rem + var(--bs-border-width));margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(0.5rem + var(--bs-border-width));padding-bottom:calc(0.5rem + var(--bs-border-width));font-size:1.25rem}.col-form-label-sm{padding-top:calc(0.25rem + var(--bs-border-width));padding-bottom:calc(0.25rem + var(--bs-border-width));font-size:0.875rem}.form-text{margin-top:.25rem;font-size:0.875em;color:var(--bs-secondary-color)}.form-control{display:block;width:100%;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:var(--bs-body-color);appearance:none;background-color:var(--bs-body-bg);background-clip:padding-box;border:var(--bs-border-width) solid var(--bs-border-color);border-radius:var(--bs-border-radius);transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: reduce){.form-control{transition:none}}.form-control[type=file]{overflow:hidden}.form-control[type=file]:not(:disabled):not([readonly]){cursor:pointer}.form-control:focus{color:var(--bs-body-color);background-color:var(--bs-body-bg);border-color:#86b7fe;outline:0;box-shadow:0 0 0 .1875rem var(--pst-color-accent)}.form-control::-webkit-date-and-time-value{min-width:85px;height:1.5em;margin:0}.form-control::-webkit-datetime-edit{display:block;padding:0}.form-control::placeholder{color:var(--bs-secondary-color);opacity:1}.form-control:disabled{background-color:var(--bs-secondary-bg);opacity:1}.form-control::file-selector-button{padding:.375rem .75rem;margin:-0.375rem -0.75rem;margin-inline-end:.75rem;color:var(--bs-body-color);background-color:var(--bs-tertiary-bg);pointer-events:none;border-color:inherit;border-style:solid;border-width:0;border-inline-end-width:var(--bs-border-width);border-radius:0;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: reduce){.form-control::file-selector-button{transition:none}}.form-control:hover:not(:disabled):not([readonly])::file-selector-button{background-color:var(--bs-secondary-bg)}.form-control-plaintext{display:block;width:100%;padding:.375rem 0;margin-bottom:0;line-height:1.5;color:var(--bs-body-color);background-color:rgba(0,0,0,0);border:solid rgba(0,0,0,0);border-width:var(--bs-border-width) 0}.form-control-plaintext:focus{outline:0}.form-control-plaintext.form-control-sm,.form-control-plaintext.form-control-lg{padding-right:0;padding-left:0}.form-control-sm{min-height:calc(1.5em + 0.5rem + calc(var(--bs-border-width) * 2));padding:.25rem .5rem;font-size:0.875rem;border-radius:var(--bs-border-radius-sm)}.form-control-sm::file-selector-button{padding:.25rem .5rem;margin:-0.25rem -0.5rem;margin-inline-end:.5rem}.form-control-lg{min-height:calc(1.5em + 1rem + calc(var(--bs-border-width) * 2));padding:.5rem 1rem;font-size:1.25rem;border-radius:var(--bs-border-radius-lg)}.form-control-lg::file-selector-button{padding:.5rem 1rem;margin:-0.5rem -1rem;margin-inline-end:1rem}textarea.form-control{min-height:calc(1.5em + 0.75rem + calc(var(--bs-border-width) * 2))}textarea.form-control-sm{min-height:calc(1.5em + 0.5rem + calc(var(--bs-border-width) * 2))}textarea.form-control-lg{min-height:calc(1.5em + 1rem + calc(var(--bs-border-width) * 2))}.form-control-color{width:3rem;height:calc(1.5em + 0.75rem + 
calc(var(--bs-border-width) * 2));padding:.375rem}.form-control-color:not(:disabled):not([readonly]){cursor:pointer}.form-control-color::-moz-color-swatch{border:0 !important;border-radius:var(--bs-border-radius)}.form-control-color::-webkit-color-swatch{border:0 !important;border-radius:var(--bs-border-radius)}.form-control-color.form-control-sm{height:calc(1.5em + 0.5rem + calc(var(--bs-border-width) * 2))}.form-control-color.form-control-lg{height:calc(1.5em + 1rem + calc(var(--bs-border-width) * 2))}.form-select{--bs-form-select-bg-img: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='m2 5 6 6 6-6'/%3e%3c/svg%3e\");display:block;width:100%;padding:.375rem 2.25rem .375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:var(--bs-body-color);appearance:none;background-color:var(--bs-body-bg);background-image:var(--bs-form-select-bg-img),var(--bs-form-select-bg-icon, none);background-repeat:no-repeat;background-position:right .75rem center;background-size:16px 12px;border:var(--bs-border-width) solid var(--bs-border-color);border-radius:var(--bs-border-radius);transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: reduce){.form-select{transition:none}}.form-select:focus{border-color:#86b7fe;outline:0;box-shadow:0 0 0 .1875rem var(--pst-color-accent)}.form-select[multiple],.form-select[size]:not([size=\"1\"]){padding-right:.75rem;background-image:none}.form-select:disabled{background-color:var(--bs-secondary-bg)}.form-select:-moz-focusring{color:rgba(0,0,0,0);text-shadow:0 0 0 var(--bs-body-color)}.form-select-sm{padding-top:.25rem;padding-bottom:.25rem;padding-left:.5rem;font-size:0.875rem;border-radius:var(--bs-border-radius-sm)}.form-select-lg{padding-top:.5rem;padding-bottom:.5rem;padding-left:1rem;font-size:1.25rem;border-radius:var(--bs-border-radius-lg)}[data-bs-theme=dark] .form-select{--bs-form-select-bg-img: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23dee2e6' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='m2 5 6 6 6-6'/%3e%3c/svg%3e\")}.form-check{display:block;min-height:1.5rem;padding-left:1.5em;margin-bottom:.125rem}.form-check .form-check-input{float:left;margin-left:-1.5em}.form-check-reverse{padding-right:1.5em;padding-left:0;text-align:right}.form-check-reverse .form-check-input{float:right;margin-right:-1.5em;margin-left:0}.form-check-input{--bs-form-check-bg: var(--bs-body-bg);flex-shrink:0;width:1em;height:1em;margin-top:.25em;vertical-align:top;appearance:none;background-color:var(--bs-form-check-bg);background-image:var(--bs-form-check-bg-image);background-repeat:no-repeat;background-position:center;background-size:contain;border:var(--bs-border-width) solid var(--bs-border-color);print-color-adjust:exact}.form-check-input[type=checkbox]{border-radius:.25em}.form-check-input[type=radio]{border-radius:50%}.form-check-input:active{filter:brightness(90%)}.form-check-input:focus{border-color:#86b7fe;outline:0;box-shadow:0 0 0 .1875rem var(--pst-color-accent)}.form-check-input:checked{background-color:#0d6efd;border-color:#0d6efd}.form-check-input:checked[type=checkbox]{--bs-form-check-bg-image: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' 
d='m6 10 3 3 6-6'/%3e%3c/svg%3e\")}.form-check-input:checked[type=radio]{--bs-form-check-bg-image: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='2' fill='%23fff'/%3e%3c/svg%3e\")}.form-check-input[type=checkbox]:indeterminate{background-color:#0d6efd;border-color:#0d6efd;--bs-form-check-bg-image: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M6 10h8'/%3e%3c/svg%3e\")}.form-check-input:disabled{pointer-events:none;filter:none;opacity:.5}.form-check-input[disabled]~.form-check-label,.form-check-input:disabled~.form-check-label{cursor:default;opacity:.5}.form-switch{padding-left:2.5em}.form-switch .form-check-input{--bs-form-switch-bg: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='rgba%280, 0, 0, 0.25%29'/%3e%3c/svg%3e\");width:2em;margin-left:-2.5em;background-image:var(--bs-form-switch-bg);background-position:left center;border-radius:2em;transition:background-position .15s ease-in-out}@media(prefers-reduced-motion: reduce){.form-switch .form-check-input{transition:none}}.form-switch .form-check-input:focus{--bs-form-switch-bg: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%2386b7fe'/%3e%3c/svg%3e\")}.form-switch .form-check-input:checked{background-position:right center;--bs-form-switch-bg: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%23fff'/%3e%3c/svg%3e\")}.form-switch.form-check-reverse{padding-right:2.5em;padding-left:0}.form-switch.form-check-reverse .form-check-input{margin-right:-2.5em;margin-left:0}.form-check-inline{display:inline-block;margin-right:1rem}.btn-check{position:absolute;clip:rect(0, 0, 0, 0);pointer-events:none}.btn-check[disabled]+.btn,.btn-check:disabled+.btn{pointer-events:none;filter:none;opacity:.65}[data-bs-theme=dark] .form-switch .form-check-input:not(:checked):not(:focus){--bs-form-switch-bg: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='rgba%28255, 255, 255, 0.25%29'/%3e%3c/svg%3e\")}.form-range{width:100%;height:1.375rem;padding:0;appearance:none;background-color:rgba(0,0,0,0)}.form-range:focus{outline:0}.form-range:focus::-webkit-slider-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .1875rem var(--pst-color-accent)}.form-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .1875rem var(--pst-color-accent)}.form-range::-moz-focus-outer{border:0}.form-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-0.25rem;appearance:none;background-color:#0d6efd;border:0;border-radius:1rem;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: reduce){.form-range::-webkit-slider-thumb{transition:none}}.form-range::-webkit-slider-thumb:active{background-color:#b6d4fe}.form-range::-webkit-slider-runnable-track{width:100%;height:.5rem;color:rgba(0,0,0,0);cursor:pointer;background-color:var(--bs-secondary-bg);border-color:rgba(0,0,0,0);border-radius:1rem}.form-range::-moz-range-thumb{width:1rem;height:1rem;appearance:none;background-color:#0d6efd;border:0;border-radius:1rem;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: 
reduce){.form-range::-moz-range-thumb{transition:none}}.form-range::-moz-range-thumb:active{background-color:#b6d4fe}.form-range::-moz-range-track{width:100%;height:.5rem;color:rgba(0,0,0,0);cursor:pointer;background-color:var(--bs-secondary-bg);border-color:rgba(0,0,0,0);border-radius:1rem}.form-range:disabled{pointer-events:none}.form-range:disabled::-webkit-slider-thumb{background-color:var(--bs-secondary-color)}.form-range:disabled::-moz-range-thumb{background-color:var(--bs-secondary-color)}.form-floating{position:relative}.form-floating>.form-control,.form-floating>.form-control-plaintext,.form-floating>.form-select{height:calc(3.5rem + calc(var(--bs-border-width) * 2));min-height:calc(3.5rem + calc(var(--bs-border-width) * 2));line-height:1.25}.form-floating>label{position:absolute;top:0;left:0;z-index:2;height:100%;padding:1rem .75rem;overflow:hidden;text-align:start;text-overflow:ellipsis;white-space:nowrap;pointer-events:none;border:var(--bs-border-width) solid rgba(0,0,0,0);transform-origin:0 0;transition:opacity .1s ease-in-out,transform .1s ease-in-out}@media(prefers-reduced-motion: reduce){.form-floating>label{transition:none}}.form-floating>.form-control,.form-floating>.form-control-plaintext{padding:1rem .75rem}.form-floating>.form-control::placeholder,.form-floating>.form-control-plaintext::placeholder{color:rgba(0,0,0,0)}.form-floating>.form-control:focus,.form-floating>.form-control:not(:placeholder-shown),.form-floating>.form-control-plaintext:focus,.form-floating>.form-control-plaintext:not(:placeholder-shown){padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:-webkit-autofill,.form-floating>.form-control-plaintext:-webkit-autofill{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-select{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:focus~label,.form-floating>.form-control:not(:placeholder-shown)~label,.form-floating>.form-control-plaintext~label,.form-floating>.form-select~label{color:rgba(var(--bs-body-color-rgb), 0.65);transform:scale(0.85) translateY(-0.5rem) translateX(0.15rem)}.form-floating>.form-control:focus~label::after,.form-floating>.form-control:not(:placeholder-shown)~label::after,.form-floating>.form-control-plaintext~label::after,.form-floating>.form-select~label::after{position:absolute;inset:1rem .375rem;z-index:-1;height:1.5em;content:\"\";background-color:var(--bs-body-bg);border-radius:var(--bs-border-radius)}.form-floating>.form-control:-webkit-autofill~label{color:rgba(var(--bs-body-color-rgb), 0.65);transform:scale(0.85) translateY(-0.5rem) translateX(0.15rem)}.form-floating>.form-control-plaintext~label{border-width:var(--bs-border-width) 0}.form-floating>:disabled~label,.form-floating>.form-control:disabled~label{color:#6c757d}.form-floating>:disabled~label::after,.form-floating>.form-control:disabled~label::after{background-color:var(--bs-secondary-bg)}.input-group{position:relative;display:flex;flex-wrap:wrap;align-items:stretch;width:100%}.input-group>.form-control,.input-group>.form-select,.input-group>.form-floating{position:relative;flex:1 1 auto;width:1%;min-width:0}.input-group>.form-control:focus,.input-group>.form-select:focus,.input-group>.form-floating:focus-within{z-index:5}.input-group .btn{position:relative;z-index:2}.input-group .btn:focus{z-index:5}.input-group-text{display:flex;align-items:center;padding:.375rem 
.75rem;font-size:1rem;font-weight:400;line-height:1.5;color:var(--bs-body-color);text-align:center;white-space:nowrap;background-color:var(--bs-tertiary-bg);border:var(--bs-border-width) solid var(--bs-border-color);border-radius:var(--bs-border-radius)}.input-group-lg>.form-control,.input-group-lg>.form-select,.input-group-lg>.input-group-text,.input-group-lg>.btn{padding:.5rem 1rem;font-size:1.25rem;border-radius:var(--bs-border-radius-lg)}.input-group-sm>.form-control,.input-group-sm>.form-select,.input-group-sm>.input-group-text,.input-group-sm>.btn{padding:.25rem .5rem;font-size:0.875rem;border-radius:var(--bs-border-radius-sm)}.input-group-lg>.form-select,.input-group-sm>.form-select{padding-right:3rem}.input-group:not(.has-validation)>:not(:last-child):not(.dropdown-toggle):not(.dropdown-menu):not(.form-floating),.input-group:not(.has-validation)>.dropdown-toggle:nth-last-child(n+3),.input-group:not(.has-validation)>.form-floating:not(:last-child)>.form-control,.input-group:not(.has-validation)>.form-floating:not(:last-child)>.form-select{border-top-right-radius:0;border-bottom-right-radius:0}.input-group.has-validation>:nth-last-child(n+3):not(.dropdown-toggle):not(.dropdown-menu):not(.form-floating),.input-group.has-validation>.dropdown-toggle:nth-last-child(n+4),.input-group.has-validation>.form-floating:nth-last-child(n+3)>.form-control,.input-group.has-validation>.form-floating:nth-last-child(n+3)>.form-select{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>:not(:first-child):not(.dropdown-menu):not(.valid-tooltip):not(.valid-feedback):not(.invalid-tooltip):not(.invalid-feedback){margin-left:calc(var(--bs-border-width)*-1);border-top-left-radius:0;border-bottom-left-radius:0}.input-group>.form-floating:not(:first-child)>.form-control,.input-group>.form-floating:not(:first-child)>.form-select{border-top-left-radius:0;border-bottom-left-radius:0}.valid-feedback{display:none;width:100%;margin-top:.25rem;font-size:0.875em;color:var(--bs-form-valid-color)}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:0.875rem;color:#fff;background-color:var(--bs-success);border-radius:var(--bs-border-radius)}.was-validated :valid~.valid-feedback,.was-validated :valid~.valid-tooltip,.is-valid~.valid-feedback,.is-valid~.valid-tooltip{display:block}.was-validated .form-control:valid,.form-control.is-valid{border-color:var(--bs-form-valid-border-color);padding-right:calc(1.5em + 0.75rem);background-image:url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e\");background-repeat:no-repeat;background-position:right calc(0.375em + 0.1875rem) center;background-size:calc(0.75em + 0.375rem) calc(0.75em + 0.375rem)}.was-validated .form-control:valid:focus,.form-control.is-valid:focus{border-color:var(--bs-form-valid-border-color);box-shadow:0 0 0 .1875rem rgba(var(--bs-success-rgb), 1)}.was-validated textarea.form-control:valid,textarea.form-control.is-valid{padding-right:calc(1.5em + 0.75rem);background-position:top calc(0.375em + 0.1875rem) right calc(0.375em + 0.1875rem)}.was-validated .form-select:valid,.form-select.is-valid{border-color:var(--bs-form-valid-border-color)}.was-validated .form-select:valid:not([multiple]):not([size]),.was-validated 
.form-select:valid:not([multiple])[size=\"1\"],.form-select.is-valid:not([multiple]):not([size]),.form-select.is-valid:not([multiple])[size=\"1\"]{--bs-form-select-bg-icon: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e\");padding-right:4.125rem;background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(0.75em + 0.375rem) calc(0.75em + 0.375rem)}.was-validated .form-select:valid:focus,.form-select.is-valid:focus{border-color:var(--bs-form-valid-border-color);box-shadow:0 0 0 .1875rem rgba(var(--bs-success-rgb), 1)}.was-validated .form-control-color:valid,.form-control-color.is-valid{width:calc(3rem + calc(1.5em + 0.75rem))}.was-validated .form-check-input:valid,.form-check-input.is-valid{border-color:var(--bs-form-valid-border-color)}.was-validated .form-check-input:valid:checked,.form-check-input.is-valid:checked{background-color:var(--bs-form-valid-color)}.was-validated .form-check-input:valid:focus,.form-check-input.is-valid:focus{box-shadow:0 0 0 .1875rem rgba(var(--bs-success-rgb), 1)}.was-validated .form-check-input:valid~.form-check-label,.form-check-input.is-valid~.form-check-label{color:var(--bs-form-valid-color)}.form-check-inline .form-check-input~.valid-feedback{margin-left:.5em}.was-validated .input-group>.form-control:not(:focus):valid,.input-group>.form-control:not(:focus).is-valid,.was-validated .input-group>.form-select:not(:focus):valid,.input-group>.form-select:not(:focus).is-valid,.was-validated .input-group>.form-floating:not(:focus-within):valid,.input-group>.form-floating:not(:focus-within).is-valid{z-index:3}.invalid-feedback{display:none;width:100%;margin-top:.25rem;font-size:0.875em;color:var(--bs-form-invalid-color)}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:0.875rem;color:#fff;background-color:var(--bs-danger);border-radius:var(--bs-border-radius)}.was-validated :invalid~.invalid-feedback,.was-validated :invalid~.invalid-tooltip,.is-invalid~.invalid-feedback,.is-invalid~.invalid-tooltip{display:block}.was-validated .form-control:invalid,.form-control.is-invalid{border-color:var(--bs-form-invalid-border-color);padding-right:calc(1.5em + 0.75rem);background-image:url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23dc3545'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3e%3c/svg%3e\");background-repeat:no-repeat;background-position:right calc(0.375em + 0.1875rem) center;background-size:calc(0.75em + 0.375rem) calc(0.75em + 0.375rem)}.was-validated .form-control:invalid:focus,.form-control.is-invalid:focus{border-color:var(--bs-form-invalid-border-color);box-shadow:0 0 0 .1875rem rgba(var(--bs-danger-rgb), 1)}.was-validated textarea.form-control:invalid,textarea.form-control.is-invalid{padding-right:calc(1.5em + 0.75rem);background-position:top calc(0.375em + 0.1875rem) right calc(0.375em + 0.1875rem)}.was-validated .form-select:invalid,.form-select.is-invalid{border-color:var(--bs-form-invalid-border-color)}.was-validated .form-select:invalid:not([multiple]):not([size]),.was-validated 
.form-select:invalid:not([multiple])[size=\"1\"],.form-select.is-invalid:not([multiple]):not([size]),.form-select.is-invalid:not([multiple])[size=\"1\"]{--bs-form-select-bg-icon: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23dc3545'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3e%3c/svg%3e\");padding-right:4.125rem;background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(0.75em + 0.375rem) calc(0.75em + 0.375rem)}.was-validated .form-select:invalid:focus,.form-select.is-invalid:focus{border-color:var(--bs-form-invalid-border-color);box-shadow:0 0 0 .1875rem rgba(var(--bs-danger-rgb), 1)}.was-validated .form-control-color:invalid,.form-control-color.is-invalid{width:calc(3rem + calc(1.5em + 0.75rem))}.was-validated .form-check-input:invalid,.form-check-input.is-invalid{border-color:var(--bs-form-invalid-border-color)}.was-validated .form-check-input:invalid:checked,.form-check-input.is-invalid:checked{background-color:var(--bs-form-invalid-color)}.was-validated .form-check-input:invalid:focus,.form-check-input.is-invalid:focus{box-shadow:0 0 0 .1875rem rgba(var(--bs-danger-rgb), 1)}.was-validated .form-check-input:invalid~.form-check-label,.form-check-input.is-invalid~.form-check-label{color:var(--bs-form-invalid-color)}.form-check-inline .form-check-input~.invalid-feedback{margin-left:.5em}.was-validated .input-group>.form-control:not(:focus):invalid,.input-group>.form-control:not(:focus).is-invalid,.was-validated .input-group>.form-select:not(:focus):invalid,.input-group>.form-select:not(:focus).is-invalid,.was-validated .input-group>.form-floating:not(:focus-within):invalid,.input-group>.form-floating:not(:focus-within).is-invalid{z-index:4}.btn{--bs-btn-padding-x: 0.75rem;--bs-btn-padding-y: 0.375rem;--bs-btn-font-family: ;--bs-btn-font-size:1rem;--bs-btn-font-weight: 400;--bs-btn-line-height: 1.5;--bs-btn-color: var(--bs-body-color);--bs-btn-bg: transparent;--bs-btn-border-width: var(--bs-border-width);--bs-btn-border-color: transparent;--bs-btn-border-radius: var(--bs-border-radius);--bs-btn-hover-border-color: transparent;--bs-btn-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);--bs-btn-disabled-opacity: 0.65;--bs-btn-focus-box-shadow: 0 0 0 0.1875rem rgba(var(--bs-btn-focus-shadow-rgb), .5);display:inline-block;padding:var(--bs-btn-padding-y) var(--bs-btn-padding-x);font-family:var(--bs-btn-font-family);font-size:var(--bs-btn-font-size);font-weight:var(--bs-btn-font-weight);line-height:var(--bs-btn-line-height);color:var(--bs-btn-color);text-align:center;text-decoration:none;vertical-align:middle;cursor:pointer;user-select:none;border:var(--bs-btn-border-width) solid var(--bs-btn-border-color);border-radius:var(--bs-btn-border-radius);background-color:var(--bs-btn-bg);transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: 
reduce){.btn{transition:none}}.btn:hover{color:var(--bs-btn-hover-color);background-color:var(--bs-btn-hover-bg);border-color:var(--bs-btn-hover-border-color)}.btn-check+.btn:hover{color:var(--bs-btn-color);background-color:var(--bs-btn-bg);border-color:var(--bs-btn-border-color)}.btn:focus-visible{color:var(--bs-btn-hover-color);background-color:var(--bs-btn-hover-bg);border-color:var(--bs-btn-hover-border-color);outline:0;box-shadow:var(--bs-btn-focus-box-shadow)}.btn-check:focus-visible+.btn{border-color:var(--bs-btn-hover-border-color);outline:0;box-shadow:var(--bs-btn-focus-box-shadow)}.btn-check:checked+.btn,:not(.btn-check)+.btn:active,.btn:first-child:active,.btn.active,.btn.show{color:var(--bs-btn-active-color);background-color:var(--bs-btn-active-bg);border-color:var(--bs-btn-active-border-color)}.btn-check:checked+.btn:focus-visible,:not(.btn-check)+.btn:active:focus-visible,.btn:first-child:active:focus-visible,.btn.active:focus-visible,.btn.show:focus-visible{box-shadow:var(--bs-btn-focus-box-shadow)}.btn-check:checked:focus-visible+.btn{box-shadow:var(--bs-btn-focus-box-shadow)}.btn:disabled,.btn.disabled,fieldset:disabled .btn{color:var(--bs-btn-disabled-color);pointer-events:none;background-color:var(--bs-btn-disabled-bg);border-color:var(--bs-btn-disabled-border-color);opacity:var(--bs-btn-disabled-opacity)}.btn-primary{--bs-btn-color: #fff;--bs-btn-bg: #0d6efd;--bs-btn-border-color: #0d6efd;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #0b5ed7;--bs-btn-hover-border-color: #0a58ca;--bs-btn-focus-shadow-rgb: 49, 132, 253;--bs-btn-active-color: #fff;--bs-btn-active-bg: #0a58ca;--bs-btn-active-border-color: #0a53be;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #0d6efd;--bs-btn-disabled-border-color: #0d6efd}.btn-secondary{--bs-btn-color: #fff;--bs-btn-bg: #6c757d;--bs-btn-border-color: #6c757d;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #5c636a;--bs-btn-hover-border-color: #565e64;--bs-btn-focus-shadow-rgb: 130, 138, 145;--bs-btn-active-color: #fff;--bs-btn-active-bg: #565e64;--bs-btn-active-border-color: #51585e;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #6c757d;--bs-btn-disabled-border-color: #6c757d}.btn-success{--bs-btn-color: #fff;--bs-btn-bg: #198754;--bs-btn-border-color: #198754;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #157347;--bs-btn-hover-border-color: #146c43;--bs-btn-focus-shadow-rgb: 60, 153, 110;--bs-btn-active-color: #fff;--bs-btn-active-bg: #146c43;--bs-btn-active-border-color: #13653f;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #198754;--bs-btn-disabled-border-color: #198754}.btn-info{--bs-btn-color: #000;--bs-btn-bg: #0dcaf0;--bs-btn-border-color: #0dcaf0;--bs-btn-hover-color: #000;--bs-btn-hover-bg: #31d2f2;--bs-btn-hover-border-color: #25cff2;--bs-btn-focus-shadow-rgb: 11, 172, 204;--bs-btn-active-color: #000;--bs-btn-active-bg: #3dd5f3;--bs-btn-active-border-color: #25cff2;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #000;--bs-btn-disabled-bg: #0dcaf0;--bs-btn-disabled-border-color: #0dcaf0}.btn-warning{--bs-btn-color: #000;--bs-btn-bg: #ffc107;--bs-btn-border-color: #ffc107;--bs-btn-hover-color: #000;--bs-btn-hover-bg: #ffca2c;--bs-btn-hover-border-color: #ffc720;--bs-btn-focus-shadow-rgb: 217, 164, 6;--bs-btn-active-color: #000;--bs-btn-active-bg: #ffcd39;--bs-btn-active-border-color: 
#ffc720;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #000;--bs-btn-disabled-bg: #ffc107;--bs-btn-disabled-border-color: #ffc107}.btn-danger{--bs-btn-color: #fff;--bs-btn-bg: #dc3545;--bs-btn-border-color: #dc3545;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #bb2d3b;--bs-btn-hover-border-color: #b02a37;--bs-btn-focus-shadow-rgb: 225, 83, 97;--bs-btn-active-color: #fff;--bs-btn-active-bg: #b02a37;--bs-btn-active-border-color: #a52834;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #dc3545;--bs-btn-disabled-border-color: #dc3545}.btn-light{--bs-btn-color: #000;--bs-btn-bg: #f8f9fa;--bs-btn-border-color: #f8f9fa;--bs-btn-hover-color: #000;--bs-btn-hover-bg: #d3d4d5;--bs-btn-hover-border-color: #c6c7c8;--bs-btn-focus-shadow-rgb: 211, 212, 213;--bs-btn-active-color: #000;--bs-btn-active-bg: #c6c7c8;--bs-btn-active-border-color: #babbbc;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #000;--bs-btn-disabled-bg: #f8f9fa;--bs-btn-disabled-border-color: #f8f9fa}.btn-dark{--bs-btn-color: #fff;--bs-btn-bg: #212529;--bs-btn-border-color: #212529;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #424649;--bs-btn-hover-border-color: #373b3e;--bs-btn-focus-shadow-rgb: 66, 70, 73;--bs-btn-active-color: #fff;--bs-btn-active-bg: #4d5154;--bs-btn-active-border-color: #373b3e;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #212529;--bs-btn-disabled-border-color: #212529}.btn-outline-primary{--bs-btn-color: #0d6efd;--bs-btn-border-color: #0d6efd;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #0d6efd;--bs-btn-hover-border-color: #0d6efd;--bs-btn-focus-shadow-rgb: 13, 110, 253;--bs-btn-active-color: #fff;--bs-btn-active-bg: #0d6efd;--bs-btn-active-border-color: #0d6efd;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #0d6efd;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #0d6efd;--bs-gradient: none}.btn-outline-secondary{--bs-btn-color: #6c757d;--bs-btn-border-color: #6c757d;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #6c757d;--bs-btn-hover-border-color: #6c757d;--bs-btn-focus-shadow-rgb: 108, 117, 125;--bs-btn-active-color: #fff;--bs-btn-active-bg: #6c757d;--bs-btn-active-border-color: #6c757d;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #6c757d;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #6c757d;--bs-gradient: none}.btn-outline-success{--bs-btn-color: #198754;--bs-btn-border-color: #198754;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #198754;--bs-btn-hover-border-color: #198754;--bs-btn-focus-shadow-rgb: 25, 135, 84;--bs-btn-active-color: #fff;--bs-btn-active-bg: #198754;--bs-btn-active-border-color: #198754;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #198754;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #198754;--bs-gradient: none}.btn-outline-info{--bs-btn-color: #0dcaf0;--bs-btn-border-color: #0dcaf0;--bs-btn-hover-color: #000;--bs-btn-hover-bg: #0dcaf0;--bs-btn-hover-border-color: #0dcaf0;--bs-btn-focus-shadow-rgb: 13, 202, 240;--bs-btn-active-color: #000;--bs-btn-active-bg: #0dcaf0;--bs-btn-active-border-color: #0dcaf0;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #0dcaf0;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #0dcaf0;--bs-gradient: 
none}.btn-outline-warning{--bs-btn-color: #ffc107;--bs-btn-border-color: #ffc107;--bs-btn-hover-color: #000;--bs-btn-hover-bg: #ffc107;--bs-btn-hover-border-color: #ffc107;--bs-btn-focus-shadow-rgb: 255, 193, 7;--bs-btn-active-color: #000;--bs-btn-active-bg: #ffc107;--bs-btn-active-border-color: #ffc107;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #ffc107;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #ffc107;--bs-gradient: none}.btn-outline-danger{--bs-btn-color: #dc3545;--bs-btn-border-color: #dc3545;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #dc3545;--bs-btn-hover-border-color: #dc3545;--bs-btn-focus-shadow-rgb: 220, 53, 69;--bs-btn-active-color: #fff;--bs-btn-active-bg: #dc3545;--bs-btn-active-border-color: #dc3545;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #dc3545;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #dc3545;--bs-gradient: none}.btn-outline-light{--bs-btn-color: #f8f9fa;--bs-btn-border-color: #f8f9fa;--bs-btn-hover-color: #000;--bs-btn-hover-bg: #f8f9fa;--bs-btn-hover-border-color: #f8f9fa;--bs-btn-focus-shadow-rgb: 248, 249, 250;--bs-btn-active-color: #000;--bs-btn-active-bg: #f8f9fa;--bs-btn-active-border-color: #f8f9fa;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #f8f9fa;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #f8f9fa;--bs-gradient: none}.btn-outline-dark{--bs-btn-color: #212529;--bs-btn-border-color: #212529;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #212529;--bs-btn-hover-border-color: #212529;--bs-btn-focus-shadow-rgb: 33, 37, 41;--bs-btn-active-color: #fff;--bs-btn-active-bg: #212529;--bs-btn-active-border-color: #212529;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #212529;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #212529;--bs-gradient: none}.btn-link{--bs-btn-font-weight: 400;--bs-btn-color: var(--bs-link-color);--bs-btn-bg: transparent;--bs-btn-border-color: transparent;--bs-btn-hover-color: var(--bs-link-hover-color);--bs-btn-hover-border-color: transparent;--bs-btn-active-color: var(--bs-link-hover-color);--bs-btn-active-border-color: transparent;--bs-btn-disabled-color: #6c757d;--bs-btn-disabled-border-color: transparent;--bs-btn-box-shadow: 0 0 0 #000;--bs-btn-focus-shadow-rgb: 49, 132, 253;text-decoration:underline}.btn-link:focus-visible{color:var(--bs-btn-color)}.btn-link:hover{color:var(--bs-btn-hover-color)}.btn-lg,.btn-group-lg>.btn{--bs-btn-padding-y: 0.5rem;--bs-btn-padding-x: 1rem;--bs-btn-font-size:1.25rem;--bs-btn-border-radius: var(--bs-border-radius-lg)}.btn-sm,.btn-group-sm>.btn{--bs-btn-padding-y: 0.25rem;--bs-btn-padding-x: 0.5rem;--bs-btn-font-size:0.875rem;--bs-btn-border-radius: var(--bs-border-radius-sm)}.fade{transition:opacity .15s linear}@media(prefers-reduced-motion: reduce){.fade{transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{height:0;overflow:hidden;transition:height .35s ease}@media(prefers-reduced-motion: reduce){.collapsing{transition:none}}.collapsing.collapse-horizontal{width:0;height:auto;transition:width .35s ease}@media(prefers-reduced-motion: 
reduce){.collapsing.collapse-horizontal{transition:none}}.dropup,.dropend,.dropdown,.dropstart,.dropup-center,.dropdown-center{position:relative}.dropdown-toggle{white-space:nowrap}.dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:\"\";border-top:.3em solid;border-right:.3em solid rgba(0,0,0,0);border-bottom:0;border-left:.3em solid rgba(0,0,0,0)}.dropdown-toggle:empty::after{margin-left:0}.dropdown-menu{--bs-dropdown-zindex: 1000;--bs-dropdown-min-width: 10rem;--bs-dropdown-padding-x: 0;--bs-dropdown-padding-y: 0.5rem;--bs-dropdown-spacer: 0.125rem;--bs-dropdown-font-size:1rem;--bs-dropdown-color: var(--bs-body-color);--bs-dropdown-bg: var(--bs-body-bg);--bs-dropdown-border-color: var(--bs-border-color-translucent);--bs-dropdown-border-radius: var(--bs-border-radius);--bs-dropdown-border-width: var(--bs-border-width);--bs-dropdown-inner-border-radius: calc(var(--bs-border-radius) - var(--bs-border-width));--bs-dropdown-divider-bg: var(--bs-border-color-translucent);--bs-dropdown-divider-margin-y: 0.5rem;--bs-dropdown-box-shadow: var(--bs-box-shadow);--bs-dropdown-link-color: var(--bs-body-color);--bs-dropdown-link-hover-color: var(--bs-body-color);--bs-dropdown-link-hover-bg: var(--pst-color-surface);--bs-dropdown-link-active-color: #fff;--bs-dropdown-link-active-bg: var(--pst-color-surface);--bs-dropdown-link-disabled-color: var(--bs-tertiary-color);--bs-dropdown-item-padding-x: 1rem;--bs-dropdown-item-padding-y: 0.25rem;--bs-dropdown-header-color: #6c757d;--bs-dropdown-header-padding-x: 1rem;--bs-dropdown-header-padding-y: 0.5rem;position:absolute;z-index:var(--bs-dropdown-zindex);display:none;min-width:var(--bs-dropdown-min-width);padding:var(--bs-dropdown-padding-y) var(--bs-dropdown-padding-x);margin:0;font-size:var(--bs-dropdown-font-size);color:var(--bs-dropdown-color);text-align:left;list-style:none;background-color:var(--bs-dropdown-bg);background-clip:padding-box;border:var(--bs-dropdown-border-width) solid var(--bs-dropdown-border-color);border-radius:var(--bs-dropdown-border-radius)}.dropdown-menu[data-bs-popper]{top:100%;left:0;margin-top:var(--bs-dropdown-spacer)}.dropdown-menu-start{--bs-position: start}.dropdown-menu-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-end{--bs-position: end}.dropdown-menu-end[data-bs-popper]{right:0;left:auto}@media(min-width: 540px){.dropdown-menu-sm-start{--bs-position: start}.dropdown-menu-sm-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-sm-end{--bs-position: end}.dropdown-menu-sm-end[data-bs-popper]{right:0;left:auto}}@media(min-width: 720px){.dropdown-menu-md-start{--bs-position: start}.dropdown-menu-md-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-md-end{--bs-position: end}.dropdown-menu-md-end[data-bs-popper]{right:0;left:auto}}@media(min-width: 960px){.dropdown-menu-lg-start{--bs-position: start}.dropdown-menu-lg-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-lg-end{--bs-position: end}.dropdown-menu-lg-end[data-bs-popper]{right:0;left:auto}}@media(min-width: 1200px){.dropdown-menu-xl-start{--bs-position: start}.dropdown-menu-xl-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-xl-end{--bs-position: end}.dropdown-menu-xl-end[data-bs-popper]{right:0;left:auto}}.dropup .dropdown-menu[data-bs-popper]{top:auto;bottom:100%;margin-top:0;margin-bottom:var(--bs-dropdown-spacer)}.dropup .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:\"\";border-top:0;border-right:.3em solid rgba(0,0,0,0);border-bottom:.3em 
solid;border-left:.3em solid rgba(0,0,0,0)}.dropup .dropdown-toggle:empty::after{margin-left:0}.dropend .dropdown-menu[data-bs-popper]{top:0;right:auto;left:100%;margin-top:0;margin-left:var(--bs-dropdown-spacer)}.dropend .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:\"\";border-top:.3em solid rgba(0,0,0,0);border-right:0;border-bottom:.3em solid rgba(0,0,0,0);border-left:.3em solid}.dropend .dropdown-toggle:empty::after{margin-left:0}.dropend .dropdown-toggle::after{vertical-align:0}.dropstart .dropdown-menu[data-bs-popper]{top:0;right:100%;left:auto;margin-top:0;margin-right:var(--bs-dropdown-spacer)}.dropstart .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:\"\"}.dropstart .dropdown-toggle::after{display:none}.dropstart .dropdown-toggle::before{display:inline-block;margin-right:.255em;vertical-align:.255em;content:\"\";border-top:.3em solid rgba(0,0,0,0);border-right:.3em solid;border-bottom:.3em solid rgba(0,0,0,0)}.dropstart .dropdown-toggle:empty::after{margin-left:0}.dropstart .dropdown-toggle::before{vertical-align:0}.dropdown-divider{height:0;margin:var(--bs-dropdown-divider-margin-y) 0;overflow:hidden;border-top:1px solid var(--bs-dropdown-divider-bg);opacity:1}.dropdown-item{display:block;width:100%;padding:var(--bs-dropdown-item-padding-y) var(--bs-dropdown-item-padding-x);clear:both;font-weight:400;color:var(--bs-dropdown-link-color);text-align:inherit;text-decoration:none;white-space:nowrap;background-color:rgba(0,0,0,0);border:0;border-radius:var(--bs-dropdown-item-border-radius, 0)}.dropdown-item:hover,.dropdown-item:focus{color:var(--bs-dropdown-link-hover-color);background-color:var(--bs-dropdown-link-hover-bg)}.dropdown-item.active,.dropdown-item:active{color:var(--bs-dropdown-link-active-color);text-decoration:none;background-color:var(--bs-dropdown-link-active-bg)}.dropdown-item.disabled,.dropdown-item:disabled{color:var(--bs-dropdown-link-disabled-color);pointer-events:none;background-color:rgba(0,0,0,0)}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:var(--bs-dropdown-header-padding-y) var(--bs-dropdown-header-padding-x);margin-bottom:0;font-size:0.875rem;color:var(--bs-dropdown-header-color);white-space:nowrap}.dropdown-item-text{display:block;padding:var(--bs-dropdown-item-padding-y) var(--bs-dropdown-item-padding-x);color:var(--bs-dropdown-link-color)}.dropdown-menu-dark{--bs-dropdown-color: #dee2e6;--bs-dropdown-bg: #343a40;--bs-dropdown-border-color: var(--bs-border-color-translucent);--bs-dropdown-box-shadow: ;--bs-dropdown-link-color: #dee2e6;--bs-dropdown-link-hover-color: #fff;--bs-dropdown-divider-bg: var(--bs-border-color-translucent);--bs-dropdown-link-hover-bg: var(--pst-color-surface);--bs-dropdown-link-active-color: #fff;--bs-dropdown-link-active-bg: var(--pst-color-surface);--bs-dropdown-link-disabled-color: #adb5bd;--bs-dropdown-header-color: #adb5bd}.btn-group,.btn-group-vertical{position:relative;display:inline-flex;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;flex:1 1 
auto}.btn-group>.btn-check:checked+.btn,.btn-group>.btn-check:focus+.btn,.btn-group>.btn:hover,.btn-group>.btn:focus,.btn-group>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn-check:checked+.btn,.btn-group-vertical>.btn-check:focus+.btn,.btn-group-vertical>.btn:hover,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn.active{z-index:1}.btn-toolbar{display:flex;flex-wrap:wrap;justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group{border-radius:var(--bs-border-radius)}.btn-group>:not(.btn-check:first-child)+.btn,.btn-group>.btn-group:not(:first-child){margin-left:calc(var(--bs-border-width)*-1)}.btn-group>.btn:not(:last-child):not(.dropdown-toggle),.btn-group>.btn.dropdown-toggle-split:first-child,.btn-group>.btn-group:not(:last-child)>.btn{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:nth-child(n+3),.btn-group>:not(.btn-check)+.btn,.btn-group>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:.5625rem;padding-left:.5625rem}.dropdown-toggle-split::after,.dropup .dropdown-toggle-split::after,.dropend .dropdown-toggle-split::after{margin-left:0}.dropstart .dropdown-toggle-split::before{margin-right:0}.btn-sm+.dropdown-toggle-split,.btn-group-sm>.btn+.dropdown-toggle-split{padding-right:.375rem;padding-left:.375rem}.btn-lg+.dropdown-toggle-split,.btn-group-lg>.btn+.dropdown-toggle-split{padding-right:.75rem;padding-left:.75rem}.btn-group-vertical{flex-direction:column;align-items:flex-start;justify-content:center}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{width:100%}.btn-group-vertical>.btn:not(:first-child),.btn-group-vertical>.btn-group:not(:first-child){margin-top:calc(var(--bs-border-width)*-1)}.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle),.btn-group-vertical>.btn-group:not(:last-child)>.btn{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn~.btn,.btn-group-vertical>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-top-right-radius:0}.nav{--bs-nav-link-padding-x: 1rem;--bs-nav-link-padding-y: 0.5rem;--bs-nav-link-font-weight: ;--bs-nav-link-color: var(--bs-link-color);--bs-nav-link-hover-color: var(--bs-link-hover-color);--bs-nav-link-disabled-color: var(--bs-secondary-color);display:flex;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:var(--bs-nav-link-padding-y) var(--bs-nav-link-padding-x);font-size:var(--bs-nav-link-font-size);font-weight:var(--bs-nav-link-font-weight);color:var(--bs-nav-link-color);text-decoration:none;background:none;border:0;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out}@media(prefers-reduced-motion: reduce){.nav-link{transition:none}}.nav-link:hover,.nav-link:focus{color:var(--bs-nav-link-hover-color)}.nav-link:focus-visible{outline:0;box-shadow:0 0 0 .1875rem var(--pst-color-accent)}.nav-link.disabled,.nav-link:disabled{color:var(--bs-nav-link-disabled-color);pointer-events:none;cursor:default}.nav-tabs{--bs-nav-tabs-border-width: var(--bs-border-width);--bs-nav-tabs-border-color: var(--bs-border-color);--bs-nav-tabs-border-radius: var(--bs-border-radius);--bs-nav-tabs-link-hover-border-color: var(--bs-secondary-bg) var(--bs-secondary-bg) var(--bs-border-color);--bs-nav-tabs-link-active-color: var(--bs-emphasis-color);--bs-nav-tabs-link-active-bg: var(--bs-body-bg);--bs-nav-tabs-link-active-border-color: var(--bs-border-color) 
var(--bs-border-color) var(--bs-body-bg);border-bottom:var(--bs-nav-tabs-border-width) solid var(--bs-nav-tabs-border-color)}.nav-tabs .nav-link{margin-bottom:calc(-1*var(--bs-nav-tabs-border-width));border:var(--bs-nav-tabs-border-width) solid rgba(0,0,0,0);border-top-left-radius:var(--bs-nav-tabs-border-radius);border-top-right-radius:var(--bs-nav-tabs-border-radius)}.nav-tabs .nav-link:hover,.nav-tabs .nav-link:focus{isolation:isolate;border-color:var(--bs-nav-tabs-link-hover-border-color)}.nav-tabs .nav-link.active,.nav-tabs .nav-item.show .nav-link{color:var(--bs-nav-tabs-link-active-color);background-color:var(--bs-nav-tabs-link-active-bg);border-color:var(--bs-nav-tabs-link-active-border-color)}.nav-tabs .dropdown-menu{margin-top:calc(-1*var(--bs-nav-tabs-border-width));border-top-left-radius:0;border-top-right-radius:0}.nav-pills{--bs-nav-pills-border-radius: var(--bs-border-radius);--bs-nav-pills-link-active-color: #fff;--bs-nav-pills-link-active-bg: #0d6efd}.nav-pills .nav-link{border-radius:var(--bs-nav-pills-border-radius)}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:var(--bs-nav-pills-link-active-color);background-color:var(--bs-nav-pills-link-active-bg)}.nav-underline{--bs-nav-underline-gap: 1rem;--bs-nav-underline-border-width: 0.125rem;--bs-nav-underline-link-active-color: var(--bs-emphasis-color);gap:var(--bs-nav-underline-gap)}.nav-underline .nav-link{padding-right:0;padding-left:0;border-bottom:var(--bs-nav-underline-border-width) solid rgba(0,0,0,0)}.nav-underline .nav-link:hover,.nav-underline .nav-link:focus{border-bottom-color:currentcolor}.nav-underline .nav-link.active,.nav-underline .show>.nav-link{font-weight:700;color:var(--bs-nav-underline-link-active-color);border-bottom-color:currentcolor}.nav-fill>.nav-link,.nav-fill .nav-item{flex:1 1 auto;text-align:center}.nav-justified>.nav-link,.nav-justified .nav-item{flex-basis:0;flex-grow:1;text-align:center}.nav-fill .nav-item .nav-link,.nav-justified .nav-item .nav-link{width:100%}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{--bs-navbar-padding-x: 0;--bs-navbar-padding-y: 0.5rem;--bs-navbar-color: rgba(var(--bs-emphasis-color-rgb), 0.65);--bs-navbar-hover-color: rgba(var(--bs-emphasis-color-rgb), 0.8);--bs-navbar-disabled-color: rgba(var(--bs-emphasis-color-rgb), 0.3);--bs-navbar-active-color: rgba(var(--bs-emphasis-color-rgb), 1);--bs-navbar-brand-padding-y: 0.3125rem;--bs-navbar-brand-margin-end: 1rem;--bs-navbar-brand-font-size: 1.25rem;--bs-navbar-brand-color: rgba(var(--bs-emphasis-color-rgb), 1);--bs-navbar-brand-hover-color: rgba(var(--bs-emphasis-color-rgb), 1);--bs-navbar-nav-link-padding-x: 0.5rem;--bs-navbar-toggler-padding-y: 0.25rem;--bs-navbar-toggler-padding-x: 0.75rem;--bs-navbar-toggler-font-size: 1.25rem;--bs-navbar-toggler-icon-bg: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%2833, 37, 41, 0.75%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e\");--bs-navbar-toggler-border-color: rgba(var(--bs-emphasis-color-rgb), 0.15);--bs-navbar-toggler-border-radius: var(--bs-border-radius);--bs-navbar-toggler-focus-width: 0.1875rem;--bs-navbar-toggler-transition: box-shadow 0.15s ease-in-out;position:relative;display:flex;flex-wrap:wrap;align-items:center;justify-content:space-between;padding:var(--bs-navbar-padding-y) 
var(--bs-navbar-padding-x)}.navbar>.container,.navbar>.container-fluid,.navbar>.container-sm,.navbar>.container-md,.navbar>.container-lg,.navbar>.container-xl{display:flex;flex-wrap:inherit;align-items:center;justify-content:space-between}.navbar-brand{padding-top:var(--bs-navbar-brand-padding-y);padding-bottom:var(--bs-navbar-brand-padding-y);margin-right:var(--bs-navbar-brand-margin-end);font-size:var(--bs-navbar-brand-font-size);color:var(--bs-navbar-brand-color);text-decoration:none;white-space:nowrap}.navbar-brand:hover,.navbar-brand:focus{color:var(--bs-navbar-brand-hover-color)}.navbar-nav{--bs-nav-link-padding-x: 0;--bs-nav-link-padding-y: 0.5rem;--bs-nav-link-font-weight: ;--bs-nav-link-color: var(--bs-navbar-color);--bs-nav-link-hover-color: var(--bs-navbar-hover-color);--bs-nav-link-disabled-color: var(--bs-navbar-disabled-color);display:flex;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link.active,.navbar-nav .nav-link.show{color:var(--bs-navbar-active-color)}.navbar-nav .dropdown-menu{position:static}.navbar-text{padding-top:.5rem;padding-bottom:.5rem;color:var(--bs-navbar-color)}.navbar-text a,.navbar-text a:hover,.navbar-text a:focus{color:var(--bs-navbar-active-color)}.navbar-collapse{flex-basis:100%;flex-grow:1;align-items:center}.navbar-toggler{padding:var(--bs-navbar-toggler-padding-y) var(--bs-navbar-toggler-padding-x);font-size:var(--bs-navbar-toggler-font-size);line-height:1;color:var(--bs-navbar-color);background-color:rgba(0,0,0,0);border:var(--bs-border-width) solid var(--bs-navbar-toggler-border-color);border-radius:var(--bs-navbar-toggler-border-radius);transition:var(--bs-navbar-toggler-transition)}@media(prefers-reduced-motion: reduce){.navbar-toggler{transition:none}}.navbar-toggler:hover{text-decoration:none}.navbar-toggler:focus{text-decoration:none;outline:0;box-shadow:0 0 0 var(--bs-navbar-toggler-focus-width)}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;background-image:var(--bs-navbar-toggler-icon-bg);background-repeat:no-repeat;background-position:center;background-size:100%}.navbar-nav-scroll{max-height:var(--bs-scroll-height, 75vh);overflow-y:auto}@media(min-width: 540px){.navbar-expand-sm{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-sm .navbar-nav{flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-sm .navbar-nav-scroll{overflow:visible}.navbar-expand-sm .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}.navbar-expand-sm .offcanvas{position:static;z-index:auto;flex-grow:1;width:auto !important;height:auto !important;visibility:visible !important;background-color:rgba(0,0,0,0) !important;border:0 !important;transform:none !important;transition:none}.navbar-expand-sm .offcanvas .offcanvas-header{display:none}.navbar-expand-sm .offcanvas .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media(min-width: 720px){.navbar-expand-md{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-md .navbar-nav{flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-md .navbar-nav-scroll{overflow:visible}.navbar-expand-md 
.navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}.navbar-expand-md .offcanvas{position:static;z-index:auto;flex-grow:1;width:auto !important;height:auto !important;visibility:visible !important;background-color:rgba(0,0,0,0) !important;border:0 !important;transform:none !important;transition:none}.navbar-expand-md .offcanvas .offcanvas-header{display:none}.navbar-expand-md .offcanvas .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media(min-width: 960px){.navbar-expand-lg{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-lg .navbar-nav{flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-lg .navbar-nav-scroll{overflow:visible}.navbar-expand-lg .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}.navbar-expand-lg .offcanvas{position:static;z-index:auto;flex-grow:1;width:auto !important;height:auto !important;visibility:visible !important;background-color:rgba(0,0,0,0) !important;border:0 !important;transform:none !important;transition:none}.navbar-expand-lg .offcanvas .offcanvas-header{display:none}.navbar-expand-lg .offcanvas .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media(min-width: 1200px){.navbar-expand-xl{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-xl .navbar-nav{flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-xl .navbar-nav-scroll{overflow:visible}.navbar-expand-xl .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}.navbar-expand-xl .offcanvas{position:static;z-index:auto;flex-grow:1;width:auto !important;height:auto !important;visibility:visible !important;background-color:rgba(0,0,0,0) !important;border:0 !important;transform:none !important;transition:none}.navbar-expand-xl .offcanvas .offcanvas-header{display:none}.navbar-expand-xl .offcanvas .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}.navbar-expand{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand .navbar-nav{flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand .navbar-nav-scroll{overflow:visible}.navbar-expand .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand .navbar-toggler{display:none}.navbar-expand .offcanvas{position:static;z-index:auto;flex-grow:1;width:auto !important;height:auto !important;visibility:visible !important;background-color:rgba(0,0,0,0) !important;border:0 !important;transform:none !important;transition:none}.navbar-expand .offcanvas .offcanvas-header{display:none}.navbar-expand .offcanvas .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}.navbar-dark,.navbar[data-bs-theme=dark]{--bs-navbar-color: rgba(255, 255, 255, 0.55);--bs-navbar-hover-color: rgba(255, 255, 255, 0.75);--bs-navbar-disabled-color: rgba(255, 255, 255, 0.25);--bs-navbar-active-color: #fff;--bs-navbar-brand-color: #fff;--bs-navbar-brand-hover-color: #fff;--bs-navbar-toggler-border-color: 
rgba(255, 255, 255, 0.1);--bs-navbar-toggler-icon-bg: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%28255, 255, 255, 0.55%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e\")}[data-bs-theme=dark] .navbar-toggler-icon{--bs-navbar-toggler-icon-bg: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%28255, 255, 255, 0.55%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e\")}.card{--bs-card-spacer-y: 1rem;--bs-card-spacer-x: 1rem;--bs-card-title-spacer-y: 0.5rem;--bs-card-title-color: ;--bs-card-subtitle-color: ;--bs-card-border-width: var(--bs-border-width);--bs-card-border-color: var(--bs-border-color-translucent);--bs-card-border-radius: var(--bs-border-radius);--bs-card-box-shadow: ;--bs-card-inner-border-radius: calc(var(--bs-border-radius) - (var(--bs-border-width)));--bs-card-cap-padding-y: 0.5rem;--bs-card-cap-padding-x: 1rem;--bs-card-cap-bg: rgba(var(--bs-body-color-rgb), 0.03);--bs-card-cap-color: ;--bs-card-height: ;--bs-card-color: ;--bs-card-bg: var(--bs-body-bg);--bs-card-img-overlay-padding: 1rem;--bs-card-group-margin: 0.75rem;position:relative;display:flex;flex-direction:column;min-width:0;height:var(--bs-card-height);color:var(--bs-body-color);word-wrap:break-word;background-color:var(--bs-card-bg);background-clip:border-box;border:var(--bs-card-border-width) solid var(--bs-card-border-color);border-radius:var(--bs-card-border-radius)}.card>hr{margin-right:0;margin-left:0}.card>.list-group{border-top:inherit;border-bottom:inherit}.card>.list-group:first-child{border-top-width:0;border-top-left-radius:var(--bs-card-inner-border-radius);border-top-right-radius:var(--bs-card-inner-border-radius)}.card>.list-group:last-child{border-bottom-width:0;border-bottom-right-radius:var(--bs-card-inner-border-radius);border-bottom-left-radius:var(--bs-card-inner-border-radius)}.card>.card-header+.list-group,.card>.list-group+.card-footer{border-top:0}.card-body{flex:1 1 auto;padding:var(--bs-card-spacer-y) var(--bs-card-spacer-x);color:var(--bs-card-color)}.card-title{margin-bottom:var(--bs-card-title-spacer-y);color:var(--bs-card-title-color)}.card-subtitle{margin-top:calc(-0.5*var(--bs-card-title-spacer-y));margin-bottom:0;color:var(--bs-card-subtitle-color)}.card-text:last-child{margin-bottom:0}.card-link+.card-link{margin-left:var(--bs-card-spacer-x)}.card-header{padding:var(--bs-card-cap-padding-y) var(--bs-card-cap-padding-x);margin-bottom:0;color:var(--bs-card-cap-color);background-color:var(--bs-card-cap-bg);border-bottom:var(--bs-card-border-width) solid var(--bs-card-border-color)}.card-header:first-child{border-radius:var(--bs-card-inner-border-radius) var(--bs-card-inner-border-radius) 0 0}.card-footer{padding:var(--bs-card-cap-padding-y) var(--bs-card-cap-padding-x);color:var(--bs-card-cap-color);background-color:var(--bs-card-cap-bg);border-top:var(--bs-card-border-width) solid var(--bs-card-border-color)}.card-footer:last-child{border-radius:0 0 var(--bs-card-inner-border-radius) var(--bs-card-inner-border-radius)}.card-header-tabs{margin-right:calc(-0.5*var(--bs-card-cap-padding-x));margin-bottom:calc(-1*var(--bs-card-cap-padding-y));margin-left:calc(-0.5*var(--bs-card-cap-padding-x));border-bottom:0}.card-header-tabs 
.nav-link.active{background-color:var(--bs-card-bg);border-bottom-color:var(--bs-card-bg)}.card-header-pills{margin-right:calc(-0.5*var(--bs-card-cap-padding-x));margin-left:calc(-0.5*var(--bs-card-cap-padding-x))}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:var(--bs-card-img-overlay-padding);border-radius:var(--bs-card-inner-border-radius)}.card-img,.card-img-top,.card-img-bottom{width:100%}.card-img,.card-img-top{border-top-left-radius:var(--bs-card-inner-border-radius);border-top-right-radius:var(--bs-card-inner-border-radius)}.card-img,.card-img-bottom{border-bottom-right-radius:var(--bs-card-inner-border-radius);border-bottom-left-radius:var(--bs-card-inner-border-radius)}.card-group>.card{margin-bottom:var(--bs-card-group-margin)}@media(min-width: 540px){.card-group{display:flex;flex-flow:row wrap}.card-group>.card{flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:not(:last-child) .card-img-top,.card-group>.card:not(:last-child) .card-header{border-top-right-radius:0}.card-group>.card:not(:last-child) .card-img-bottom,.card-group>.card:not(:last-child) .card-footer{border-bottom-right-radius:0}.card-group>.card:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:not(:first-child) .card-img-top,.card-group>.card:not(:first-child) .card-header{border-top-left-radius:0}.card-group>.card:not(:first-child) .card-img-bottom,.card-group>.card:not(:first-child) .card-footer{border-bottom-left-radius:0}}.accordion{--bs-accordion-color: var(--bs-body-color);--bs-accordion-bg: var(--bs-body-bg);--bs-accordion-transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, border-radius 0.15s ease;--bs-accordion-border-color: var(--bs-border-color);--bs-accordion-border-width: var(--bs-border-width);--bs-accordion-border-radius: var(--bs-border-radius);--bs-accordion-inner-border-radius: calc(var(--bs-border-radius) - (var(--bs-border-width)));--bs-accordion-btn-padding-x: 1.25rem;--bs-accordion-btn-padding-y: 1rem;--bs-accordion-btn-color: var(--bs-body-color);--bs-accordion-btn-bg: var(--bs-accordion-bg);--bs-accordion-btn-icon: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='none' stroke='%23212529' stroke-linecap='round' stroke-linejoin='round'%3e%3cpath d='M2 5L8 11L14 5'/%3e%3c/svg%3e\");--bs-accordion-btn-icon-width: 1.25rem;--bs-accordion-btn-icon-transform: rotate(-180deg);--bs-accordion-btn-icon-transition: transform 0.2s ease-in-out;--bs-accordion-btn-active-icon: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='none' stroke='%23052c65' stroke-linecap='round' stroke-linejoin='round'%3e%3cpath d='M2 5L8 11L14 5'/%3e%3c/svg%3e\");--bs-accordion-btn-focus-box-shadow: 0 0 0 0.1875rem var(--pst-color-accent);--bs-accordion-body-padding-x: 1.25rem;--bs-accordion-body-padding-y: 1rem;--bs-accordion-active-color: var(--bs-primary-text-emphasis);--bs-accordion-active-bg: var(--bs-primary-bg-subtle)}.accordion-button{position:relative;display:flex;align-items:center;width:100%;padding:var(--bs-accordion-btn-padding-y) 
var(--bs-accordion-btn-padding-x);font-size:1rem;color:var(--bs-accordion-btn-color);text-align:left;background-color:var(--bs-accordion-btn-bg);border:0;border-radius:0;overflow-anchor:none;transition:var(--bs-accordion-transition)}@media(prefers-reduced-motion: reduce){.accordion-button{transition:none}}.accordion-button:not(.collapsed){color:var(--bs-accordion-active-color);background-color:var(--bs-accordion-active-bg);box-shadow:inset 0 calc(-1*var(--bs-accordion-border-width)) 0 var(--bs-accordion-border-color)}.accordion-button:not(.collapsed)::after{background-image:var(--bs-accordion-btn-active-icon);transform:var(--bs-accordion-btn-icon-transform)}.accordion-button::after{flex-shrink:0;width:var(--bs-accordion-btn-icon-width);height:var(--bs-accordion-btn-icon-width);margin-left:auto;content:\"\";background-image:var(--bs-accordion-btn-icon);background-repeat:no-repeat;background-size:var(--bs-accordion-btn-icon-width);transition:var(--bs-accordion-btn-icon-transition)}@media(prefers-reduced-motion: reduce){.accordion-button::after{transition:none}}.accordion-button:hover{z-index:2}.accordion-button:focus{z-index:3;outline:0;box-shadow:var(--bs-accordion-btn-focus-box-shadow)}.accordion-header{margin-bottom:0}.accordion-item{color:var(--bs-accordion-color);background-color:var(--bs-accordion-bg);border:var(--bs-accordion-border-width) solid var(--bs-accordion-border-color)}.accordion-item:first-of-type{border-top-left-radius:var(--bs-accordion-border-radius);border-top-right-radius:var(--bs-accordion-border-radius)}.accordion-item:first-of-type>.accordion-header .accordion-button{border-top-left-radius:var(--bs-accordion-inner-border-radius);border-top-right-radius:var(--bs-accordion-inner-border-radius)}.accordion-item:not(:first-of-type){border-top:0}.accordion-item:last-of-type{border-bottom-right-radius:var(--bs-accordion-border-radius);border-bottom-left-radius:var(--bs-accordion-border-radius)}.accordion-item:last-of-type>.accordion-header .accordion-button.collapsed{border-bottom-right-radius:var(--bs-accordion-inner-border-radius);border-bottom-left-radius:var(--bs-accordion-inner-border-radius)}.accordion-item:last-of-type>.accordion-collapse{border-bottom-right-radius:var(--bs-accordion-border-radius);border-bottom-left-radius:var(--bs-accordion-border-radius)}.accordion-body{padding:var(--bs-accordion-body-padding-y) var(--bs-accordion-body-padding-x)}.accordion-flush>.accordion-item{border-right:0;border-left:0;border-radius:0}.accordion-flush>.accordion-item:first-child{border-top:0}.accordion-flush>.accordion-item:last-child{border-bottom:0}.accordion-flush>.accordion-item>.accordion-header .accordion-button,.accordion-flush>.accordion-item>.accordion-header .accordion-button.collapsed{border-radius:0}.accordion-flush>.accordion-item>.accordion-collapse{border-radius:0}[data-bs-theme=dark] .accordion-button::after{--bs-accordion-btn-icon: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%236ea8fe'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e\");--bs-accordion-btn-active-icon: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%236ea8fe'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e\")}.breadcrumb{--bs-breadcrumb-padding-x: 
0;--bs-breadcrumb-padding-y: 0;--bs-breadcrumb-margin-bottom: 1rem;--bs-breadcrumb-bg: ;--bs-breadcrumb-border-radius: ;--bs-breadcrumb-divider-color: var(--bs-secondary-color);--bs-breadcrumb-item-padding-x: 0.5rem;--bs-breadcrumb-item-active-color: var(--bs-secondary-color);display:flex;flex-wrap:wrap;padding:var(--bs-breadcrumb-padding-y) var(--bs-breadcrumb-padding-x);margin-bottom:var(--bs-breadcrumb-margin-bottom);font-size:var(--bs-breadcrumb-font-size);list-style:none;background-color:var(--bs-breadcrumb-bg);border-radius:var(--bs-breadcrumb-border-radius)}.breadcrumb-item+.breadcrumb-item{padding-left:var(--bs-breadcrumb-item-padding-x)}.breadcrumb-item+.breadcrumb-item::before{float:left;padding-right:var(--bs-breadcrumb-item-padding-x);color:var(--bs-breadcrumb-divider-color);content:var(--bs-breadcrumb-divider, \"/\") /* rtl: var(--bs-breadcrumb-divider, \"/\") */}.breadcrumb-item.active{color:var(--bs-breadcrumb-item-active-color)}.pagination{--bs-pagination-padding-x: 0.75rem;--bs-pagination-padding-y: 0.375rem;--bs-pagination-font-size:1rem;--bs-pagination-color: var(--bs-link-color);--bs-pagination-bg: var(--bs-body-bg);--bs-pagination-border-width: var(--bs-border-width);--bs-pagination-border-color: var(--bs-border-color);--bs-pagination-border-radius: var(--bs-border-radius);--bs-pagination-hover-color: var(--bs-link-hover-color);--bs-pagination-hover-bg: var(--bs-tertiary-bg);--bs-pagination-hover-border-color: var(--bs-border-color);--bs-pagination-focus-color: var(--bs-link-hover-color);--bs-pagination-focus-bg: var(--bs-secondary-bg);--bs-pagination-focus-box-shadow: 0 0 0 0.1875rem var(--pst-color-accent);--bs-pagination-active-color: #fff;--bs-pagination-active-bg: #0d6efd;--bs-pagination-active-border-color: #0d6efd;--bs-pagination-disabled-color: var(--bs-secondary-color);--bs-pagination-disabled-bg: var(--bs-secondary-bg);--bs-pagination-disabled-border-color: var(--bs-border-color);display:flex;padding-left:0;list-style:none}.page-link{position:relative;display:block;padding:var(--bs-pagination-padding-y) var(--bs-pagination-padding-x);font-size:var(--bs-pagination-font-size);color:var(--bs-pagination-color);text-decoration:none;background-color:var(--bs-pagination-bg);border:var(--bs-pagination-border-width) solid var(--bs-pagination-border-color);transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: reduce){.page-link{transition:none}}.page-link:hover{z-index:2;color:var(--bs-pagination-hover-color);background-color:var(--bs-pagination-hover-bg);border-color:var(--bs-pagination-hover-border-color)}.page-link:focus{z-index:3;color:var(--bs-pagination-focus-color);background-color:var(--bs-pagination-focus-bg);outline:0;box-shadow:var(--bs-pagination-focus-box-shadow)}.page-link.active,.active>.page-link{z-index:3;color:var(--bs-pagination-active-color);background-color:var(--bs-pagination-active-bg);border-color:var(--bs-pagination-active-border-color)}.page-link.disabled,.disabled>.page-link{color:var(--bs-pagination-disabled-color);pointer-events:none;background-color:var(--bs-pagination-disabled-bg);border-color:var(--bs-pagination-disabled-border-color)}.page-item:not(:first-child) .page-link{margin-left:calc(var(--bs-border-width)*-1)}.page-item:first-child .page-link{border-top-left-radius:var(--bs-pagination-border-radius);border-bottom-left-radius:var(--bs-pagination-border-radius)}.page-item:last-child 
.page-link{border-top-right-radius:var(--bs-pagination-border-radius);border-bottom-right-radius:var(--bs-pagination-border-radius)}.pagination-lg{--bs-pagination-padding-x: 1.5rem;--bs-pagination-padding-y: 0.75rem;--bs-pagination-font-size:1.25rem;--bs-pagination-border-radius: var(--bs-border-radius-lg)}.pagination-sm{--bs-pagination-padding-x: 0.5rem;--bs-pagination-padding-y: 0.25rem;--bs-pagination-font-size:0.875rem;--bs-pagination-border-radius: var(--bs-border-radius-sm)}.badge{--bs-badge-padding-x: 0.65em;--bs-badge-padding-y: 0.35em;--bs-badge-font-size:0.75em;--bs-badge-font-weight: 700;--bs-badge-color: #fff;--bs-badge-border-radius: var(--bs-border-radius);display:inline-block;padding:var(--bs-badge-padding-y) var(--bs-badge-padding-x);font-size:var(--bs-badge-font-size);font-weight:var(--bs-badge-font-weight);line-height:1;color:var(--bs-badge-color);text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:var(--bs-badge-border-radius)}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.alert{--bs-alert-bg: transparent;--bs-alert-padding-x: 1rem;--bs-alert-padding-y: 1rem;--bs-alert-margin-bottom: 1rem;--bs-alert-color: inherit;--bs-alert-border-color: transparent;--bs-alert-border: var(--bs-border-width) solid var(--bs-alert-border-color);--bs-alert-border-radius: var(--bs-border-radius);--bs-alert-link-color: inherit;position:relative;padding:var(--bs-alert-padding-y) var(--bs-alert-padding-x);margin-bottom:var(--bs-alert-margin-bottom);color:var(--bs-alert-color);background-color:var(--bs-alert-bg);border:var(--bs-alert-border);border-radius:var(--bs-alert-border-radius)}.alert-heading{color:inherit}.alert-link{font-weight:700;color:var(--bs-alert-link-color)}.alert-dismissible{padding-right:3rem}.alert-dismissible .btn-close{position:absolute;top:0;right:0;z-index:2;padding:1.25rem 1rem}.alert-primary{--bs-alert-color: var(--bs-primary-text-emphasis);--bs-alert-bg: var(--bs-primary-bg-subtle);--bs-alert-border-color: var(--bs-primary-border-subtle);--bs-alert-link-color: var(--bs-primary-text-emphasis)}.alert-secondary{--bs-alert-color: var(--bs-secondary-text-emphasis);--bs-alert-bg: var(--bs-secondary-bg-subtle);--bs-alert-border-color: var(--bs-secondary-border-subtle);--bs-alert-link-color: var(--bs-secondary-text-emphasis)}.alert-success{--bs-alert-color: var(--bs-success-text-emphasis);--bs-alert-bg: var(--bs-success-bg-subtle);--bs-alert-border-color: var(--bs-success-border-subtle);--bs-alert-link-color: var(--bs-success-text-emphasis)}.alert-info{--bs-alert-color: var(--bs-info-text-emphasis);--bs-alert-bg: var(--bs-info-bg-subtle);--bs-alert-border-color: var(--bs-info-border-subtle);--bs-alert-link-color: var(--bs-info-text-emphasis)}.alert-warning{--bs-alert-color: var(--bs-warning-text-emphasis);--bs-alert-bg: var(--bs-warning-bg-subtle);--bs-alert-border-color: var(--bs-warning-border-subtle);--bs-alert-link-color: var(--bs-warning-text-emphasis)}.alert-danger{--bs-alert-color: var(--bs-danger-text-emphasis);--bs-alert-bg: var(--bs-danger-bg-subtle);--bs-alert-border-color: var(--bs-danger-border-subtle);--bs-alert-link-color: var(--bs-danger-text-emphasis)}.alert-light{--bs-alert-color: var(--bs-light-text-emphasis);--bs-alert-bg: var(--bs-light-bg-subtle);--bs-alert-border-color: var(--bs-light-border-subtle);--bs-alert-link-color: var(--bs-light-text-emphasis)}.alert-dark{--bs-alert-color: var(--bs-dark-text-emphasis);--bs-alert-bg: var(--bs-dark-bg-subtle);--bs-alert-border-color: 
var(--bs-dark-border-subtle);--bs-alert-link-color: var(--bs-dark-text-emphasis)}@keyframes progress-bar-stripes{0%{background-position-x:1rem}}.progress,.progress-stacked{--bs-progress-height: 1rem;--bs-progress-font-size:0.75rem;--bs-progress-bg: var(--bs-secondary-bg);--bs-progress-border-radius: var(--bs-border-radius);--bs-progress-box-shadow: var(--bs-box-shadow-inset);--bs-progress-bar-color: #fff;--bs-progress-bar-bg: #0d6efd;--bs-progress-bar-transition: width 0.6s ease;display:flex;height:var(--bs-progress-height);overflow:hidden;font-size:var(--bs-progress-font-size);background-color:var(--bs-progress-bg);border-radius:var(--bs-progress-border-radius)}.progress-bar{display:flex;flex-direction:column;justify-content:center;overflow:hidden;color:var(--bs-progress-bar-color);text-align:center;white-space:nowrap;background-color:var(--bs-progress-bar-bg);transition:var(--bs-progress-bar-transition)}@media(prefers-reduced-motion: reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);background-size:var(--bs-progress-height) var(--bs-progress-height)}.progress-stacked>.progress{overflow:visible}.progress-stacked>.progress>.progress-bar{width:100%}.progress-bar-animated{animation:1s linear infinite progress-bar-stripes}@media(prefers-reduced-motion: reduce){.progress-bar-animated{animation:none}}.list-group{--bs-list-group-color: var(--bs-body-color);--bs-list-group-bg: var(--bs-body-bg);--bs-list-group-border-color: var(--bs-border-color);--bs-list-group-border-width: var(--bs-border-width);--bs-list-group-border-radius: var(--bs-border-radius);--bs-list-group-item-padding-x: 1rem;--bs-list-group-item-padding-y: 0.5rem;--bs-list-group-action-color: var(--bs-secondary-color);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-tertiary-bg);--bs-list-group-action-active-color: var(--bs-body-color);--bs-list-group-action-active-bg: var(--bs-secondary-bg);--bs-list-group-disabled-color: var(--bs-secondary-color);--bs-list-group-disabled-bg: var(--bs-body-bg);--bs-list-group-active-color: #fff;--bs-list-group-active-bg: #0d6efd;--bs-list-group-active-border-color: #0d6efd;display:flex;flex-direction:column;padding-left:0;margin-bottom:0;border-radius:var(--bs-list-group-border-radius)}.list-group-numbered{list-style-type:none;counter-reset:section}.list-group-numbered>.list-group-item::before{content:counters(section, \".\") \". 
\";counter-increment:section}.list-group-item-action{width:100%;color:var(--bs-list-group-action-color);text-align:inherit}.list-group-item-action:hover,.list-group-item-action:focus{z-index:1;color:var(--bs-list-group-action-hover-color);text-decoration:none;background-color:var(--bs-list-group-action-hover-bg)}.list-group-item-action:active{color:var(--bs-list-group-action-active-color);background-color:var(--bs-list-group-action-active-bg)}.list-group-item{position:relative;display:block;padding:var(--bs-list-group-item-padding-y) var(--bs-list-group-item-padding-x);color:var(--bs-list-group-color);text-decoration:none;background-color:var(--bs-list-group-bg);border:var(--bs-list-group-border-width) solid var(--bs-list-group-border-color)}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item.disabled,.list-group-item:disabled{color:var(--bs-list-group-disabled-color);pointer-events:none;background-color:var(--bs-list-group-disabled-bg)}.list-group-item.active{z-index:2;color:var(--bs-list-group-active-color);background-color:var(--bs-list-group-active-bg);border-color:var(--bs-list-group-active-border-color)}.list-group-item+.list-group-item{border-top-width:0}.list-group-item+.list-group-item.active{margin-top:calc(-1*var(--bs-list-group-border-width));border-top-width:var(--bs-list-group-border-width)}.list-group-horizontal{flex-direction:row}.list-group-horizontal>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal>.list-group-item:last-child:not(:first-child){border-top-right-radius:var(--bs-list-group-border-radius);border-bottom-left-radius:0}.list-group-horizontal>.list-group-item.active{margin-top:0}.list-group-horizontal>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal>.list-group-item+.list-group-item.active{margin-left:calc(-1*var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}@media(min-width: 540px){.list-group-horizontal-sm{flex-direction:row}.list-group-horizontal-sm>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal-sm>.list-group-item:last-child:not(:first-child){border-top-right-radius:var(--bs-list-group-border-radius);border-bottom-left-radius:0}.list-group-horizontal-sm>.list-group-item.active{margin-top:0}.list-group-horizontal-sm>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-sm>.list-group-item+.list-group-item.active{margin-left:calc(-1*var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}@media(min-width: 
720px){.list-group-horizontal-md{flex-direction:row}.list-group-horizontal-md>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal-md>.list-group-item:last-child:not(:first-child){border-top-right-radius:var(--bs-list-group-border-radius);border-bottom-left-radius:0}.list-group-horizontal-md>.list-group-item.active{margin-top:0}.list-group-horizontal-md>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-md>.list-group-item+.list-group-item.active{margin-left:calc(-1*var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}@media(min-width: 960px){.list-group-horizontal-lg{flex-direction:row}.list-group-horizontal-lg>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal-lg>.list-group-item:last-child:not(:first-child){border-top-right-radius:var(--bs-list-group-border-radius);border-bottom-left-radius:0}.list-group-horizontal-lg>.list-group-item.active{margin-top:0}.list-group-horizontal-lg>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-lg>.list-group-item+.list-group-item.active{margin-left:calc(-1*var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}@media(min-width: 1200px){.list-group-horizontal-xl{flex-direction:row}.list-group-horizontal-xl>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal-xl>.list-group-item:last-child:not(:first-child){border-top-right-radius:var(--bs-list-group-border-radius);border-bottom-left-radius:0}.list-group-horizontal-xl>.list-group-item.active{margin-top:0}.list-group-horizontal-xl>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-xl>.list-group-item+.list-group-item.active{margin-left:calc(-1*var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}.list-group-flush{border-radius:0}.list-group-flush>.list-group-item{border-width:0 0 var(--bs-list-group-border-width)}.list-group-flush>.list-group-item:last-child{border-bottom-width:0}.list-group-item-primary{--bs-list-group-color: var(--bs-primary-text-emphasis);--bs-list-group-bg: var(--bs-primary-bg-subtle);--bs-list-group-border-color: var(--bs-primary-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-primary-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-primary-border-subtle);--bs-list-group-active-color: var(--bs-primary-bg-subtle);--bs-list-group-active-bg: var(--bs-primary-text-emphasis);--bs-list-group-active-border-color: var(--bs-primary-text-emphasis)}.list-group-item-secondary{--bs-list-group-color: var(--bs-secondary-text-emphasis);--bs-list-group-bg: var(--bs-secondary-bg-subtle);--bs-list-group-border-color: var(--bs-secondary-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-secondary-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: 
var(--bs-secondary-border-subtle);--bs-list-group-active-color: var(--bs-secondary-bg-subtle);--bs-list-group-active-bg: var(--bs-secondary-text-emphasis);--bs-list-group-active-border-color: var(--bs-secondary-text-emphasis)}.list-group-item-success{--bs-list-group-color: var(--bs-success-text-emphasis);--bs-list-group-bg: var(--bs-success-bg-subtle);--bs-list-group-border-color: var(--bs-success-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-success-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-success-border-subtle);--bs-list-group-active-color: var(--bs-success-bg-subtle);--bs-list-group-active-bg: var(--bs-success-text-emphasis);--bs-list-group-active-border-color: var(--bs-success-text-emphasis)}.list-group-item-info{--bs-list-group-color: var(--bs-info-text-emphasis);--bs-list-group-bg: var(--bs-info-bg-subtle);--bs-list-group-border-color: var(--bs-info-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-info-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-info-border-subtle);--bs-list-group-active-color: var(--bs-info-bg-subtle);--bs-list-group-active-bg: var(--bs-info-text-emphasis);--bs-list-group-active-border-color: var(--bs-info-text-emphasis)}.list-group-item-warning{--bs-list-group-color: var(--bs-warning-text-emphasis);--bs-list-group-bg: var(--bs-warning-bg-subtle);--bs-list-group-border-color: var(--bs-warning-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-warning-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-warning-border-subtle);--bs-list-group-active-color: var(--bs-warning-bg-subtle);--bs-list-group-active-bg: var(--bs-warning-text-emphasis);--bs-list-group-active-border-color: var(--bs-warning-text-emphasis)}.list-group-item-danger{--bs-list-group-color: var(--bs-danger-text-emphasis);--bs-list-group-bg: var(--bs-danger-bg-subtle);--bs-list-group-border-color: var(--bs-danger-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-danger-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-danger-border-subtle);--bs-list-group-active-color: var(--bs-danger-bg-subtle);--bs-list-group-active-bg: var(--bs-danger-text-emphasis);--bs-list-group-active-border-color: var(--bs-danger-text-emphasis)}.list-group-item-light{--bs-list-group-color: var(--bs-light-text-emphasis);--bs-list-group-bg: var(--bs-light-bg-subtle);--bs-list-group-border-color: var(--bs-light-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-light-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-light-border-subtle);--bs-list-group-active-color: var(--bs-light-bg-subtle);--bs-list-group-active-bg: var(--bs-light-text-emphasis);--bs-list-group-active-border-color: var(--bs-light-text-emphasis)}.list-group-item-dark{--bs-list-group-color: var(--bs-dark-text-emphasis);--bs-list-group-bg: var(--bs-dark-bg-subtle);--bs-list-group-border-color: var(--bs-dark-border-subtle);--bs-list-group-action-hover-color: 
var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-dark-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-dark-border-subtle);--bs-list-group-active-color: var(--bs-dark-bg-subtle);--bs-list-group-active-bg: var(--bs-dark-text-emphasis);--bs-list-group-active-border-color: var(--bs-dark-text-emphasis)}.btn-close{--bs-btn-close-color: #000;--bs-btn-close-bg: url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23000'%3e%3cpath d='M.293.293a1 1 0 0 1 1.414 0L8 6.586 14.293.293a1 1 0 1 1 1.414 1.414L9.414 8l6.293 6.293a1 1 0 0 1-1.414 1.414L8 9.414l-6.293 6.293a1 1 0 0 1-1.414-1.414L6.586 8 .293 1.707a1 1 0 0 1 0-1.414z'/%3e%3c/svg%3e\");--bs-btn-close-opacity: 0.5;--bs-btn-close-hover-opacity: 0.75;--bs-btn-close-focus-shadow: 0 0 0 0.1875rem var(--pst-color-accent);--bs-btn-close-focus-opacity: 1;--bs-btn-close-disabled-opacity: 0.25;--bs-btn-close-white-filter: invert(1) grayscale(100%) brightness(200%);box-sizing:content-box;width:1em;height:1em;padding:.25em .25em;color:var(--bs-btn-close-color);background:rgba(0,0,0,0) var(--bs-btn-close-bg) center/1em auto no-repeat;border:0;border-radius:.375rem;opacity:var(--bs-btn-close-opacity)}.btn-close:hover{color:var(--bs-btn-close-color);text-decoration:none;opacity:var(--bs-btn-close-hover-opacity)}.btn-close:focus{outline:0;box-shadow:var(--bs-btn-close-focus-shadow);opacity:var(--bs-btn-close-focus-opacity)}.btn-close:disabled,.btn-close.disabled{pointer-events:none;user-select:none;opacity:var(--bs-btn-close-disabled-opacity)}.btn-close-white{filter:var(--bs-btn-close-white-filter)}[data-bs-theme=dark] .btn-close{filter:var(--bs-btn-close-white-filter)}.toast{--bs-toast-zindex: 1090;--bs-toast-padding-x: 0.75rem;--bs-toast-padding-y: 0.5rem;--bs-toast-spacing: 1.5rem;--bs-toast-max-width: 350px;--bs-toast-font-size:0.875rem;--bs-toast-color: ;--bs-toast-bg: rgba(var(--bs-body-bg-rgb), 0.85);--bs-toast-border-width: var(--bs-border-width);--bs-toast-border-color: var(--bs-border-color-translucent);--bs-toast-border-radius: var(--bs-border-radius);--bs-toast-box-shadow: var(--bs-box-shadow);--bs-toast-header-color: var(--bs-secondary-color);--bs-toast-header-bg: rgba(var(--bs-body-bg-rgb), 0.85);--bs-toast-header-border-color: var(--bs-border-color-translucent);width:var(--bs-toast-max-width);max-width:100%;font-size:var(--bs-toast-font-size);color:var(--bs-toast-color);pointer-events:auto;background-color:var(--bs-toast-bg);background-clip:padding-box;border:var(--bs-toast-border-width) solid var(--bs-toast-border-color);box-shadow:var(--bs-toast-box-shadow);border-radius:var(--bs-toast-border-radius)}.toast.showing{opacity:0}.toast:not(.show){display:none}.toast-container{--bs-toast-zindex: 1090;position:absolute;z-index:var(--bs-toast-zindex);width:max-content;max-width:100%;pointer-events:none}.toast-container>:not(:last-child){margin-bottom:var(--bs-toast-spacing)}.toast-header{display:flex;align-items:center;padding:var(--bs-toast-padding-y) var(--bs-toast-padding-x);color:var(--bs-toast-header-color);background-color:var(--bs-toast-header-bg);background-clip:padding-box;border-bottom:var(--bs-toast-border-width) solid var(--bs-toast-header-border-color);border-top-left-radius:calc(var(--bs-toast-border-radius) - var(--bs-toast-border-width));border-top-right-radius:calc(var(--bs-toast-border-radius) - var(--bs-toast-border-width))}.toast-header 
.btn-close{margin-right:calc(-0.5*var(--bs-toast-padding-x));margin-left:var(--bs-toast-padding-x)}.toast-body{padding:var(--bs-toast-padding-x);word-wrap:break-word}.modal{--bs-modal-zindex: 1055;--bs-modal-width: 500px;--bs-modal-padding: 1rem;--bs-modal-margin: 0.5rem;--bs-modal-color: ;--bs-modal-bg: var(--bs-body-bg);--bs-modal-border-color: var(--bs-border-color-translucent);--bs-modal-border-width: var(--bs-border-width);--bs-modal-border-radius: var(--bs-border-radius-lg);--bs-modal-box-shadow: var(--bs-box-shadow-sm);--bs-modal-inner-border-radius: calc(var(--bs-border-radius-lg) - (var(--bs-border-width)));--bs-modal-header-padding-x: 1rem;--bs-modal-header-padding-y: 1rem;--bs-modal-header-padding: 1rem 1rem;--bs-modal-header-border-color: var(--bs-border-color);--bs-modal-header-border-width: var(--bs-border-width);--bs-modal-title-line-height: 1.5;--bs-modal-footer-gap: 0.5rem;--bs-modal-footer-bg: ;--bs-modal-footer-border-color: var(--bs-border-color);--bs-modal-footer-border-width: var(--bs-border-width);position:fixed;top:0;left:0;z-index:var(--bs-modal-zindex);display:none;width:100%;height:100%;overflow-x:hidden;overflow-y:auto;outline:0}.modal-dialog{position:relative;width:auto;margin:var(--bs-modal-margin);pointer-events:none}.modal.fade .modal-dialog{transition:transform .3s ease-out;transform:translate(0, -50px)}@media(prefers-reduced-motion: reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{transform:none}.modal.modal-static .modal-dialog{transform:scale(1.02)}.modal-dialog-scrollable{height:calc(100% - var(--bs-modal-margin)*2)}.modal-dialog-scrollable .modal-content{max-height:100%;overflow:hidden}.modal-dialog-scrollable .modal-body{overflow-y:auto}.modal-dialog-centered{display:flex;align-items:center;min-height:calc(100% - var(--bs-modal-margin)*2)}.modal-content{position:relative;display:flex;flex-direction:column;width:100%;color:var(--bs-modal-color);pointer-events:auto;background-color:var(--bs-modal-bg);background-clip:padding-box;border:var(--bs-modal-border-width) solid var(--bs-modal-border-color);border-radius:var(--bs-modal-border-radius);outline:0}.modal-backdrop{--bs-backdrop-zindex: 1050;--bs-backdrop-bg: #000;--bs-backdrop-opacity: 0.5;position:fixed;top:0;left:0;z-index:var(--bs-backdrop-zindex);width:100vw;height:100vh;background-color:var(--bs-backdrop-bg)}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:var(--bs-backdrop-opacity)}.modal-header{display:flex;flex-shrink:0;align-items:center;padding:var(--bs-modal-header-padding);border-bottom:var(--bs-modal-header-border-width) solid var(--bs-modal-header-border-color);border-top-left-radius:var(--bs-modal-inner-border-radius);border-top-right-radius:var(--bs-modal-inner-border-radius)}.modal-header .btn-close{padding:calc(var(--bs-modal-header-padding-y)*.5) calc(var(--bs-modal-header-padding-x)*.5);margin:calc(-0.5*var(--bs-modal-header-padding-y)) calc(-0.5*var(--bs-modal-header-padding-x)) calc(-0.5*var(--bs-modal-header-padding-y)) auto}.modal-title{margin-bottom:0;line-height:var(--bs-modal-title-line-height)}.modal-body{position:relative;flex:1 1 auto;padding:var(--bs-modal-padding)}.modal-footer{display:flex;flex-shrink:0;flex-wrap:wrap;align-items:center;justify-content:flex-end;padding:calc(var(--bs-modal-padding) - var(--bs-modal-footer-gap)*.5);background-color:var(--bs-modal-footer-bg);border-top:var(--bs-modal-footer-border-width) solid 
var(--bs-modal-footer-border-color);border-bottom-right-radius:var(--bs-modal-inner-border-radius);border-bottom-left-radius:var(--bs-modal-inner-border-radius)}.modal-footer>*{margin:calc(var(--bs-modal-footer-gap)*.5)}@media(min-width: 540px){.modal{--bs-modal-margin: 1.75rem;--bs-modal-box-shadow: var(--bs-box-shadow)}.modal-dialog{max-width:var(--bs-modal-width);margin-right:auto;margin-left:auto}.modal-sm{--bs-modal-width: 300px}}@media(min-width: 960px){.modal-lg,.modal-xl{--bs-modal-width: 800px}}@media(min-width: 1200px){.modal-xl{--bs-modal-width: 1140px}}.modal-fullscreen{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen .modal-header,.modal-fullscreen .modal-footer{border-radius:0}.modal-fullscreen .modal-body{overflow-y:auto}@media(max-width: 539.98px){.modal-fullscreen-sm-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-sm-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-sm-down .modal-header,.modal-fullscreen-sm-down .modal-footer{border-radius:0}.modal-fullscreen-sm-down .modal-body{overflow-y:auto}}@media(max-width: 719.98px){.modal-fullscreen-md-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-md-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-md-down .modal-header,.modal-fullscreen-md-down .modal-footer{border-radius:0}.modal-fullscreen-md-down .modal-body{overflow-y:auto}}@media(max-width: 959.98px){.modal-fullscreen-lg-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-lg-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-lg-down .modal-header,.modal-fullscreen-lg-down .modal-footer{border-radius:0}.modal-fullscreen-lg-down .modal-body{overflow-y:auto}}@media(max-width: 1199.98px){.modal-fullscreen-xl-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-xl-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-xl-down .modal-header,.modal-fullscreen-xl-down .modal-footer{border-radius:0}.modal-fullscreen-xl-down .modal-body{overflow-y:auto}}.tooltip{--bs-tooltip-zindex: 1080;--bs-tooltip-max-width: 200px;--bs-tooltip-padding-x: 0.5rem;--bs-tooltip-padding-y: 0.25rem;--bs-tooltip-margin: ;--bs-tooltip-font-size:0.875rem;--bs-tooltip-color: var(--bs-body-bg);--bs-tooltip-bg: var(--bs-emphasis-color);--bs-tooltip-border-radius: var(--bs-border-radius);--bs-tooltip-opacity: 0.9;--bs-tooltip-arrow-width: 0.8rem;--bs-tooltip-arrow-height: 0.4rem;z-index:var(--bs-tooltip-zindex);display:block;margin:var(--bs-tooltip-margin);font-family:var(--bs-font-sans-serif);font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;white-space:normal;word-spacing:normal;line-break:auto;font-size:var(--bs-tooltip-font-size);word-wrap:break-word;opacity:0}.tooltip.show{opacity:var(--bs-tooltip-opacity)}.tooltip .tooltip-arrow{display:block;width:var(--bs-tooltip-arrow-width);height:var(--bs-tooltip-arrow-height)}.tooltip .tooltip-arrow::before{position:absolute;content:\"\";border-color:rgba(0,0,0,0);border-style:solid}.bs-tooltip-top .tooltip-arrow,.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow{bottom:calc(-1*var(--bs-tooltip-arrow-height))}.bs-tooltip-top .tooltip-arrow::before,.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow::before{top:-1px;border-width:var(--bs-tooltip-arrow-height) 
calc(var(--bs-tooltip-arrow-width)*.5) 0;border-top-color:var(--bs-tooltip-bg)}.bs-tooltip-end .tooltip-arrow,.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow{left:calc(-1*var(--bs-tooltip-arrow-height));width:var(--bs-tooltip-arrow-height);height:var(--bs-tooltip-arrow-width)}.bs-tooltip-end .tooltip-arrow::before,.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow::before{right:-1px;border-width:calc(var(--bs-tooltip-arrow-width)*.5) var(--bs-tooltip-arrow-height) calc(var(--bs-tooltip-arrow-width)*.5) 0;border-right-color:var(--bs-tooltip-bg)}.bs-tooltip-bottom .tooltip-arrow,.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow{top:calc(-1*var(--bs-tooltip-arrow-height))}.bs-tooltip-bottom .tooltip-arrow::before,.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow::before{bottom:-1px;border-width:0 calc(var(--bs-tooltip-arrow-width)*.5) var(--bs-tooltip-arrow-height);border-bottom-color:var(--bs-tooltip-bg)}.bs-tooltip-start .tooltip-arrow,.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow{right:calc(-1*var(--bs-tooltip-arrow-height));width:var(--bs-tooltip-arrow-height);height:var(--bs-tooltip-arrow-width)}.bs-tooltip-start .tooltip-arrow::before,.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow::before{left:-1px;border-width:calc(var(--bs-tooltip-arrow-width)*.5) 0 calc(var(--bs-tooltip-arrow-width)*.5) var(--bs-tooltip-arrow-height);border-left-color:var(--bs-tooltip-bg)}.tooltip-inner{max-width:var(--bs-tooltip-max-width);padding:var(--bs-tooltip-padding-y) var(--bs-tooltip-padding-x);color:var(--bs-tooltip-color);text-align:center;background-color:var(--bs-tooltip-bg);border-radius:var(--bs-tooltip-border-radius)}.popover{--bs-popover-zindex: 1070;--bs-popover-max-width: 276px;--bs-popover-font-size:0.875rem;--bs-popover-bg: var(--bs-body-bg);--bs-popover-border-width: var(--bs-border-width);--bs-popover-border-color: var(--bs-border-color-translucent);--bs-popover-border-radius: var(--bs-border-radius-lg);--bs-popover-inner-border-radius: calc(var(--bs-border-radius-lg) - var(--bs-border-width));--bs-popover-box-shadow: var(--bs-box-shadow);--bs-popover-header-padding-x: 1rem;--bs-popover-header-padding-y: 0.5rem;--bs-popover-header-font-size:1rem;--bs-popover-header-color: inherit;--bs-popover-header-bg: var(--bs-secondary-bg);--bs-popover-body-padding-x: 1rem;--bs-popover-body-padding-y: 1rem;--bs-popover-body-color: var(--bs-body-color);--bs-popover-arrow-width: 1rem;--bs-popover-arrow-height: 0.5rem;--bs-popover-arrow-border: var(--bs-popover-border-color);z-index:var(--bs-popover-zindex);display:block;max-width:var(--bs-popover-max-width);font-family:var(--bs-font-sans-serif);font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;white-space:normal;word-spacing:normal;line-break:auto;font-size:var(--bs-popover-font-size);word-wrap:break-word;background-color:var(--bs-popover-bg);background-clip:padding-box;border:var(--bs-popover-border-width) solid var(--bs-popover-border-color);border-radius:var(--bs-popover-border-radius)}.popover .popover-arrow{display:block;width:var(--bs-popover-arrow-width);height:var(--bs-popover-arrow-height)}.popover .popover-arrow::before,.popover 
.popover-arrow::after{position:absolute;display:block;content:\"\";border-color:rgba(0,0,0,0);border-style:solid;border-width:0}.bs-popover-top>.popover-arrow,.bs-popover-auto[data-popper-placement^=top]>.popover-arrow{bottom:calc(-1*(var(--bs-popover-arrow-height)) - var(--bs-popover-border-width))}.bs-popover-top>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::before,.bs-popover-top>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::after{border-width:var(--bs-popover-arrow-height) calc(var(--bs-popover-arrow-width)*.5) 0}.bs-popover-top>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::before{bottom:0;border-top-color:var(--bs-popover-arrow-border)}.bs-popover-top>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::after{bottom:var(--bs-popover-border-width);border-top-color:var(--bs-popover-bg)}.bs-popover-end>.popover-arrow,.bs-popover-auto[data-popper-placement^=right]>.popover-arrow{left:calc(-1*(var(--bs-popover-arrow-height)) - var(--bs-popover-border-width));width:var(--bs-popover-arrow-height);height:var(--bs-popover-arrow-width)}.bs-popover-end>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::before,.bs-popover-end>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::after{border-width:calc(var(--bs-popover-arrow-width)*.5) var(--bs-popover-arrow-height) calc(var(--bs-popover-arrow-width)*.5) 0}.bs-popover-end>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::before{left:0;border-right-color:var(--bs-popover-arrow-border)}.bs-popover-end>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::after{left:var(--bs-popover-border-width);border-right-color:var(--bs-popover-bg)}.bs-popover-bottom>.popover-arrow,.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow{top:calc(-1*(var(--bs-popover-arrow-height)) - var(--bs-popover-border-width))}.bs-popover-bottom>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::before,.bs-popover-bottom>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::after{border-width:0 calc(var(--bs-popover-arrow-width)*.5) var(--bs-popover-arrow-height)}.bs-popover-bottom>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::before{top:0;border-bottom-color:var(--bs-popover-arrow-border)}.bs-popover-bottom>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::after{top:var(--bs-popover-border-width);border-bottom-color:var(--bs-popover-bg)}.bs-popover-bottom .popover-header::before,.bs-popover-auto[data-popper-placement^=bottom] .popover-header::before{position:absolute;top:0;left:50%;display:block;width:var(--bs-popover-arrow-width);margin-left:calc(-0.5*var(--bs-popover-arrow-width));content:\"\";border-bottom:var(--bs-popover-border-width) solid var(--bs-popover-header-bg)}.bs-popover-start>.popover-arrow,.bs-popover-auto[data-popper-placement^=left]>.popover-arrow{right:calc(-1*(var(--bs-popover-arrow-height)) - 
var(--bs-popover-border-width));width:var(--bs-popover-arrow-height);height:var(--bs-popover-arrow-width)}.bs-popover-start>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::before,.bs-popover-start>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::after{border-width:calc(var(--bs-popover-arrow-width)*.5) 0 calc(var(--bs-popover-arrow-width)*.5) var(--bs-popover-arrow-height)}.bs-popover-start>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::before{right:0;border-left-color:var(--bs-popover-arrow-border)}.bs-popover-start>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::after{right:var(--bs-popover-border-width);border-left-color:var(--bs-popover-bg)}.popover-header{padding:var(--bs-popover-header-padding-y) var(--bs-popover-header-padding-x);margin-bottom:0;font-size:var(--bs-popover-header-font-size);color:var(--bs-popover-header-color);background-color:var(--bs-popover-header-bg);border-bottom:var(--bs-popover-border-width) solid var(--bs-popover-border-color);border-top-left-radius:var(--bs-popover-inner-border-radius);border-top-right-radius:var(--bs-popover-inner-border-radius)}.popover-header:empty{display:none}.popover-body{padding:var(--bs-popover-body-padding-y) var(--bs-popover-body-padding-x);color:var(--bs-popover-body-color)}.carousel{position:relative}.carousel.pointer-event{touch-action:pan-y}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner::after{display:block;clear:both;content:\"\"}.carousel-item{position:relative;display:none;float:left;width:100%;margin-right:-100%;backface-visibility:hidden;transition:transform .6s ease-in-out}@media(prefers-reduced-motion: reduce){.carousel-item{transition:none}}.carousel-item.active,.carousel-item-next,.carousel-item-prev{display:block}.carousel-item-next:not(.carousel-item-start),.active.carousel-item-end{transform:translateX(100%)}.carousel-item-prev:not(.carousel-item-end),.active.carousel-item-start{transform:translateX(-100%)}.carousel-fade .carousel-item{opacity:0;transition-property:opacity;transform:none}.carousel-fade .carousel-item.active,.carousel-fade .carousel-item-next.carousel-item-start,.carousel-fade .carousel-item-prev.carousel-item-end{z-index:1;opacity:1}.carousel-fade .active.carousel-item-start,.carousel-fade .active.carousel-item-end{z-index:0;opacity:0;transition:opacity 0s .6s}@media(prefers-reduced-motion: reduce){.carousel-fade .active.carousel-item-start,.carousel-fade .active.carousel-item-end{transition:none}}.carousel-control-prev,.carousel-control-next{position:absolute;top:0;bottom:0;z-index:1;display:flex;align-items:center;justify-content:center;width:15%;padding:0;color:#fff;text-align:center;background:none;border:0;opacity:.5;transition:opacity .15s ease}@media(prefers-reduced-motion: reduce){.carousel-control-prev,.carousel-control-next{transition:none}}.carousel-control-prev:hover,.carousel-control-prev:focus,.carousel-control-next:hover,.carousel-control-next:focus{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-prev-icon,.carousel-control-next-icon{display:inline-block;width:2rem;height:2rem;background-repeat:no-repeat;background-position:50%;background-size:100% 100%}.carousel-control-prev-icon{background-image:url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 
0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e\") /*rtl:url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e\")*/}.carousel-control-next-icon{background-image:url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e\") /*rtl:url(\"data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e\")*/}.carousel-indicators{position:absolute;right:0;bottom:0;left:0;z-index:2;display:flex;justify-content:center;padding:0;margin-right:15%;margin-bottom:1rem;margin-left:15%}.carousel-indicators [data-bs-target]{box-sizing:content-box;flex:0 1 auto;width:30px;height:3px;padding:0;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:#fff;background-clip:padding-box;border:0;border-top:10px solid rgba(0,0,0,0);border-bottom:10px solid rgba(0,0,0,0);opacity:.5;transition:opacity .6s ease}@media(prefers-reduced-motion: reduce){.carousel-indicators [data-bs-target]{transition:none}}.carousel-indicators .active{opacity:1}.carousel-caption{position:absolute;right:15%;bottom:1.25rem;left:15%;padding-top:1.25rem;padding-bottom:1.25rem;color:#fff;text-align:center}.carousel-dark .carousel-control-prev-icon,.carousel-dark .carousel-control-next-icon{filter:invert(1) grayscale(100)}.carousel-dark .carousel-indicators [data-bs-target]{background-color:#000}.carousel-dark .carousel-caption{color:#000}[data-bs-theme=dark] .carousel .carousel-control-prev-icon,[data-bs-theme=dark] .carousel .carousel-control-next-icon,[data-bs-theme=dark].carousel .carousel-control-prev-icon,[data-bs-theme=dark].carousel .carousel-control-next-icon{filter:invert(1) grayscale(100)}[data-bs-theme=dark] .carousel .carousel-indicators [data-bs-target],[data-bs-theme=dark].carousel .carousel-indicators [data-bs-target]{background-color:#000}[data-bs-theme=dark] .carousel .carousel-caption,[data-bs-theme=dark].carousel .carousel-caption{color:#000}.spinner-grow,.spinner-border{display:inline-block;width:var(--bs-spinner-width);height:var(--bs-spinner-height);vertical-align:var(--bs-spinner-vertical-align);border-radius:50%;animation:var(--bs-spinner-animation-speed) linear infinite var(--bs-spinner-animation-name)}@keyframes spinner-border{to{transform:rotate(360deg) /* rtl:ignore */}}.spinner-border{--bs-spinner-width: 2rem;--bs-spinner-height: 2rem;--bs-spinner-vertical-align: -0.125em;--bs-spinner-border-width: 0.25em;--bs-spinner-animation-speed: 0.75s;--bs-spinner-animation-name: spinner-border;border:var(--bs-spinner-border-width) solid currentcolor;border-right-color:rgba(0,0,0,0)}.spinner-border-sm{--bs-spinner-width: 1rem;--bs-spinner-height: 1rem;--bs-spinner-border-width: 0.2em}@keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1;transform:none}}.spinner-grow{--bs-spinner-width: 2rem;--bs-spinner-height: 2rem;--bs-spinner-vertical-align: -0.125em;--bs-spinner-animation-speed: 0.75s;--bs-spinner-animation-name: 
spinner-grow;background-color:currentcolor;opacity:0}.spinner-grow-sm{--bs-spinner-width: 1rem;--bs-spinner-height: 1rem}@media(prefers-reduced-motion: reduce){.spinner-border,.spinner-grow{--bs-spinner-animation-speed: 1.5s}}.offcanvas,.offcanvas-xl,.offcanvas-lg,.offcanvas-md,.offcanvas-sm{--bs-offcanvas-zindex: 1045;--bs-offcanvas-width: 400px;--bs-offcanvas-height: 30vh;--bs-offcanvas-padding-x: 1rem;--bs-offcanvas-padding-y: 1rem;--bs-offcanvas-color: var(--bs-body-color);--bs-offcanvas-bg: var(--bs-body-bg);--bs-offcanvas-border-width: var(--bs-border-width);--bs-offcanvas-border-color: var(--bs-border-color-translucent);--bs-offcanvas-box-shadow: var(--bs-box-shadow-sm);--bs-offcanvas-transition: transform 0.3s ease-in-out;--bs-offcanvas-title-line-height: 1.5}@media(max-width: 539.98px){.offcanvas-sm{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media(max-width: 539.98px)and (prefers-reduced-motion: reduce){.offcanvas-sm{transition:none}}@media(max-width: 539.98px){.offcanvas-sm.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-sm.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-sm.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-sm.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-sm.showing,.offcanvas-sm.show:not(.hiding){transform:none}.offcanvas-sm.showing,.offcanvas-sm.hiding,.offcanvas-sm.show{visibility:visible}}@media(min-width: 540px){.offcanvas-sm{--bs-offcanvas-height: auto;--bs-offcanvas-border-width: 0;background-color:rgba(0,0,0,0) !important}.offcanvas-sm .offcanvas-header{display:none}.offcanvas-sm .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible;background-color:rgba(0,0,0,0) !important}}@media(max-width: 719.98px){.offcanvas-md{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media(max-width: 719.98px)and (prefers-reduced-motion: reduce){.offcanvas-md{transition:none}}@media(max-width: 719.98px){.offcanvas-md.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-md.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-md.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid 
var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-md.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-md.showing,.offcanvas-md.show:not(.hiding){transform:none}.offcanvas-md.showing,.offcanvas-md.hiding,.offcanvas-md.show{visibility:visible}}@media(min-width: 720px){.offcanvas-md{--bs-offcanvas-height: auto;--bs-offcanvas-border-width: 0;background-color:rgba(0,0,0,0) !important}.offcanvas-md .offcanvas-header{display:none}.offcanvas-md .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible;background-color:rgba(0,0,0,0) !important}}@media(max-width: 959.98px){.offcanvas-lg{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media(max-width: 959.98px)and (prefers-reduced-motion: reduce){.offcanvas-lg{transition:none}}@media(max-width: 959.98px){.offcanvas-lg.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-lg.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-lg.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-lg.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-lg.showing,.offcanvas-lg.show:not(.hiding){transform:none}.offcanvas-lg.showing,.offcanvas-lg.hiding,.offcanvas-lg.show{visibility:visible}}@media(min-width: 960px){.offcanvas-lg{--bs-offcanvas-height: auto;--bs-offcanvas-border-width: 0;background-color:rgba(0,0,0,0) !important}.offcanvas-lg .offcanvas-header{display:none}.offcanvas-lg .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible;background-color:rgba(0,0,0,0) !important}}@media(max-width: 1199.98px){.offcanvas-xl{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media(max-width: 1199.98px)and (prefers-reduced-motion: reduce){.offcanvas-xl{transition:none}}@media(max-width: 1199.98px){.offcanvas-xl.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-xl.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-xl.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid 
var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-xl.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-xl.showing,.offcanvas-xl.show:not(.hiding){transform:none}.offcanvas-xl.showing,.offcanvas-xl.hiding,.offcanvas-xl.show{visibility:visible}}@media(min-width: 1200px){.offcanvas-xl{--bs-offcanvas-height: auto;--bs-offcanvas-border-width: 0;background-color:rgba(0,0,0,0) !important}.offcanvas-xl .offcanvas-header{display:none}.offcanvas-xl .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible;background-color:rgba(0,0,0,0) !important}}.offcanvas{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}@media(prefers-reduced-motion: reduce){.offcanvas{transition:none}}.offcanvas.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas.showing,.offcanvas.show:not(.hiding){transform:none}.offcanvas.showing,.offcanvas.hiding,.offcanvas.show{visibility:visible}.offcanvas-backdrop{position:fixed;top:0;left:0;z-index:1040;width:100vw;height:100vh;background-color:#000}.offcanvas-backdrop.fade{opacity:0}.offcanvas-backdrop.show{opacity:.5}.offcanvas-header{display:flex;align-items:center;padding:var(--bs-offcanvas-padding-y) var(--bs-offcanvas-padding-x)}.offcanvas-header .btn-close{padding:calc(var(--bs-offcanvas-padding-y)*.5) calc(var(--bs-offcanvas-padding-x)*.5);margin:calc(-0.5*var(--bs-offcanvas-padding-y)) calc(-0.5*var(--bs-offcanvas-padding-x)) calc(-0.5*var(--bs-offcanvas-padding-y)) auto}.offcanvas-title{margin-bottom:0;line-height:var(--bs-offcanvas-title-line-height)}.offcanvas-body{flex-grow:1;padding:var(--bs-offcanvas-padding-y) var(--bs-offcanvas-padding-x);overflow-y:auto}.placeholder{display:inline-block;min-height:1em;vertical-align:middle;cursor:wait;background-color:currentcolor;opacity:.5}.placeholder.btn::before{display:inline-block;content:\"\"}.placeholder-xs{min-height:.6em}.placeholder-sm{min-height:.8em}.placeholder-lg{min-height:1.2em}.placeholder-glow .placeholder{animation:placeholder-glow 2s ease-in-out infinite}@keyframes placeholder-glow{50%{opacity:.2}}.placeholder-wave{mask-image:linear-gradient(130deg, #000 55%, rgba(0, 0, 0, 0.8) 75%, #000 95%);mask-size:200% 100%;animation:placeholder-wave 2s linear infinite}@keyframes placeholder-wave{100%{mask-position:-200% 0%}}.clearfix::after{display:block;clear:both;content:\"\"}.text-bg-primary{color:#fff !important;background-color:RGBA(var(--bs-primary-rgb), var(--bs-bg-opacity, 1)) 
!important}.text-bg-secondary{color:#fff !important;background-color:RGBA(var(--bs-secondary-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-success{color:#fff !important;background-color:RGBA(var(--bs-success-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-info{color:#000 !important;background-color:RGBA(var(--bs-info-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-warning{color:#000 !important;background-color:RGBA(var(--bs-warning-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-danger{color:#fff !important;background-color:RGBA(var(--bs-danger-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-light{color:#000 !important;background-color:RGBA(var(--bs-light-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-dark{color:#fff !important;background-color:RGBA(var(--bs-dark-rgb), var(--bs-bg-opacity, 1)) !important}.link-primary{color:RGBA(var(--bs-primary-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-primary-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-primary:hover,.link-primary:focus{color:RGBA(10, 88, 202, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(10, 88, 202, var(--bs-link-underline-opacity, 1)) !important}.link-secondary{color:RGBA(var(--bs-secondary-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-secondary-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-secondary:hover,.link-secondary:focus{color:RGBA(86, 94, 100, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(86, 94, 100, var(--bs-link-underline-opacity, 1)) !important}.link-success{color:RGBA(var(--bs-success-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-success-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-success:hover,.link-success:focus{color:RGBA(20, 108, 67, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(20, 108, 67, var(--bs-link-underline-opacity, 1)) !important}.link-info{color:RGBA(var(--bs-info-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-info-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-info:hover,.link-info:focus{color:RGBA(61, 213, 243, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(61, 213, 243, var(--bs-link-underline-opacity, 1)) !important}.link-warning{color:RGBA(var(--bs-warning-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-warning-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-warning:hover,.link-warning:focus{color:RGBA(255, 205, 57, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(255, 205, 57, var(--bs-link-underline-opacity, 1)) !important}.link-danger{color:RGBA(var(--bs-danger-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-danger-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-danger:hover,.link-danger:focus{color:RGBA(176, 42, 55, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(176, 42, 55, var(--bs-link-underline-opacity, 1)) !important}.link-light{color:RGBA(var(--bs-light-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-light-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-light:hover,.link-light:focus{color:RGBA(249, 250, 251, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(249, 250, 251, var(--bs-link-underline-opacity, 1)) !important}.link-dark{color:RGBA(var(--bs-dark-rgb), var(--bs-link-opacity, 1)) 
!important;text-decoration-color:RGBA(var(--bs-dark-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-dark:hover,.link-dark:focus{color:RGBA(26, 30, 33, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(26, 30, 33, var(--bs-link-underline-opacity, 1)) !important}.link-body-emphasis{color:RGBA(var(--bs-emphasis-color-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-emphasis-color-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-body-emphasis:hover,.link-body-emphasis:focus{color:RGBA(var(--bs-emphasis-color-rgb), var(--bs-link-opacity, 0.75)) !important;text-decoration-color:RGBA(var(--bs-emphasis-color-rgb), var(--bs-link-underline-opacity, 0.75)) !important}.focus-ring:focus{outline:0;box-shadow:var(--bs-focus-ring-x, 0) var(--bs-focus-ring-y, 0) var(--bs-focus-ring-blur, 0) var(--bs-focus-ring-width) var(--bs-focus-ring-color)}.icon-link{display:inline-flex;gap:.375rem;align-items:center;text-decoration-color:rgba(var(--bs-link-color-rgb), var(--bs-link-opacity, 0.5));text-underline-offset:.25em;backface-visibility:hidden}.icon-link>.bi{flex-shrink:0;width:1em;height:1em;fill:currentcolor;transition:.2s ease-in-out transform}@media(prefers-reduced-motion: reduce){.icon-link>.bi{transition:none}}.icon-link-hover:hover>.bi,.icon-link-hover:focus-visible>.bi{transform:var(--bs-icon-link-transform, translate3d(0.25em, 0, 0))}.ratio{position:relative;width:100%}.ratio::before{display:block;padding-top:var(--bs-aspect-ratio);content:\"\"}.ratio>*{position:absolute;top:0;left:0;width:100%;height:100%}.ratio-1x1{--bs-aspect-ratio: 100%}.ratio-4x3{--bs-aspect-ratio: 75%}.ratio-16x9{--bs-aspect-ratio: 56.25%}.ratio-21x9{--bs-aspect-ratio: 42.8571428571%}.fixed-top{position:fixed;top:0;right:0;left:0;z-index:1030}.fixed-bottom{position:fixed;right:0;bottom:0;left:0;z-index:1030}.sticky-top{position:sticky;top:0;z-index:1020}.sticky-bottom{position:sticky;bottom:0;z-index:1020}@media(min-width: 540px){.sticky-sm-top{position:sticky;top:0;z-index:1020}.sticky-sm-bottom{position:sticky;bottom:0;z-index:1020}}@media(min-width: 720px){.sticky-md-top{position:sticky;top:0;z-index:1020}.sticky-md-bottom{position:sticky;bottom:0;z-index:1020}}@media(min-width: 960px){.sticky-lg-top{position:sticky;top:0;z-index:1020}.sticky-lg-bottom{position:sticky;bottom:0;z-index:1020}}@media(min-width: 1200px){.sticky-xl-top{position:sticky;top:0;z-index:1020}.sticky-xl-bottom{position:sticky;bottom:0;z-index:1020}}.hstack{display:flex;flex-direction:row;align-items:center;align-self:stretch}.vstack{display:flex;flex:1 1 auto;flex-direction:column;align-self:stretch}.visually-hidden,.visually-hidden-focusable:not(:focus):not(:focus-within){width:1px !important;height:1px !important;padding:0 !important;margin:-1px !important;overflow:hidden !important;clip:rect(0, 0, 0, 0) !important;white-space:nowrap !important;border:0 !important}.visually-hidden:not(caption),.visually-hidden-focusable:not(:focus):not(:focus-within):not(caption){position:absolute !important}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:\"\"}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.vr{display:inline-block;align-self:stretch;width:var(--bs-border-width);min-height:1em;background-color:currentcolor;opacity:.25}.align-baseline{vertical-align:baseline !important}.align-top{vertical-align:top !important}.align-middle{vertical-align:middle !important}.align-bottom{vertical-align:bottom 
!important}.align-text-bottom{vertical-align:text-bottom !important}.align-text-top{vertical-align:text-top !important}.float-start{float:left !important}.float-end{float:right !important}.float-none{float:none !important}.object-fit-contain{object-fit:contain !important}.object-fit-cover{object-fit:cover !important}.object-fit-fill{object-fit:fill !important}.object-fit-scale{object-fit:scale-down !important}.object-fit-none{object-fit:none !important}.opacity-0{opacity:0 !important}.opacity-25{opacity:.25 !important}.opacity-50{opacity:.5 !important}.opacity-75{opacity:.75 !important}.opacity-100{opacity:1 !important}.overflow-auto{overflow:auto !important}.overflow-hidden{overflow:hidden !important}.overflow-visible{overflow:visible !important}.overflow-scroll{overflow:scroll !important}.overflow-x-auto{overflow-x:auto !important}.overflow-x-hidden{overflow-x:hidden !important}.overflow-x-visible{overflow-x:visible !important}.overflow-x-scroll{overflow-x:scroll !important}.overflow-y-auto{overflow-y:auto !important}.overflow-y-hidden{overflow-y:hidden !important}.overflow-y-visible{overflow-y:visible !important}.overflow-y-scroll{overflow-y:scroll !important}.d-inline{display:inline !important}.d-inline-block{display:inline-block !important}.d-block{display:block !important}.d-grid{display:grid !important}.d-inline-grid{display:inline-grid !important}.d-table{display:table !important}.d-table-row{display:table-row !important}.d-table-cell{display:table-cell !important}.d-flex{display:flex !important}.d-inline-flex{display:inline-flex !important}.d-none{display:none !important}.shadow{box-shadow:var(--bs-box-shadow) !important}.shadow-sm{box-shadow:var(--bs-box-shadow-sm) !important}.shadow-lg{box-shadow:var(--bs-box-shadow-lg) !important}.shadow-none{box-shadow:none !important}.focus-ring-primary{--bs-focus-ring-color: rgba(var(--bs-primary-rgb), var(--bs-focus-ring-opacity))}.focus-ring-secondary{--bs-focus-ring-color: rgba(var(--bs-secondary-rgb), var(--bs-focus-ring-opacity))}.focus-ring-success{--bs-focus-ring-color: rgba(var(--bs-success-rgb), var(--bs-focus-ring-opacity))}.focus-ring-info{--bs-focus-ring-color: rgba(var(--bs-info-rgb), var(--bs-focus-ring-opacity))}.focus-ring-warning{--bs-focus-ring-color: rgba(var(--bs-warning-rgb), var(--bs-focus-ring-opacity))}.focus-ring-danger{--bs-focus-ring-color: rgba(var(--bs-danger-rgb), var(--bs-focus-ring-opacity))}.focus-ring-light{--bs-focus-ring-color: rgba(var(--bs-light-rgb), var(--bs-focus-ring-opacity))}.focus-ring-dark{--bs-focus-ring-color: rgba(var(--bs-dark-rgb), var(--bs-focus-ring-opacity))}.position-static{position:static !important}.position-relative{position:relative !important}.position-absolute{position:absolute !important}.position-fixed{position:fixed !important}.position-sticky{position:sticky !important}.top-0{top:0 !important}.top-50{top:50% !important}.top-100{top:100% !important}.bottom-0{bottom:0 !important}.bottom-50{bottom:50% !important}.bottom-100{bottom:100% !important}.start-0{left:0 !important}.start-50{left:50% !important}.start-100{left:100% !important}.end-0{right:0 !important}.end-50{right:50% !important}.end-100{right:100% !important}.translate-middle{transform:translate(-50%, -50%) !important}.translate-middle-x{transform:translateX(-50%) !important}.translate-middle-y{transform:translateY(-50%) !important}.border{border:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important}.border-0{border:0 !important}.border-top{border-top:var(--bs-border-width) 
var(--bs-border-style) var(--bs-border-color) !important}.border-top-0{border-top:0 !important}.border-end{border-right:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important}.border-end-0{border-right:0 !important}.border-bottom{border-bottom:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important}.border-bottom-0{border-bottom:0 !important}.border-start{border-left:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important}.border-start-0{border-left:0 !important}.border-primary{--bs-border-opacity: 1;border-color:rgba(var(--bs-primary-rgb), var(--bs-border-opacity)) !important}.border-secondary{--bs-border-opacity: 1;border-color:rgba(var(--bs-secondary-rgb), var(--bs-border-opacity)) !important}.border-success{--bs-border-opacity: 1;border-color:rgba(var(--bs-success-rgb), var(--bs-border-opacity)) !important}.border-info{--bs-border-opacity: 1;border-color:rgba(var(--bs-info-rgb), var(--bs-border-opacity)) !important}.border-warning{--bs-border-opacity: 1;border-color:rgba(var(--bs-warning-rgb), var(--bs-border-opacity)) !important}.border-danger{--bs-border-opacity: 1;border-color:rgba(var(--bs-danger-rgb), var(--bs-border-opacity)) !important}.border-light{--bs-border-opacity: 1;border-color:rgba(var(--bs-light-rgb), var(--bs-border-opacity)) !important}.border-dark{--bs-border-opacity: 1;border-color:rgba(var(--bs-dark-rgb), var(--bs-border-opacity)) !important}.border-black{--bs-border-opacity: 1;border-color:rgba(var(--bs-black-rgb), var(--bs-border-opacity)) !important}.border-white{--bs-border-opacity: 1;border-color:rgba(var(--bs-white-rgb), var(--bs-border-opacity)) !important}.border-primary-subtle{border-color:var(--bs-primary-border-subtle) !important}.border-secondary-subtle{border-color:var(--bs-secondary-border-subtle) !important}.border-success-subtle{border-color:var(--bs-success-border-subtle) !important}.border-info-subtle{border-color:var(--bs-info-border-subtle) !important}.border-warning-subtle{border-color:var(--bs-warning-border-subtle) !important}.border-danger-subtle{border-color:var(--bs-danger-border-subtle) !important}.border-light-subtle{border-color:var(--bs-light-border-subtle) !important}.border-dark-subtle{border-color:var(--bs-dark-border-subtle) !important}.border-1{border-width:1px !important}.border-2{border-width:2px !important}.border-3{border-width:3px !important}.border-4{border-width:4px !important}.border-5{border-width:5px !important}.border-opacity-10{--bs-border-opacity: 0.1}.border-opacity-25{--bs-border-opacity: 0.25}.border-opacity-50{--bs-border-opacity: 0.5}.border-opacity-75{--bs-border-opacity: 0.75}.border-opacity-100{--bs-border-opacity: 1}.w-25{width:25% !important}.w-50{width:50% !important}.w-75{width:75% !important}.w-100{width:100% !important}.w-auto{width:auto !important}.mw-100{max-width:100% !important}.vw-100{width:100vw !important}.min-vw-100{min-width:100vw !important}.h-25{height:25% !important}.h-50{height:50% !important}.h-75{height:75% !important}.h-100{height:100% !important}.h-auto{height:auto !important}.mh-100{max-height:100% !important}.vh-100{height:100vh !important}.min-vh-100{min-height:100vh !important}.flex-fill{flex:1 1 auto !important}.flex-row{flex-direction:row !important}.flex-column{flex-direction:column !important}.flex-row-reverse{flex-direction:row-reverse !important}.flex-column-reverse{flex-direction:column-reverse !important}.flex-grow-0{flex-grow:0 !important}.flex-grow-1{flex-grow:1 !important}.flex-shrink-0{flex-shrink:0 
!important}.flex-shrink-1{flex-shrink:1 !important}.flex-wrap{flex-wrap:wrap !important}.flex-nowrap{flex-wrap:nowrap !important}.flex-wrap-reverse{flex-wrap:wrap-reverse !important}.justify-content-start{justify-content:flex-start !important}.justify-content-end{justify-content:flex-end !important}.justify-content-center{justify-content:center !important}.justify-content-between{justify-content:space-between !important}.justify-content-around{justify-content:space-around !important}.justify-content-evenly{justify-content:space-evenly !important}.align-items-start{align-items:flex-start !important}.align-items-end{align-items:flex-end !important}.align-items-center{align-items:center !important}.align-items-baseline{align-items:baseline !important}.align-items-stretch{align-items:stretch !important}.align-content-start{align-content:flex-start !important}.align-content-end{align-content:flex-end !important}.align-content-center{align-content:center !important}.align-content-between{align-content:space-between !important}.align-content-around{align-content:space-around !important}.align-content-stretch{align-content:stretch !important}.align-self-auto{align-self:auto !important}.align-self-start{align-self:flex-start !important}.align-self-end{align-self:flex-end !important}.align-self-center{align-self:center !important}.align-self-baseline{align-self:baseline !important}.align-self-stretch{align-self:stretch !important}.order-first{order:-1 !important}.order-0{order:0 !important}.order-1{order:1 !important}.order-2{order:2 !important}.order-3{order:3 !important}.order-4{order:4 !important}.order-5{order:5 !important}.order-last{order:6 !important}.m-0{margin:0 !important}.m-1{margin:.25rem !important}.m-2{margin:.5rem !important}.m-3{margin:1rem !important}.m-4{margin:1.5rem !important}.m-5{margin:3rem !important}.m-auto{margin:auto !important}.mx-0{margin-right:0 !important;margin-left:0 !important}.mx-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-3{margin-right:1rem !important;margin-left:1rem !important}.mx-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-5{margin-right:3rem !important;margin-left:3rem !important}.mx-auto{margin-right:auto !important;margin-left:auto !important}.my-0{margin-top:0 !important;margin-bottom:0 !important}.my-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-0{margin-top:0 !important}.mt-1{margin-top:.25rem !important}.mt-2{margin-top:.5rem !important}.mt-3{margin-top:1rem !important}.mt-4{margin-top:1.5rem !important}.mt-5{margin-top:3rem !important}.mt-auto{margin-top:auto !important}.me-0{margin-right:0 !important}.me-1{margin-right:.25rem !important}.me-2{margin-right:.5rem !important}.me-3{margin-right:1rem !important}.me-4{margin-right:1.5rem !important}.me-5{margin-right:3rem !important}.me-auto{margin-right:auto !important}.mb-0{margin-bottom:0 !important}.mb-1{margin-bottom:.25rem !important}.mb-2{margin-bottom:.5rem !important}.mb-3{margin-bottom:1rem !important}.mb-4{margin-bottom:1.5rem !important}.mb-5{margin-bottom:3rem !important}.mb-auto{margin-bottom:auto !important}.ms-0{margin-left:0 
!important}.ms-1{margin-left:.25rem !important}.ms-2{margin-left:.5rem !important}.ms-3{margin-left:1rem !important}.ms-4{margin-left:1.5rem !important}.ms-5{margin-left:3rem !important}.ms-auto{margin-left:auto !important}.p-0{padding:0 !important}.p-1{padding:.25rem !important}.p-2{padding:.5rem !important}.p-3{padding:1rem !important}.p-4{padding:1.5rem !important}.p-5{padding:3rem !important}.px-0{padding-right:0 !important;padding-left:0 !important}.px-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-3{padding-right:1rem !important;padding-left:1rem !important}.px-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-5{padding-right:3rem !important;padding-left:3rem !important}.py-0{padding-top:0 !important;padding-bottom:0 !important}.py-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-0{padding-top:0 !important}.pt-1{padding-top:.25rem !important}.pt-2{padding-top:.5rem !important}.pt-3{padding-top:1rem !important}.pt-4{padding-top:1.5rem !important}.pt-5{padding-top:3rem !important}.pe-0{padding-right:0 !important}.pe-1{padding-right:.25rem !important}.pe-2{padding-right:.5rem !important}.pe-3{padding-right:1rem !important}.pe-4{padding-right:1.5rem !important}.pe-5{padding-right:3rem !important}.pb-0{padding-bottom:0 !important}.pb-1{padding-bottom:.25rem !important}.pb-2{padding-bottom:.5rem !important}.pb-3{padding-bottom:1rem !important}.pb-4{padding-bottom:1.5rem !important}.pb-5{padding-bottom:3rem !important}.ps-0{padding-left:0 !important}.ps-1{padding-left:.25rem !important}.ps-2{padding-left:.5rem !important}.ps-3{padding-left:1rem !important}.ps-4{padding-left:1.5rem !important}.ps-5{padding-left:3rem !important}.gap-0{gap:0 !important}.gap-1{gap:.25rem !important}.gap-2{gap:.5rem !important}.gap-3{gap:1rem !important}.gap-4{gap:1.5rem !important}.gap-5{gap:3rem !important}.row-gap-0{row-gap:0 !important}.row-gap-1{row-gap:.25rem !important}.row-gap-2{row-gap:.5rem !important}.row-gap-3{row-gap:1rem !important}.row-gap-4{row-gap:1.5rem !important}.row-gap-5{row-gap:3rem !important}.column-gap-0{column-gap:0 !important}.column-gap-1{column-gap:.25rem !important}.column-gap-2{column-gap:.5rem !important}.column-gap-3{column-gap:1rem !important}.column-gap-4{column-gap:1.5rem !important}.column-gap-5{column-gap:3rem !important}.font-monospace{font-family:var(--bs-font-monospace) !important}.fs-1{font-size:calc(1.375rem + 1.5vw) !important}.fs-2{font-size:calc(1.325rem + 0.9vw) !important}.fs-3{font-size:calc(1.3rem + 0.6vw) !important}.fs-4{font-size:calc(1.275rem + 0.3vw) !important}.fs-5{font-size:1.25rem !important}.fs-6{font-size:1rem !important}.fst-italic{font-style:italic !important}.fst-normal{font-style:normal !important}.fw-lighter{font-weight:lighter !important}.fw-light{font-weight:300 !important}.fw-normal{font-weight:400 !important}.fw-medium{font-weight:500 !important}.fw-semibold{font-weight:600 !important}.fw-bold{font-weight:700 !important}.fw-bolder{font-weight:bolder !important}.lh-1{line-height:1 !important}.lh-sm{line-height:1.25 !important}.lh-base{line-height:1.5 !important}.lh-lg{line-height:2 !important}.text-start{text-align:left !important}.text-end{text-align:right 
!important}.text-center{text-align:center !important}.text-decoration-none{text-decoration:none !important}.text-decoration-underline{text-decoration:underline !important}.text-decoration-line-through{text-decoration:line-through !important}.text-lowercase{text-transform:lowercase !important}.text-uppercase{text-transform:uppercase !important}.text-capitalize{text-transform:capitalize !important}.text-wrap{white-space:normal !important}.text-nowrap{white-space:nowrap !important}.text-break{word-wrap:break-word !important;word-break:break-word !important}.text-primary{--bs-text-opacity: 1;color:rgba(var(--bs-primary-rgb), var(--bs-text-opacity)) !important}.text-secondary{--bs-text-opacity: 1;color:rgba(var(--bs-secondary-rgb), var(--bs-text-opacity)) !important}.text-success{--bs-text-opacity: 1;color:rgba(var(--bs-success-rgb), var(--bs-text-opacity)) !important}.text-info{--bs-text-opacity: 1;color:rgba(var(--bs-info-rgb), var(--bs-text-opacity)) !important}.text-warning{--bs-text-opacity: 1;color:rgba(var(--bs-warning-rgb), var(--bs-text-opacity)) !important}.text-danger{--bs-text-opacity: 1;color:rgba(var(--bs-danger-rgb), var(--bs-text-opacity)) !important}.text-light{--bs-text-opacity: 1;color:rgba(var(--bs-light-rgb), var(--bs-text-opacity)) !important}.text-dark{--bs-text-opacity: 1;color:rgba(var(--bs-dark-rgb), var(--bs-text-opacity)) !important}.text-black{--bs-text-opacity: 1;color:rgba(var(--bs-black-rgb), var(--bs-text-opacity)) !important}.text-white{--bs-text-opacity: 1;color:rgba(var(--bs-white-rgb), var(--bs-text-opacity)) !important}.text-body{--bs-text-opacity: 1;color:rgba(var(--bs-body-color-rgb), var(--bs-text-opacity)) !important}.text-muted{--bs-text-opacity: 1;color:var(--bs-secondary-color) !important}.text-black-50{--bs-text-opacity: 1;color:rgba(0,0,0,.5) !important}.text-white-50{--bs-text-opacity: 1;color:rgba(255,255,255,.5) !important}.text-body-secondary{--bs-text-opacity: 1;color:var(--bs-secondary-color) !important}.text-body-tertiary{--bs-text-opacity: 1;color:var(--bs-tertiary-color) !important}.text-body-emphasis{--bs-text-opacity: 1;color:var(--bs-emphasis-color) !important}.text-reset{--bs-text-opacity: 1;color:inherit !important}.text-opacity-25{--bs-text-opacity: 0.25}.text-opacity-50{--bs-text-opacity: 0.5}.text-opacity-75{--bs-text-opacity: 0.75}.text-opacity-100{--bs-text-opacity: 1}.text-primary-emphasis{color:var(--bs-primary-text-emphasis) !important}.text-secondary-emphasis{color:var(--bs-secondary-text-emphasis) !important}.text-success-emphasis{color:var(--bs-success-text-emphasis) !important}.text-info-emphasis{color:var(--bs-info-text-emphasis) !important}.text-warning-emphasis{color:var(--bs-warning-text-emphasis) !important}.text-danger-emphasis{color:var(--bs-danger-text-emphasis) !important}.text-light-emphasis{color:var(--bs-light-text-emphasis) !important}.text-dark-emphasis{color:var(--bs-dark-text-emphasis) !important}.link-opacity-10{--bs-link-opacity: 0.1}.link-opacity-10-hover:hover{--bs-link-opacity: 0.1}.link-opacity-25{--bs-link-opacity: 0.25}.link-opacity-25-hover:hover{--bs-link-opacity: 0.25}.link-opacity-50{--bs-link-opacity: 0.5}.link-opacity-50-hover:hover{--bs-link-opacity: 0.5}.link-opacity-75{--bs-link-opacity: 0.75}.link-opacity-75-hover:hover{--bs-link-opacity: 0.75}.link-opacity-100{--bs-link-opacity: 1}.link-opacity-100-hover:hover{--bs-link-opacity: 1}.link-offset-1{text-underline-offset:.125em !important}.link-offset-1-hover:hover{text-underline-offset:.125em 
!important}.link-offset-2{text-underline-offset:.25em !important}.link-offset-2-hover:hover{text-underline-offset:.25em !important}.link-offset-3{text-underline-offset:.375em !important}.link-offset-3-hover:hover{text-underline-offset:.375em !important}.link-underline-primary{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-primary-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-secondary{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-secondary-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-success{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-success-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-info{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-info-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-warning{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-warning-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-danger{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-danger-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-light{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-light-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-dark{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-dark-rgb), var(--bs-link-underline-opacity)) !important}.link-underline{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-link-color-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-underline-opacity-0{--bs-link-underline-opacity: 0}.link-underline-opacity-0-hover:hover{--bs-link-underline-opacity: 0}.link-underline-opacity-10{--bs-link-underline-opacity: 0.1}.link-underline-opacity-10-hover:hover{--bs-link-underline-opacity: 0.1}.link-underline-opacity-25{--bs-link-underline-opacity: 0.25}.link-underline-opacity-25-hover:hover{--bs-link-underline-opacity: 0.25}.link-underline-opacity-50{--bs-link-underline-opacity: 0.5}.link-underline-opacity-50-hover:hover{--bs-link-underline-opacity: 0.5}.link-underline-opacity-75{--bs-link-underline-opacity: 0.75}.link-underline-opacity-75-hover:hover{--bs-link-underline-opacity: 0.75}.link-underline-opacity-100{--bs-link-underline-opacity: 1}.link-underline-opacity-100-hover:hover{--bs-link-underline-opacity: 1}.bg-primary{--bs-bg-opacity: 1;background-color:rgba(var(--bs-primary-rgb), var(--bs-bg-opacity)) !important}.bg-secondary{--bs-bg-opacity: 1;background-color:rgba(var(--bs-secondary-rgb), var(--bs-bg-opacity)) !important}.bg-success{--bs-bg-opacity: 1;background-color:rgba(var(--bs-success-rgb), var(--bs-bg-opacity)) !important}.bg-info{--bs-bg-opacity: 1;background-color:rgba(var(--bs-info-rgb), var(--bs-bg-opacity)) !important}.bg-warning{--bs-bg-opacity: 1;background-color:rgba(var(--bs-warning-rgb), var(--bs-bg-opacity)) !important}.bg-danger{--bs-bg-opacity: 1;background-color:rgba(var(--bs-danger-rgb), var(--bs-bg-opacity)) !important}.bg-light{--bs-bg-opacity: 1;background-color:rgba(var(--bs-light-rgb), var(--bs-bg-opacity)) !important}.bg-dark{--bs-bg-opacity: 1;background-color:rgba(var(--bs-dark-rgb), var(--bs-bg-opacity)) !important}.bg-black{--bs-bg-opacity: 1;background-color:rgba(var(--bs-black-rgb), var(--bs-bg-opacity)) !important}.bg-white{--bs-bg-opacity: 1;background-color:rgba(var(--bs-white-rgb), var(--bs-bg-opacity)) !important}.bg-body{--bs-bg-opacity: 1;background-color:rgba(var(--bs-body-bg-rgb), var(--bs-bg-opacity)) 
!important}.bg-transparent{--bs-bg-opacity: 1;background-color:rgba(0,0,0,0) !important}.bg-body-secondary{--bs-bg-opacity: 1;background-color:rgba(var(--bs-secondary-bg-rgb), var(--bs-bg-opacity)) !important}.bg-body-tertiary{--bs-bg-opacity: 1;background-color:rgba(var(--bs-tertiary-bg-rgb), var(--bs-bg-opacity)) !important}.bg-opacity-10{--bs-bg-opacity: 0.1}.bg-opacity-25{--bs-bg-opacity: 0.25}.bg-opacity-50{--bs-bg-opacity: 0.5}.bg-opacity-75{--bs-bg-opacity: 0.75}.bg-opacity-100{--bs-bg-opacity: 1}.bg-primary-subtle{background-color:var(--bs-primary-bg-subtle) !important}.bg-secondary-subtle{background-color:var(--bs-secondary-bg-subtle) !important}.bg-success-subtle{background-color:var(--bs-success-bg-subtle) !important}.bg-info-subtle{background-color:var(--bs-info-bg-subtle) !important}.bg-warning-subtle{background-color:var(--bs-warning-bg-subtle) !important}.bg-danger-subtle{background-color:var(--bs-danger-bg-subtle) !important}.bg-light-subtle{background-color:var(--bs-light-bg-subtle) !important}.bg-dark-subtle{background-color:var(--bs-dark-bg-subtle) !important}.bg-gradient{background-image:var(--bs-gradient) !important}.user-select-all{user-select:all !important}.user-select-auto{user-select:auto !important}.user-select-none{user-select:none !important}.pe-none{pointer-events:none !important}.pe-auto{pointer-events:auto !important}.rounded{border-radius:var(--bs-border-radius) !important}.rounded-0{border-radius:0 !important}.rounded-1{border-radius:var(--bs-border-radius-sm) !important}.rounded-2{border-radius:var(--bs-border-radius) !important}.rounded-3{border-radius:var(--bs-border-radius-lg) !important}.rounded-4{border-radius:var(--bs-border-radius-xl) !important}.rounded-5{border-radius:var(--bs-border-radius-xxl) !important}.rounded-circle{border-radius:50% !important}.rounded-pill{border-radius:var(--bs-border-radius-pill) !important}.rounded-top{border-top-left-radius:var(--bs-border-radius) !important;border-top-right-radius:var(--bs-border-radius) !important}.rounded-top-0{border-top-left-radius:0 !important;border-top-right-radius:0 !important}.rounded-top-1{border-top-left-radius:var(--bs-border-radius-sm) !important;border-top-right-radius:var(--bs-border-radius-sm) !important}.rounded-top-2{border-top-left-radius:var(--bs-border-radius) !important;border-top-right-radius:var(--bs-border-radius) !important}.rounded-top-3{border-top-left-radius:var(--bs-border-radius-lg) !important;border-top-right-radius:var(--bs-border-radius-lg) !important}.rounded-top-4{border-top-left-radius:var(--bs-border-radius-xl) !important;border-top-right-radius:var(--bs-border-radius-xl) !important}.rounded-top-5{border-top-left-radius:var(--bs-border-radius-xxl) !important;border-top-right-radius:var(--bs-border-radius-xxl) !important}.rounded-top-circle{border-top-left-radius:50% !important;border-top-right-radius:50% !important}.rounded-top-pill{border-top-left-radius:var(--bs-border-radius-pill) !important;border-top-right-radius:var(--bs-border-radius-pill) !important}.rounded-end{border-top-right-radius:var(--bs-border-radius) !important;border-bottom-right-radius:var(--bs-border-radius) !important}.rounded-end-0{border-top-right-radius:0 !important;border-bottom-right-radius:0 !important}.rounded-end-1{border-top-right-radius:var(--bs-border-radius-sm) !important;border-bottom-right-radius:var(--bs-border-radius-sm) !important}.rounded-end-2{border-top-right-radius:var(--bs-border-radius) !important;border-bottom-right-radius:var(--bs-border-radius) 
!important}.rounded-end-3{border-top-right-radius:var(--bs-border-radius-lg) !important;border-bottom-right-radius:var(--bs-border-radius-lg) !important}.rounded-end-4{border-top-right-radius:var(--bs-border-radius-xl) !important;border-bottom-right-radius:var(--bs-border-radius-xl) !important}.rounded-end-5{border-top-right-radius:var(--bs-border-radius-xxl) !important;border-bottom-right-radius:var(--bs-border-radius-xxl) !important}.rounded-end-circle{border-top-right-radius:50% !important;border-bottom-right-radius:50% !important}.rounded-end-pill{border-top-right-radius:var(--bs-border-radius-pill) !important;border-bottom-right-radius:var(--bs-border-radius-pill) !important}.rounded-bottom{border-bottom-right-radius:var(--bs-border-radius) !important;border-bottom-left-radius:var(--bs-border-radius) !important}.rounded-bottom-0{border-bottom-right-radius:0 !important;border-bottom-left-radius:0 !important}.rounded-bottom-1{border-bottom-right-radius:var(--bs-border-radius-sm) !important;border-bottom-left-radius:var(--bs-border-radius-sm) !important}.rounded-bottom-2{border-bottom-right-radius:var(--bs-border-radius) !important;border-bottom-left-radius:var(--bs-border-radius) !important}.rounded-bottom-3{border-bottom-right-radius:var(--bs-border-radius-lg) !important;border-bottom-left-radius:var(--bs-border-radius-lg) !important}.rounded-bottom-4{border-bottom-right-radius:var(--bs-border-radius-xl) !important;border-bottom-left-radius:var(--bs-border-radius-xl) !important}.rounded-bottom-5{border-bottom-right-radius:var(--bs-border-radius-xxl) !important;border-bottom-left-radius:var(--bs-border-radius-xxl) !important}.rounded-bottom-circle{border-bottom-right-radius:50% !important;border-bottom-left-radius:50% !important}.rounded-bottom-pill{border-bottom-right-radius:var(--bs-border-radius-pill) !important;border-bottom-left-radius:var(--bs-border-radius-pill) !important}.rounded-start{border-bottom-left-radius:var(--bs-border-radius) !important;border-top-left-radius:var(--bs-border-radius) !important}.rounded-start-0{border-bottom-left-radius:0 !important;border-top-left-radius:0 !important}.rounded-start-1{border-bottom-left-radius:var(--bs-border-radius-sm) !important;border-top-left-radius:var(--bs-border-radius-sm) !important}.rounded-start-2{border-bottom-left-radius:var(--bs-border-radius) !important;border-top-left-radius:var(--bs-border-radius) !important}.rounded-start-3{border-bottom-left-radius:var(--bs-border-radius-lg) !important;border-top-left-radius:var(--bs-border-radius-lg) !important}.rounded-start-4{border-bottom-left-radius:var(--bs-border-radius-xl) !important;border-top-left-radius:var(--bs-border-radius-xl) !important}.rounded-start-5{border-bottom-left-radius:var(--bs-border-radius-xxl) !important;border-top-left-radius:var(--bs-border-radius-xxl) !important}.rounded-start-circle{border-bottom-left-radius:50% !important;border-top-left-radius:50% !important}.rounded-start-pill{border-bottom-left-radius:var(--bs-border-radius-pill) !important;border-top-left-radius:var(--bs-border-radius-pill) !important}.visible{visibility:visible !important}.invisible{visibility:hidden !important}.z-n1{z-index:-1 !important}.z-0{z-index:0 !important}.z-1{z-index:1 !important}.z-2{z-index:2 !important}.z-3{z-index:3 !important}@media(min-width: 540px){.float-sm-start{float:left !important}.float-sm-end{float:right !important}.float-sm-none{float:none !important}.object-fit-sm-contain{object-fit:contain !important}.object-fit-sm-cover{object-fit:cover 
!important}.object-fit-sm-fill{object-fit:fill !important}.object-fit-sm-scale{object-fit:scale-down !important}.object-fit-sm-none{object-fit:none !important}.d-sm-inline{display:inline !important}.d-sm-inline-block{display:inline-block !important}.d-sm-block{display:block !important}.d-sm-grid{display:grid !important}.d-sm-inline-grid{display:inline-grid !important}.d-sm-table{display:table !important}.d-sm-table-row{display:table-row !important}.d-sm-table-cell{display:table-cell !important}.d-sm-flex{display:flex !important}.d-sm-inline-flex{display:inline-flex !important}.d-sm-none{display:none !important}.flex-sm-fill{flex:1 1 auto !important}.flex-sm-row{flex-direction:row !important}.flex-sm-column{flex-direction:column !important}.flex-sm-row-reverse{flex-direction:row-reverse !important}.flex-sm-column-reverse{flex-direction:column-reverse !important}.flex-sm-grow-0{flex-grow:0 !important}.flex-sm-grow-1{flex-grow:1 !important}.flex-sm-shrink-0{flex-shrink:0 !important}.flex-sm-shrink-1{flex-shrink:1 !important}.flex-sm-wrap{flex-wrap:wrap !important}.flex-sm-nowrap{flex-wrap:nowrap !important}.flex-sm-wrap-reverse{flex-wrap:wrap-reverse !important}.justify-content-sm-start{justify-content:flex-start !important}.justify-content-sm-end{justify-content:flex-end !important}.justify-content-sm-center{justify-content:center !important}.justify-content-sm-between{justify-content:space-between !important}.justify-content-sm-around{justify-content:space-around !important}.justify-content-sm-evenly{justify-content:space-evenly !important}.align-items-sm-start{align-items:flex-start !important}.align-items-sm-end{align-items:flex-end !important}.align-items-sm-center{align-items:center !important}.align-items-sm-baseline{align-items:baseline !important}.align-items-sm-stretch{align-items:stretch !important}.align-content-sm-start{align-content:flex-start !important}.align-content-sm-end{align-content:flex-end !important}.align-content-sm-center{align-content:center !important}.align-content-sm-between{align-content:space-between !important}.align-content-sm-around{align-content:space-around !important}.align-content-sm-stretch{align-content:stretch !important}.align-self-sm-auto{align-self:auto !important}.align-self-sm-start{align-self:flex-start !important}.align-self-sm-end{align-self:flex-end !important}.align-self-sm-center{align-self:center !important}.align-self-sm-baseline{align-self:baseline !important}.align-self-sm-stretch{align-self:stretch !important}.order-sm-first{order:-1 !important}.order-sm-0{order:0 !important}.order-sm-1{order:1 !important}.order-sm-2{order:2 !important}.order-sm-3{order:3 !important}.order-sm-4{order:4 !important}.order-sm-5{order:5 !important}.order-sm-last{order:6 !important}.m-sm-0{margin:0 !important}.m-sm-1{margin:.25rem !important}.m-sm-2{margin:.5rem !important}.m-sm-3{margin:1rem !important}.m-sm-4{margin:1.5rem !important}.m-sm-5{margin:3rem !important}.m-sm-auto{margin:auto !important}.mx-sm-0{margin-right:0 !important;margin-left:0 !important}.mx-sm-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-sm-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-sm-3{margin-right:1rem !important;margin-left:1rem !important}.mx-sm-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-sm-5{margin-right:3rem !important;margin-left:3rem !important}.mx-sm-auto{margin-right:auto !important;margin-left:auto !important}.my-sm-0{margin-top:0 !important;margin-bottom:0 !important}.my-sm-1{margin-top:.25rem 
!important;margin-bottom:.25rem !important}.my-sm-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-sm-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-sm-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-sm-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-sm-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-sm-0{margin-top:0 !important}.mt-sm-1{margin-top:.25rem !important}.mt-sm-2{margin-top:.5rem !important}.mt-sm-3{margin-top:1rem !important}.mt-sm-4{margin-top:1.5rem !important}.mt-sm-5{margin-top:3rem !important}.mt-sm-auto{margin-top:auto !important}.me-sm-0{margin-right:0 !important}.me-sm-1{margin-right:.25rem !important}.me-sm-2{margin-right:.5rem !important}.me-sm-3{margin-right:1rem !important}.me-sm-4{margin-right:1.5rem !important}.me-sm-5{margin-right:3rem !important}.me-sm-auto{margin-right:auto !important}.mb-sm-0{margin-bottom:0 !important}.mb-sm-1{margin-bottom:.25rem !important}.mb-sm-2{margin-bottom:.5rem !important}.mb-sm-3{margin-bottom:1rem !important}.mb-sm-4{margin-bottom:1.5rem !important}.mb-sm-5{margin-bottom:3rem !important}.mb-sm-auto{margin-bottom:auto !important}.ms-sm-0{margin-left:0 !important}.ms-sm-1{margin-left:.25rem !important}.ms-sm-2{margin-left:.5rem !important}.ms-sm-3{margin-left:1rem !important}.ms-sm-4{margin-left:1.5rem !important}.ms-sm-5{margin-left:3rem !important}.ms-sm-auto{margin-left:auto !important}.p-sm-0{padding:0 !important}.p-sm-1{padding:.25rem !important}.p-sm-2{padding:.5rem !important}.p-sm-3{padding:1rem !important}.p-sm-4{padding:1.5rem !important}.p-sm-5{padding:3rem !important}.px-sm-0{padding-right:0 !important;padding-left:0 !important}.px-sm-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-sm-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-sm-3{padding-right:1rem !important;padding-left:1rem !important}.px-sm-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-sm-5{padding-right:3rem !important;padding-left:3rem !important}.py-sm-0{padding-top:0 !important;padding-bottom:0 !important}.py-sm-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-sm-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-sm-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-sm-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-sm-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-sm-0{padding-top:0 !important}.pt-sm-1{padding-top:.25rem !important}.pt-sm-2{padding-top:.5rem !important}.pt-sm-3{padding-top:1rem !important}.pt-sm-4{padding-top:1.5rem !important}.pt-sm-5{padding-top:3rem !important}.pe-sm-0{padding-right:0 !important}.pe-sm-1{padding-right:.25rem !important}.pe-sm-2{padding-right:.5rem !important}.pe-sm-3{padding-right:1rem !important}.pe-sm-4{padding-right:1.5rem !important}.pe-sm-5{padding-right:3rem !important}.pb-sm-0{padding-bottom:0 !important}.pb-sm-1{padding-bottom:.25rem !important}.pb-sm-2{padding-bottom:.5rem !important}.pb-sm-3{padding-bottom:1rem !important}.pb-sm-4{padding-bottom:1.5rem !important}.pb-sm-5{padding-bottom:3rem !important}.ps-sm-0{padding-left:0 !important}.ps-sm-1{padding-left:.25rem !important}.ps-sm-2{padding-left:.5rem !important}.ps-sm-3{padding-left:1rem !important}.ps-sm-4{padding-left:1.5rem !important}.ps-sm-5{padding-left:3rem !important}.gap-sm-0{gap:0 !important}.gap-sm-1{gap:.25rem !important}.gap-sm-2{gap:.5rem !important}.gap-sm-3{gap:1rem !important}.gap-sm-4{gap:1.5rem 
!important}.gap-sm-5{gap:3rem !important}.row-gap-sm-0{row-gap:0 !important}.row-gap-sm-1{row-gap:.25rem !important}.row-gap-sm-2{row-gap:.5rem !important}.row-gap-sm-3{row-gap:1rem !important}.row-gap-sm-4{row-gap:1.5rem !important}.row-gap-sm-5{row-gap:3rem !important}.column-gap-sm-0{column-gap:0 !important}.column-gap-sm-1{column-gap:.25rem !important}.column-gap-sm-2{column-gap:.5rem !important}.column-gap-sm-3{column-gap:1rem !important}.column-gap-sm-4{column-gap:1.5rem !important}.column-gap-sm-5{column-gap:3rem !important}.text-sm-start{text-align:left !important}.text-sm-end{text-align:right !important}.text-sm-center{text-align:center !important}}@media(min-width: 720px){.float-md-start{float:left !important}.float-md-end{float:right !important}.float-md-none{float:none !important}.object-fit-md-contain{object-fit:contain !important}.object-fit-md-cover{object-fit:cover !important}.object-fit-md-fill{object-fit:fill !important}.object-fit-md-scale{object-fit:scale-down !important}.object-fit-md-none{object-fit:none !important}.d-md-inline{display:inline !important}.d-md-inline-block{display:inline-block !important}.d-md-block{display:block !important}.d-md-grid{display:grid !important}.d-md-inline-grid{display:inline-grid !important}.d-md-table{display:table !important}.d-md-table-row{display:table-row !important}.d-md-table-cell{display:table-cell !important}.d-md-flex{display:flex !important}.d-md-inline-flex{display:inline-flex !important}.d-md-none{display:none !important}.flex-md-fill{flex:1 1 auto !important}.flex-md-row{flex-direction:row !important}.flex-md-column{flex-direction:column !important}.flex-md-row-reverse{flex-direction:row-reverse !important}.flex-md-column-reverse{flex-direction:column-reverse !important}.flex-md-grow-0{flex-grow:0 !important}.flex-md-grow-1{flex-grow:1 !important}.flex-md-shrink-0{flex-shrink:0 !important}.flex-md-shrink-1{flex-shrink:1 !important}.flex-md-wrap{flex-wrap:wrap !important}.flex-md-nowrap{flex-wrap:nowrap !important}.flex-md-wrap-reverse{flex-wrap:wrap-reverse !important}.justify-content-md-start{justify-content:flex-start !important}.justify-content-md-end{justify-content:flex-end !important}.justify-content-md-center{justify-content:center !important}.justify-content-md-between{justify-content:space-between !important}.justify-content-md-around{justify-content:space-around !important}.justify-content-md-evenly{justify-content:space-evenly !important}.align-items-md-start{align-items:flex-start !important}.align-items-md-end{align-items:flex-end !important}.align-items-md-center{align-items:center !important}.align-items-md-baseline{align-items:baseline !important}.align-items-md-stretch{align-items:stretch !important}.align-content-md-start{align-content:flex-start !important}.align-content-md-end{align-content:flex-end !important}.align-content-md-center{align-content:center !important}.align-content-md-between{align-content:space-between !important}.align-content-md-around{align-content:space-around !important}.align-content-md-stretch{align-content:stretch !important}.align-self-md-auto{align-self:auto !important}.align-self-md-start{align-self:flex-start !important}.align-self-md-end{align-self:flex-end !important}.align-self-md-center{align-self:center !important}.align-self-md-baseline{align-self:baseline !important}.align-self-md-stretch{align-self:stretch !important}.order-md-first{order:-1 !important}.order-md-0{order:0 !important}.order-md-1{order:1 !important}.order-md-2{order:2 !important}.order-md-3{order:3 
!important}.order-md-4{order:4 !important}.order-md-5{order:5 !important}.order-md-last{order:6 !important}.m-md-0{margin:0 !important}.m-md-1{margin:.25rem !important}.m-md-2{margin:.5rem !important}.m-md-3{margin:1rem !important}.m-md-4{margin:1.5rem !important}.m-md-5{margin:3rem !important}.m-md-auto{margin:auto !important}.mx-md-0{margin-right:0 !important;margin-left:0 !important}.mx-md-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-md-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-md-3{margin-right:1rem !important;margin-left:1rem !important}.mx-md-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-md-5{margin-right:3rem !important;margin-left:3rem !important}.mx-md-auto{margin-right:auto !important;margin-left:auto !important}.my-md-0{margin-top:0 !important;margin-bottom:0 !important}.my-md-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-md-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-md-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-md-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-md-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-md-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-md-0{margin-top:0 !important}.mt-md-1{margin-top:.25rem !important}.mt-md-2{margin-top:.5rem !important}.mt-md-3{margin-top:1rem !important}.mt-md-4{margin-top:1.5rem !important}.mt-md-5{margin-top:3rem !important}.mt-md-auto{margin-top:auto !important}.me-md-0{margin-right:0 !important}.me-md-1{margin-right:.25rem !important}.me-md-2{margin-right:.5rem !important}.me-md-3{margin-right:1rem !important}.me-md-4{margin-right:1.5rem !important}.me-md-5{margin-right:3rem !important}.me-md-auto{margin-right:auto !important}.mb-md-0{margin-bottom:0 !important}.mb-md-1{margin-bottom:.25rem !important}.mb-md-2{margin-bottom:.5rem !important}.mb-md-3{margin-bottom:1rem !important}.mb-md-4{margin-bottom:1.5rem !important}.mb-md-5{margin-bottom:3rem !important}.mb-md-auto{margin-bottom:auto !important}.ms-md-0{margin-left:0 !important}.ms-md-1{margin-left:.25rem !important}.ms-md-2{margin-left:.5rem !important}.ms-md-3{margin-left:1rem !important}.ms-md-4{margin-left:1.5rem !important}.ms-md-5{margin-left:3rem !important}.ms-md-auto{margin-left:auto !important}.p-md-0{padding:0 !important}.p-md-1{padding:.25rem !important}.p-md-2{padding:.5rem !important}.p-md-3{padding:1rem !important}.p-md-4{padding:1.5rem !important}.p-md-5{padding:3rem !important}.px-md-0{padding-right:0 !important;padding-left:0 !important}.px-md-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-md-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-md-3{padding-right:1rem !important;padding-left:1rem !important}.px-md-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-md-5{padding-right:3rem !important;padding-left:3rem !important}.py-md-0{padding-top:0 !important;padding-bottom:0 !important}.py-md-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-md-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-md-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-md-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-md-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-md-0{padding-top:0 !important}.pt-md-1{padding-top:.25rem !important}.pt-md-2{padding-top:.5rem !important}.pt-md-3{padding-top:1rem !important}.pt-md-4{padding-top:1.5rem 
!important}.pt-md-5{padding-top:3rem !important}.pe-md-0{padding-right:0 !important}.pe-md-1{padding-right:.25rem !important}.pe-md-2{padding-right:.5rem !important}.pe-md-3{padding-right:1rem !important}.pe-md-4{padding-right:1.5rem !important}.pe-md-5{padding-right:3rem !important}.pb-md-0{padding-bottom:0 !important}.pb-md-1{padding-bottom:.25rem !important}.pb-md-2{padding-bottom:.5rem !important}.pb-md-3{padding-bottom:1rem !important}.pb-md-4{padding-bottom:1.5rem !important}.pb-md-5{padding-bottom:3rem !important}.ps-md-0{padding-left:0 !important}.ps-md-1{padding-left:.25rem !important}.ps-md-2{padding-left:.5rem !important}.ps-md-3{padding-left:1rem !important}.ps-md-4{padding-left:1.5rem !important}.ps-md-5{padding-left:3rem !important}.gap-md-0{gap:0 !important}.gap-md-1{gap:.25rem !important}.gap-md-2{gap:.5rem !important}.gap-md-3{gap:1rem !important}.gap-md-4{gap:1.5rem !important}.gap-md-5{gap:3rem !important}.row-gap-md-0{row-gap:0 !important}.row-gap-md-1{row-gap:.25rem !important}.row-gap-md-2{row-gap:.5rem !important}.row-gap-md-3{row-gap:1rem !important}.row-gap-md-4{row-gap:1.5rem !important}.row-gap-md-5{row-gap:3rem !important}.column-gap-md-0{column-gap:0 !important}.column-gap-md-1{column-gap:.25rem !important}.column-gap-md-2{column-gap:.5rem !important}.column-gap-md-3{column-gap:1rem !important}.column-gap-md-4{column-gap:1.5rem !important}.column-gap-md-5{column-gap:3rem !important}.text-md-start{text-align:left !important}.text-md-end{text-align:right !important}.text-md-center{text-align:center !important}}@media(min-width: 960px){.float-lg-start{float:left !important}.float-lg-end{float:right !important}.float-lg-none{float:none !important}.object-fit-lg-contain{object-fit:contain !important}.object-fit-lg-cover{object-fit:cover !important}.object-fit-lg-fill{object-fit:fill !important}.object-fit-lg-scale{object-fit:scale-down !important}.object-fit-lg-none{object-fit:none !important}.d-lg-inline{display:inline !important}.d-lg-inline-block{display:inline-block !important}.d-lg-block{display:block !important}.d-lg-grid{display:grid !important}.d-lg-inline-grid{display:inline-grid !important}.d-lg-table{display:table !important}.d-lg-table-row{display:table-row !important}.d-lg-table-cell{display:table-cell !important}.d-lg-flex{display:flex !important}.d-lg-inline-flex{display:inline-flex !important}.d-lg-none{display:none !important}.flex-lg-fill{flex:1 1 auto !important}.flex-lg-row{flex-direction:row !important}.flex-lg-column{flex-direction:column !important}.flex-lg-row-reverse{flex-direction:row-reverse !important}.flex-lg-column-reverse{flex-direction:column-reverse !important}.flex-lg-grow-0{flex-grow:0 !important}.flex-lg-grow-1{flex-grow:1 !important}.flex-lg-shrink-0{flex-shrink:0 !important}.flex-lg-shrink-1{flex-shrink:1 !important}.flex-lg-wrap{flex-wrap:wrap !important}.flex-lg-nowrap{flex-wrap:nowrap !important}.flex-lg-wrap-reverse{flex-wrap:wrap-reverse !important}.justify-content-lg-start{justify-content:flex-start !important}.justify-content-lg-end{justify-content:flex-end !important}.justify-content-lg-center{justify-content:center !important}.justify-content-lg-between{justify-content:space-between !important}.justify-content-lg-around{justify-content:space-around !important}.justify-content-lg-evenly{justify-content:space-evenly !important}.align-items-lg-start{align-items:flex-start !important}.align-items-lg-end{align-items:flex-end !important}.align-items-lg-center{align-items:center 
!important}.align-items-lg-baseline{align-items:baseline !important}.align-items-lg-stretch{align-items:stretch !important}.align-content-lg-start{align-content:flex-start !important}.align-content-lg-end{align-content:flex-end !important}.align-content-lg-center{align-content:center !important}.align-content-lg-between{align-content:space-between !important}.align-content-lg-around{align-content:space-around !important}.align-content-lg-stretch{align-content:stretch !important}.align-self-lg-auto{align-self:auto !important}.align-self-lg-start{align-self:flex-start !important}.align-self-lg-end{align-self:flex-end !important}.align-self-lg-center{align-self:center !important}.align-self-lg-baseline{align-self:baseline !important}.align-self-lg-stretch{align-self:stretch !important}.order-lg-first{order:-1 !important}.order-lg-0{order:0 !important}.order-lg-1{order:1 !important}.order-lg-2{order:2 !important}.order-lg-3{order:3 !important}.order-lg-4{order:4 !important}.order-lg-5{order:5 !important}.order-lg-last{order:6 !important}.m-lg-0{margin:0 !important}.m-lg-1{margin:.25rem !important}.m-lg-2{margin:.5rem !important}.m-lg-3{margin:1rem !important}.m-lg-4{margin:1.5rem !important}.m-lg-5{margin:3rem !important}.m-lg-auto{margin:auto !important}.mx-lg-0{margin-right:0 !important;margin-left:0 !important}.mx-lg-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-lg-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-lg-3{margin-right:1rem !important;margin-left:1rem !important}.mx-lg-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-lg-5{margin-right:3rem !important;margin-left:3rem !important}.mx-lg-auto{margin-right:auto !important;margin-left:auto !important}.my-lg-0{margin-top:0 !important;margin-bottom:0 !important}.my-lg-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-lg-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-lg-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-lg-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-lg-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-lg-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-lg-0{margin-top:0 !important}.mt-lg-1{margin-top:.25rem !important}.mt-lg-2{margin-top:.5rem !important}.mt-lg-3{margin-top:1rem !important}.mt-lg-4{margin-top:1.5rem !important}.mt-lg-5{margin-top:3rem !important}.mt-lg-auto{margin-top:auto !important}.me-lg-0{margin-right:0 !important}.me-lg-1{margin-right:.25rem !important}.me-lg-2{margin-right:.5rem !important}.me-lg-3{margin-right:1rem !important}.me-lg-4{margin-right:1.5rem !important}.me-lg-5{margin-right:3rem !important}.me-lg-auto{margin-right:auto !important}.mb-lg-0{margin-bottom:0 !important}.mb-lg-1{margin-bottom:.25rem !important}.mb-lg-2{margin-bottom:.5rem !important}.mb-lg-3{margin-bottom:1rem !important}.mb-lg-4{margin-bottom:1.5rem !important}.mb-lg-5{margin-bottom:3rem !important}.mb-lg-auto{margin-bottom:auto !important}.ms-lg-0{margin-left:0 !important}.ms-lg-1{margin-left:.25rem !important}.ms-lg-2{margin-left:.5rem !important}.ms-lg-3{margin-left:1rem !important}.ms-lg-4{margin-left:1.5rem !important}.ms-lg-5{margin-left:3rem !important}.ms-lg-auto{margin-left:auto !important}.p-lg-0{padding:0 !important}.p-lg-1{padding:.25rem !important}.p-lg-2{padding:.5rem !important}.p-lg-3{padding:1rem !important}.p-lg-4{padding:1.5rem !important}.p-lg-5{padding:3rem !important}.px-lg-0{padding-right:0 !important;padding-left:0 
!important}.px-lg-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-lg-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-lg-3{padding-right:1rem !important;padding-left:1rem !important}.px-lg-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-lg-5{padding-right:3rem !important;padding-left:3rem !important}.py-lg-0{padding-top:0 !important;padding-bottom:0 !important}.py-lg-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-lg-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-lg-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-lg-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-lg-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-lg-0{padding-top:0 !important}.pt-lg-1{padding-top:.25rem !important}.pt-lg-2{padding-top:.5rem !important}.pt-lg-3{padding-top:1rem !important}.pt-lg-4{padding-top:1.5rem !important}.pt-lg-5{padding-top:3rem !important}.pe-lg-0{padding-right:0 !important}.pe-lg-1{padding-right:.25rem !important}.pe-lg-2{padding-right:.5rem !important}.pe-lg-3{padding-right:1rem !important}.pe-lg-4{padding-right:1.5rem !important}.pe-lg-5{padding-right:3rem !important}.pb-lg-0{padding-bottom:0 !important}.pb-lg-1{padding-bottom:.25rem !important}.pb-lg-2{padding-bottom:.5rem !important}.pb-lg-3{padding-bottom:1rem !important}.pb-lg-4{padding-bottom:1.5rem !important}.pb-lg-5{padding-bottom:3rem !important}.ps-lg-0{padding-left:0 !important}.ps-lg-1{padding-left:.25rem !important}.ps-lg-2{padding-left:.5rem !important}.ps-lg-3{padding-left:1rem !important}.ps-lg-4{padding-left:1.5rem !important}.ps-lg-5{padding-left:3rem !important}.gap-lg-0{gap:0 !important}.gap-lg-1{gap:.25rem !important}.gap-lg-2{gap:.5rem !important}.gap-lg-3{gap:1rem !important}.gap-lg-4{gap:1.5rem !important}.gap-lg-5{gap:3rem !important}.row-gap-lg-0{row-gap:0 !important}.row-gap-lg-1{row-gap:.25rem !important}.row-gap-lg-2{row-gap:.5rem !important}.row-gap-lg-3{row-gap:1rem !important}.row-gap-lg-4{row-gap:1.5rem !important}.row-gap-lg-5{row-gap:3rem !important}.column-gap-lg-0{column-gap:0 !important}.column-gap-lg-1{column-gap:.25rem !important}.column-gap-lg-2{column-gap:.5rem !important}.column-gap-lg-3{column-gap:1rem !important}.column-gap-lg-4{column-gap:1.5rem !important}.column-gap-lg-5{column-gap:3rem !important}.text-lg-start{text-align:left !important}.text-lg-end{text-align:right !important}.text-lg-center{text-align:center !important}}@media(min-width: 1200px){.float-xl-start{float:left !important}.float-xl-end{float:right !important}.float-xl-none{float:none !important}.object-fit-xl-contain{object-fit:contain !important}.object-fit-xl-cover{object-fit:cover !important}.object-fit-xl-fill{object-fit:fill !important}.object-fit-xl-scale{object-fit:scale-down !important}.object-fit-xl-none{object-fit:none !important}.d-xl-inline{display:inline !important}.d-xl-inline-block{display:inline-block !important}.d-xl-block{display:block !important}.d-xl-grid{display:grid !important}.d-xl-inline-grid{display:inline-grid !important}.d-xl-table{display:table !important}.d-xl-table-row{display:table-row !important}.d-xl-table-cell{display:table-cell !important}.d-xl-flex{display:flex !important}.d-xl-inline-flex{display:inline-flex !important}.d-xl-none{display:none !important}.flex-xl-fill{flex:1 1 auto !important}.flex-xl-row{flex-direction:row !important}.flex-xl-column{flex-direction:column !important}.flex-xl-row-reverse{flex-direction:row-reverse 
!important}.flex-xl-column-reverse{flex-direction:column-reverse !important}.flex-xl-grow-0{flex-grow:0 !important}.flex-xl-grow-1{flex-grow:1 !important}.flex-xl-shrink-0{flex-shrink:0 !important}.flex-xl-shrink-1{flex-shrink:1 !important}.flex-xl-wrap{flex-wrap:wrap !important}.flex-xl-nowrap{flex-wrap:nowrap !important}.flex-xl-wrap-reverse{flex-wrap:wrap-reverse !important}.justify-content-xl-start{justify-content:flex-start !important}.justify-content-xl-end{justify-content:flex-end !important}.justify-content-xl-center{justify-content:center !important}.justify-content-xl-between{justify-content:space-between !important}.justify-content-xl-around{justify-content:space-around !important}.justify-content-xl-evenly{justify-content:space-evenly !important}.align-items-xl-start{align-items:flex-start !important}.align-items-xl-end{align-items:flex-end !important}.align-items-xl-center{align-items:center !important}.align-items-xl-baseline{align-items:baseline !important}.align-items-xl-stretch{align-items:stretch !important}.align-content-xl-start{align-content:flex-start !important}.align-content-xl-end{align-content:flex-end !important}.align-content-xl-center{align-content:center !important}.align-content-xl-between{align-content:space-between !important}.align-content-xl-around{align-content:space-around !important}.align-content-xl-stretch{align-content:stretch !important}.align-self-xl-auto{align-self:auto !important}.align-self-xl-start{align-self:flex-start !important}.align-self-xl-end{align-self:flex-end !important}.align-self-xl-center{align-self:center !important}.align-self-xl-baseline{align-self:baseline !important}.align-self-xl-stretch{align-self:stretch !important}.order-xl-first{order:-1 !important}.order-xl-0{order:0 !important}.order-xl-1{order:1 !important}.order-xl-2{order:2 !important}.order-xl-3{order:3 !important}.order-xl-4{order:4 !important}.order-xl-5{order:5 !important}.order-xl-last{order:6 !important}.m-xl-0{margin:0 !important}.m-xl-1{margin:.25rem !important}.m-xl-2{margin:.5rem !important}.m-xl-3{margin:1rem !important}.m-xl-4{margin:1.5rem !important}.m-xl-5{margin:3rem !important}.m-xl-auto{margin:auto !important}.mx-xl-0{margin-right:0 !important;margin-left:0 !important}.mx-xl-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-xl-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-xl-3{margin-right:1rem !important;margin-left:1rem !important}.mx-xl-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-xl-5{margin-right:3rem !important;margin-left:3rem !important}.mx-xl-auto{margin-right:auto !important;margin-left:auto !important}.my-xl-0{margin-top:0 !important;margin-bottom:0 !important}.my-xl-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-xl-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-xl-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-xl-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-xl-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-xl-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-xl-0{margin-top:0 !important}.mt-xl-1{margin-top:.25rem !important}.mt-xl-2{margin-top:.5rem !important}.mt-xl-3{margin-top:1rem !important}.mt-xl-4{margin-top:1.5rem !important}.mt-xl-5{margin-top:3rem !important}.mt-xl-auto{margin-top:auto !important}.me-xl-0{margin-right:0 !important}.me-xl-1{margin-right:.25rem !important}.me-xl-2{margin-right:.5rem !important}.me-xl-3{margin-right:1rem 
!important}.me-xl-4{margin-right:1.5rem !important}.me-xl-5{margin-right:3rem !important}.me-xl-auto{margin-right:auto !important}.mb-xl-0{margin-bottom:0 !important}.mb-xl-1{margin-bottom:.25rem !important}.mb-xl-2{margin-bottom:.5rem !important}.mb-xl-3{margin-bottom:1rem !important}.mb-xl-4{margin-bottom:1.5rem !important}.mb-xl-5{margin-bottom:3rem !important}.mb-xl-auto{margin-bottom:auto !important}.ms-xl-0{margin-left:0 !important}.ms-xl-1{margin-left:.25rem !important}.ms-xl-2{margin-left:.5rem !important}.ms-xl-3{margin-left:1rem !important}.ms-xl-4{margin-left:1.5rem !important}.ms-xl-5{margin-left:3rem !important}.ms-xl-auto{margin-left:auto !important}.p-xl-0{padding:0 !important}.p-xl-1{padding:.25rem !important}.p-xl-2{padding:.5rem !important}.p-xl-3{padding:1rem !important}.p-xl-4{padding:1.5rem !important}.p-xl-5{padding:3rem !important}.px-xl-0{padding-right:0 !important;padding-left:0 !important}.px-xl-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-xl-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-xl-3{padding-right:1rem !important;padding-left:1rem !important}.px-xl-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-xl-5{padding-right:3rem !important;padding-left:3rem !important}.py-xl-0{padding-top:0 !important;padding-bottom:0 !important}.py-xl-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-xl-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-xl-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-xl-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-xl-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-xl-0{padding-top:0 !important}.pt-xl-1{padding-top:.25rem !important}.pt-xl-2{padding-top:.5rem !important}.pt-xl-3{padding-top:1rem !important}.pt-xl-4{padding-top:1.5rem !important}.pt-xl-5{padding-top:3rem !important}.pe-xl-0{padding-right:0 !important}.pe-xl-1{padding-right:.25rem !important}.pe-xl-2{padding-right:.5rem !important}.pe-xl-3{padding-right:1rem !important}.pe-xl-4{padding-right:1.5rem !important}.pe-xl-5{padding-right:3rem !important}.pb-xl-0{padding-bottom:0 !important}.pb-xl-1{padding-bottom:.25rem !important}.pb-xl-2{padding-bottom:.5rem !important}.pb-xl-3{padding-bottom:1rem !important}.pb-xl-4{padding-bottom:1.5rem !important}.pb-xl-5{padding-bottom:3rem !important}.ps-xl-0{padding-left:0 !important}.ps-xl-1{padding-left:.25rem !important}.ps-xl-2{padding-left:.5rem !important}.ps-xl-3{padding-left:1rem !important}.ps-xl-4{padding-left:1.5rem !important}.ps-xl-5{padding-left:3rem !important}.gap-xl-0{gap:0 !important}.gap-xl-1{gap:.25rem !important}.gap-xl-2{gap:.5rem !important}.gap-xl-3{gap:1rem !important}.gap-xl-4{gap:1.5rem !important}.gap-xl-5{gap:3rem !important}.row-gap-xl-0{row-gap:0 !important}.row-gap-xl-1{row-gap:.25rem !important}.row-gap-xl-2{row-gap:.5rem !important}.row-gap-xl-3{row-gap:1rem !important}.row-gap-xl-4{row-gap:1.5rem !important}.row-gap-xl-5{row-gap:3rem !important}.column-gap-xl-0{column-gap:0 !important}.column-gap-xl-1{column-gap:.25rem !important}.column-gap-xl-2{column-gap:.5rem !important}.column-gap-xl-3{column-gap:1rem !important}.column-gap-xl-4{column-gap:1.5rem !important}.column-gap-xl-5{column-gap:3rem !important}.text-xl-start{text-align:left !important}.text-xl-end{text-align:right !important}.text-xl-center{text-align:center !important}}@media(min-width: 1200px){.fs-1{font-size:2.5rem !important}.fs-2{font-size:2rem !important}.fs-3{font-size:1.75rem 
!important}.fs-4{font-size:1.5rem !important}}@media print{.d-print-inline{display:inline !important}.d-print-inline-block{display:inline-block !important}.d-print-block{display:block !important}.d-print-grid{display:grid !important}.d-print-inline-grid{display:inline-grid !important}.d-print-table{display:table !important}.d-print-table-row{display:table-row !important}.d-print-table-cell{display:table-cell !important}.d-print-flex{display:flex !important}.d-print-inline-flex{display:inline-flex !important}.d-print-none{display:none !important}}","@mixin bsBanner($file) {\n /*!\n * Bootstrap #{$file} v5.3.3 (https://getbootstrap.com/)\n * Copyright 2011-2024 The Bootstrap Authors\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n */\n}\n",":root,\n[data-bs-theme=\"light\"] {\n // Note: Custom variable values only support SassScript inside `#{}`.\n\n // Colors\n //\n // Generate palettes for full colors, grays, and theme colors.\n\n @each $color, $value in $colors {\n --#{$prefix}#{$color}: #{$value};\n }\n\n @each $color, $value in $grays {\n --#{$prefix}gray-#{$color}: #{$value};\n }\n\n @each $color, $value in $theme-colors {\n --#{$prefix}#{$color}: #{$value};\n }\n\n @each $color, $value in $theme-colors-rgb {\n --#{$prefix}#{$color}-rgb: #{$value};\n }\n\n @each $color, $value in $theme-colors-text {\n --#{$prefix}#{$color}-text-emphasis: #{$value};\n }\n\n @each $color, $value in $theme-colors-bg-subtle {\n --#{$prefix}#{$color}-bg-subtle: #{$value};\n }\n\n @each $color, $value in $theme-colors-border-subtle {\n --#{$prefix}#{$color}-border-subtle: #{$value};\n }\n\n --#{$prefix}white-rgb: #{to-rgb($white)};\n --#{$prefix}black-rgb: #{to-rgb($black)};\n\n // Fonts\n\n // Note: Use `inspect` for lists so that quoted items keep the quotes.\n // See https://github.com/sass/sass/issues/2383#issuecomment-336349172\n --#{$prefix}font-sans-serif: #{inspect($font-family-sans-serif)};\n --#{$prefix}font-monospace: #{inspect($font-family-monospace)};\n --#{$prefix}gradient: #{$gradient};\n\n // Root and body\n // scss-docs-start root-body-variables\n @if $font-size-root != null {\n --#{$prefix}root-font-size: #{$font-size-root};\n }\n --#{$prefix}body-font-family: #{inspect($font-family-base)};\n @include rfs($font-size-base, --#{$prefix}body-font-size);\n --#{$prefix}body-font-weight: #{$font-weight-base};\n --#{$prefix}body-line-height: #{$line-height-base};\n @if $body-text-align != null {\n --#{$prefix}body-text-align: #{$body-text-align};\n }\n\n --#{$prefix}body-color: #{$body-color};\n --#{$prefix}body-color-rgb: #{to-rgb($body-color)};\n --#{$prefix}body-bg: #{$body-bg};\n --#{$prefix}body-bg-rgb: #{to-rgb($body-bg)};\n\n --#{$prefix}emphasis-color: #{$body-emphasis-color};\n --#{$prefix}emphasis-color-rgb: #{to-rgb($body-emphasis-color)};\n\n --#{$prefix}secondary-color: #{$body-secondary-color};\n --#{$prefix}secondary-color-rgb: #{to-rgb($body-secondary-color)};\n --#{$prefix}secondary-bg: #{$body-secondary-bg};\n --#{$prefix}secondary-bg-rgb: #{to-rgb($body-secondary-bg)};\n\n --#{$prefix}tertiary-color: #{$body-tertiary-color};\n --#{$prefix}tertiary-color-rgb: #{to-rgb($body-tertiary-color)};\n --#{$prefix}tertiary-bg: #{$body-tertiary-bg};\n --#{$prefix}tertiary-bg-rgb: #{to-rgb($body-tertiary-bg)};\n // scss-docs-end root-body-variables\n\n --#{$prefix}heading-color: #{$headings-color};\n\n --#{$prefix}link-color: #{$link-color};\n --#{$prefix}link-color-rgb: #{to-rgb($link-color)};\n --#{$prefix}link-decoration: #{$link-decoration};\n\n 


Discrete States#

+

Discrete states are a representation that can only occupy one of a finite set of predefined values (e.g., an engine gear or a switch position). Discrete states are created using the function progpy.create_discrete_state(), described below.

+
+
+progpy.create_discrete_state(n_states: int, names: list = None, transition=<function _random_transition>) → progpy.discrete_state.DiscreteState#
+
+

New in version 1.8.0.

+
+

Create a discrete state class for use with a progpy model. Users construct a discrete state and set it as the default value in x0 to make that state discrete.

+
+
Parameters
+
    +
  • n_states (int) – Number of possible states.

  • +
  • names (list[str], optional) – Names for states. Defaults to using “State [#]” for each state (e.g., “State 1”)

  • +
  • transition ({function, str}, optional) – Transition logic. Can be either a string (‘random’, ‘none’, or ‘sequential’) or a function (DiscreteState, float) -> int that maps the current state and the applied disruption to the new state number. Defaults to “random”.

  • +
+
+
Returns
+

Class to construct a discrete state

+
+
Return type
+

DiscreteState class

+
+
+

Example

+
>>> Switch = create_discrete_state(2, ['on', 'off'])
+>>> x0['switch'] = Switch.off
+
+
+

Example

+
>>> # Representing 'gear' of car
+>>> Gear = create_discrete_state(5, transition='sequential')
+>>> x0['gear'] = Gear(1)
+
+
+

Example

+
>>> # Custom Transition
+>>> import random
+>>> def transition(current_state, amount_added):
+>>>     # this is an example function- in reality it could be anything
+>>>     # Transition in this case is from 1-> any state and
+>>>     #  if not in state 1 can only transition back to 1
+>>>     if current_state == type(current_state)(1) and amount_added > 0.5:
+>>>         return random.randint(0, len(type(current_state)) - 1)
+>>>     elif amount_added > 0.5:
+>>>         return 1
+>>>     # No transition
+>>>     return current_state
+>>> StateType = create_discrete_state(10, transition=transition)
+>>> x = StateType(1)
+
+
+
+ +
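The pieces above can be combined. The short sketch below is illustrative only (it is not taken from these docs) and uses just the behaviors shown in the examples: creating a state type with progpy.create_discrete_state(), constructing members, taking len() of the type, and comparing members.

import progpy

# Illustrative sketch: a 5-value 'gear' state with sequential transitions,
# as in the car example above.
Gear = progpy.create_discrete_state(5, transition='sequential')
g = Gear(1)

print(g)             # current gear
print(len(Gear))     # number of possible states (5), as used in the custom transition example
print(g == Gear(1))  # discrete states support equality checks

With transition='sequential', the state is expected to step through its values in order as disruption (e.g., process noise) is applied during simulation, rather than jumping to an arbitrary value.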
Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/api_ref/progpy/EnsembleModel.html b/docs/api_ref/progpy/EnsembleModel.html
index 9914e224..20aa3fd9 100644
--- a/docs/api_ref/progpy/EnsembleModel.html
+++ b/docs/api_ref/progpy/EnsembleModel.html
@@ -9,7 +9,7 @@
-    EnsembleModel — ProgPy Python Packages 1.7 documentation
+    EnsembleModel — ProgPy Python Packages 1.8 documentation
@@ -32,7 +32,6 @@
@@ -47,11 +46,12 @@
@@ -153,6 +153,7 @@
  • CompositeModel
  • DataModel
  • Datasets
  • +
  • Discrete States
  • EnsembleModel
  • Included Models
  • LinearModel
  • @@ -170,6 +171,7 @@ +
  • Troubleshooting Guide
  • Release Notes
  • Glossary
  • Developers Guide & Project Plan
  • +
  • Troubleshooting Guide
  • Release Notes
  • Glossary
  • Developers Guide & Project Plan
      @@ -351,7 +353,7 @@

      Included Models

      Battery Model#

      -
      +
      class progpy.models.BatteryElectroChemEOD(**kwargs)#

      Vectorized prognostics model for a battery, represented by an electrochemical equations as described in [Daigle2013]. This model predicts the end of discharge event.

      @@ -413,6 +415,7 @@

      Battery Modelfloat) – Redlich-Kister parameter (- electrode)

    • An (float) – Redlich-Kister parameter (- electrode)

    • VEOD (float) – End of Discharge Voltage Threshold

    • +
    • VDropoff (float) – Voltage above EOD after which voltage will be considered in SOC calculation

    • x0 (dict[str, float]) – Initial state

    @@ -494,17 +497,29 @@

    Battery Model
    class progpy.models.BatteryElectroChemEODEOL(**kwargs)#
    -

    Prognostics model for a battery degredation and discharge, represented by an electrochemical model as described in [Daigle2013] and [Daigle2016]

    +

Prognostics model for battery degradation and discharge, represented by an electrochemical model as described in [Daigle2013] and [Daigle2016].

    The default model parameters included are for Li-ion batteries, specifically 18650-type cells. Experimental discharge curves for these cells can be downloaded from the Prognostics Center of Excellence Data Repository [DataRepo].

    Events: (2)
    -
    EOD: End of Discharge
    +
    EOD: End of discharge
    InsufficientCapacity: Insufficient battery capacity
    Inputs/Loading: (1)

    i: Current draw on the battery

    -
    States: (11)

    See BatteryElectroChemEOD, BatteryElectroChemEOL

    +
    States: (11)
    +
    tb: Battery temperature (K)
    +
    Vo: Voltage drops due to solid-phase ohmic resistances
    +
    Vsn: Negative surface voltage (V)
    +
    Vsp: Positive surface voltage (V)
    +
    qnB: Amount of negative ions at the battery bulk
    +
    qnS: Amount of negative ions at the battery surface
    +
    qpB: Amount of positive ions at the battery bulk
    +
    qpS: Amount of positive ions at the battery surface
    +
    qMobile: Maximum battery capacity
    +
    tDiffusion : Diffusion time constant (increasing this causes decrease in diffusion rate)
    +
    Ro : Ohmic drop (current collector resistances plus electrolyte resistance plus solid phase resistances at anode and cathode)
    +
    Outputs (2)
    t: Temperature of battery (°C)
    @@ -512,20 +527,104 @@

    Battery Model +
    Keyword Arguments
    +
      +
    • process_noise (Optional, float or dict[str, float]) – Process noise (applied at dx/next_state). +Can be number (e.g., .2) applied to every state, a dictionary of values for each +state (e.g., {‘x1’: 0.2, ‘x2’: 0.3}), or a function (x) -> x

    • +
    • process_noise_dist (Optional, str) – Distribution for Process noise (e.g., normal, uniform, triangular)

    • +
    • measurement_noise (Optional, float or dict[str, float]) – Measurement noise (applied in output eqn). +Can be number (e.g., .2) applied to every output, a dictionary of values for each +output (e.g., {‘z1’: 0.2, ‘z2’: 0.3}), or a function (z) -> z

    • +
    • measurement_noise_dist (Optional, str) – Distribution for measurement noise (e.g., normal, uniform, triangular)

    • +
    • xnMax (float) – Maximum mole fraction (neg electrode)

    • +
    • xpMax (float) – Maximum mole fraction (pos electrode). Typically 1.

    • +
    • alpha (float) – Anodic/cathodic electrochemical transfer coefficient

    • +
    • Sn (float) – Surface area (- electrode)

    • +
    • Sp (float) – Surface area (+ electrode)

    • +
    • kn (float) – Lumped constant for BV (- electrode)

    • +
    • kp (float) – Lumped constant for BV (+ electrode)

    • +
    • Vol (float) – Total interior battery volume/2 (for computing concentrations)

    • +
    • VolSFraction (float) – Fraction of total volume occupied by surface volume

    • +
    • to (float) – For Ohmic voltage

    • +
    • tsn (float) – For surface overpotential (neg)

    • +
    • tsp (float) – For surface overpotential (pos)

    • +
    • U0p (float) – Redlich-Kister parameter (+ electrode)

    • +
    • Ap (float) – Redlich-Kister parameter (+ electrode)

    • +
    • U0n (float) – Redlich-Kister parameter (- electrode)

    • +
    • An (float) – Redlich-Kister parameter (- electrode)

    • +
    • VEOD (float) – End of discharge voltage threshold

    • +
    • VDropoff (float) – Voltage above EOD after which voltage will be considered in SOC calculation

    • +
    • qMaxThreshold (float) – Threshold for qMax (for threshold_met and event_state), after which the InsufficientCapacity event has occurred. Note: Battery manufacturers specify a threshold of 70-80% of qMax

    • +
    • wq (float) – Wear rate for qMax

    • +
    • wr (float) – Wear rate for Ro

    • +
    • wd (float) – Wear rate for D

    • +
    • x0 (dict[str, float]) – Initial state

    • +
    +
    +

    +
    + +
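As a usage illustration for this class, the sketch below is a minimal, hedged example (the constant 2 A load and the save_freq value are arbitrary choices, not taken from these docs). With the default ‘first’ event strategy, simulation stops at whichever of EOD or InsufficientCapacity is reached first.

import matplotlib.pyplot as plt
from progpy.models import BatteryElectroChemEODEOL

batt = BatteryElectroChemEODEOL()  # default parameters are for 18650-type Li-ion cells

def future_loading(t, x=None):
    # Constant 2 A current draw (illustrative load)
    return batt.InputContainer({'i': 2})

# Simulate until the first event is reached
results = batt.simulate_to_threshold(future_loading, save_freq=100)
results.outputs.plot(compact=False)
plt.show()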
    class progpy.models.CentrifugalPumpBase(**kwargs)#
    -

    Prognostics model for a Centrifugal Pump as described in [DaiglePump2013].

    +

    Prognostics model for a Centrifugal Pump as described in [DaiglePump2013].

    Events: (4)
    ImpellerWearFailure: Failure of the impeller due to wear
    @@ -689,7 +788,7 @@

    Pump ModelReferences

    -
    DaiglePump2013(1,2)
    +
    DaiglePump2013(1,2)
    1. Daigle and K. Goebel, “Model-based Prognostics with Concurrent Damage Progression Processes,” IEEE Transactions on Systems, Man, and Cybernetics: Systems, vol. 43, no. 4, pp. 535-546, May 2013. https://www.researchgate.net/publication/260652495_Model-Based_Prognostics_With_Concurrent_Damage_Progression_Processes

    @@ -707,7 +806,7 @@

    Pump Model class progpy.models.CentrifugalPumpWithWear(**kwargs)#

    Prognostics model for a centrifugal pump with wear parameters as part of the model state. This is identical to CentrifugalPumpBase, only CentrifugalPumpBase has the wear params as parameters instead of states

    -

    This class implements a Centrifugal Pump model as described in [DaiglePump2013].

    +

    This class implements a Centrifugal Pump model as described in [DaiglePump2013].

    Events: (4)

    See CentrifugalPumpBase

    @@ -740,7 +839,7 @@

    Pneumatic Valve

    class progpy.models.PneumaticValveBase(**kwargs)#
    -

    Prognostics model for a Pneumatic Valve model as described in [DaigleValve2011].

    +

    Prognostics model for a Pneumatic Valve model as described in [DaigleValve2011].

    Events: (5)
    Bottom Leak: Failure due to a leak at the bottom pneumatic port
    @@ -948,7 +1047,7 @@

    DC MotorNew in version 1.3.0.

    Model of triple-phase brushlessDC Motor.

    -

    References: [0]_, [1]_.

    +

    References: [0]_, [1]_.

    This model was developed by NASA’s System Wide Safety (SWS) Project. https://www.nasa.gov/aeroresearch/programs/aosp/sws/

    Events: (0)
    @@ -996,7 +1095,7 @@

    DC MotorReferences

    -
    0
    +
    0

    Matteo Corbetta, Chetan S. Kulkarni. An approach for uncertainty quantification and management of unmanned aerial vehicle health.

    @@ -1018,7 +1117,7 @@

    ESC#Simple Electronic-Speed Controller (ESC) model for powertrain modeling. This model replicates the behavior of the speed controller with pulse-width modulation (PWM) and commutation matrix. Duty cycle simulated with a square wave using scipy signal.square function.

    -

    References: [0]_, [1]_.

    +

    References: [0]_, [1]_.

    This model was developed by NASA’s System Wide Safety (SWS) Project. https://www.nasa.gov/aeroresearch/programs/aosp/sws/

    Events: (0)
    @@ -1064,7 +1163,7 @@

    ESC#

    References

    -
    0
    +
    0

    Matteo Corbetta, Chetan S. Kulkarni. An approach for uncertainty quantification and management of unmanned aerial vehicle health.

    @@ -1198,7 +1297,7 @@

    Aircraft Models[0]_ for modeling details.

    +

    See [0]_ for modeling details.

    Events: (1)

    TrajectoryComplete: The final time of the reference trajectory has been reached

    diff --git a/docs/api_ref/progpy/LinearModel.html b/docs/api_ref/progpy/LinearModel.html index fe91b99d..b9381dbf 100644 --- a/docs/api_ref/progpy/LinearModel.html +++ b/docs/api_ref/progpy/LinearModel.html @@ -9,7 +9,7 @@ - LinearModel — ProgPy Python Packages 1.7 documentation + LinearModel — ProgPy Python Packages 1.8 documentation @@ -32,7 +32,6 @@ - @@ -47,6 +46,7 @@ + @@ -153,6 +153,7 @@
  • CompositeModel
  • DataModel
  • Datasets
  • +
  • Discrete States
  • EnsembleModel
  • Included Models
  • LinearModel
  • @@ -170,6 +171,7 @@

  • +
  • Troubleshooting Guide
  • Release Notes
  • Glossary
  • Developers Guide & Project Plan
      @@ -934,10 +936,11 @@

      LinearModel
      • events (abc.Sequence[str] or str, optional) – Keys for events that will trigger the end of simulation. If blank, simulation will occur if any event will be met ()

      • -
      • event_strategy (str, optional) –

        Strategy for stopping evaluation. Default is ‘first’. One of:

        +
      • event_strategy (str or abc.Callable, optional) –

        Strategy for stopping evaluation. Default is ‘first’. One of:

        • first: Will stop when first event in events list is reached.

        • all: Will stop when all events in events list have been reached

        • +
        • abc.Callable: Custom equation to indicate logic for when to stop sim f(thresholds_met) -> bool

      • t0 (float, optional) – Starting time for simulation in seconds (default: 0.0)

      • @@ -953,7 +956,6 @@

        LinearModelfloat, optional) – maximum time that the model will be simulated forward (s), e.g., horizon = 1000

      • first_output (OutputContainer, optional) – First measured output, needed to initialize state for some classes. Can be omitted for classes that don’t use this

      • x (StateContainer, optional) – initial state, e.g., x= m.StateContainer({‘x1’: 10, ‘x2’: -5.3})

      • -
      • thresholds_met_eqn (abc.Callable, optional) – custom equation to indicate logic for when to stop sim f(thresholds_met) -> bool

      • print (bool, optional) –

        toggle intermediate printing, e.g., print = True

        e.g., m.simulate_to_threshold(eqn, z, dt=0.1, save_pts=[1, 2])
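To illustrate the callable form of event_strategy described above, the sketch below is a hedged example (it assumes ThrownObject’s ‘falling’ and ‘impact’ event keys and that thresholds_met is a dictionary keyed by event name; it is not quoted from these docs).

from progpy.models import ThrownObject

m = ThrownObject()

def load(t, x=None):
    return m.InputContainer({})  # ThrownObject takes no inputs

# Custom stopping logic, f(thresholds_met) -> bool:
# keep simulating until the 'impact' event has been reached.
def stop_at_impact(thresholds_met):
    return thresholds_met['impact']

results = m.simulate_to_threshold(
    load, events=['falling', 'impact'], event_strategy=stop_at_impact,
    dt=0.01, save_freq=1)
print(results.times[-1])  # approximate time of impact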

      • diff --git a/docs/api_ref/progpy/Loading.html b/docs/api_ref/progpy/Loading.html index 74adda8a..4ee1bec2 100644 --- a/docs/api_ref/progpy/Loading.html +++ b/docs/api_ref/progpy/Loading.html @@ -9,7 +9,7 @@ - Loading — ProgPy Python Packages 1.7 documentation + Loading — ProgPy Python Packages 1.8 documentation @@ -32,7 +32,6 @@ - @@ -47,6 +46,7 @@ + @@ -153,6 +153,7 @@
      • CompositeModel
      • DataModel
      • Datasets
      • +
      • Discrete States
      • EnsembleModel
      • Included Models
      • LinearModel
      • @@ -170,6 +171,7 @@

      +
    • Troubleshooting Guide
    • Release Notes
    • Glossary
    • Developers Guide & Project Plan
        diff --git a/docs/api_ref/progpy/MixtureOfExperts.html b/docs/api_ref/progpy/MixtureOfExperts.html index 01165197..83bd6e67 100644 --- a/docs/api_ref/progpy/MixtureOfExperts.html +++ b/docs/api_ref/progpy/MixtureOfExperts.html @@ -9,7 +9,7 @@ - MixtureOfExperts — ProgPy Python Packages 1.7 documentation + MixtureOfExperts — ProgPy Python Packages 1.8 documentation @@ -32,7 +32,6 @@ - @@ -47,6 +46,7 @@ + @@ -153,6 +153,7 @@
      • CompositeModel
      • DataModel
      • Datasets
      • +
      • Discrete States
      • EnsembleModel
      • Included Models
      • LinearModel
      • @@ -170,6 +171,7 @@
    • +
    • Troubleshooting Guide
    • Release Notes
    • Glossary
    • Developers Guide & Project Plan
        diff --git a/docs/api_ref/progpy/Prediction.html b/docs/api_ref/progpy/Prediction.html index ef3a225f..4cb2ea8d 100644 --- a/docs/api_ref/progpy/Prediction.html +++ b/docs/api_ref/progpy/Prediction.html @@ -9,7 +9,7 @@ - Prediction — ProgPy Python Packages 1.7 documentation + Prediction — ProgPy Python Packages 1.8 documentation @@ -32,11 +32,11 @@ - + @@ -46,9 +46,9 @@ - + @@ -156,6 +156,7 @@
      • CompositeModel
      • DataModel
      • Datasets
      • +
      • Discrete States
      • EnsembleModel
      • Included Models
      • LinearModel
      • @@ -173,6 +174,7 @@
    • +
    • Troubleshooting Guide
    • Release Notes
    • Glossary
    • Developers Guide & Project Plan
        diff --git a/docs/api_ref/progpy/Predictor.html b/docs/api_ref/progpy/Predictor.html index 13203eab..afa722a1 100644 --- a/docs/api_ref/progpy/Predictor.html +++ b/docs/api_ref/progpy/Predictor.html @@ -9,7 +9,7 @@ - Predictors — ProgPy Python Packages 1.7 documentation + Predictors — ProgPy Python Packages 1.8 documentation @@ -32,11 +32,11 @@ - + @@ -46,8 +46,8 @@ - + @@ -155,6 +155,7 @@
      • CompositeModel
      • DataModel
      • Datasets
      • +
      • Discrete States
      • EnsembleModel
      • Included Models
      • LinearModel
      • @@ -172,6 +173,7 @@
    • +
    • Troubleshooting Guide
    • Release Notes
    • Glossary
    • Developers Guide & Project Plan
        diff --git a/docs/api_ref/progpy/PrognosticModel.html b/docs/api_ref/progpy/PrognosticModel.html index fff6db7b..0fff744b 100644 --- a/docs/api_ref/progpy/PrognosticModel.html +++ b/docs/api_ref/progpy/PrognosticModel.html @@ -9,7 +9,7 @@ - PrognosticsModel — ProgPy Python Packages 1.7 documentation + PrognosticsModel — ProgPy Python Packages 1.8 documentation @@ -32,7 +32,6 @@ - @@ -47,6 +46,7 @@ + @@ -153,6 +153,7 @@
      • CompositeModel
      • DataModel
      • Datasets
      • +
      • Discrete States
      • EnsembleModel
      • Included Models
      • LinearModel
      • @@ -170,6 +171,7 @@
    • +
    • Troubleshooting Guide
    • Release Notes
    • Glossary
    • Developers Guide & Project Plan
        @@ -1083,10 +1085,11 @@

        PrognosticsModel
        • events (abc.Sequence[str] or str, optional) – Keys for events that will trigger the end of simulation. If blank, simulation will occur if any event will be met ()

        • -
        • event_strategy (str, optional) –

          Strategy for stopping evaluation. Default is ‘first’. One of:

          +
        • event_strategy (str or abc.Callable, optional) –

          Strategy for stopping evaluation. Default is ‘first’. One of:

          • first: Will stop when first event in events list is reached.

          • all: Will stop when all events in events list have been reached

          • +
          • abc.Callable: Custom equation to indicate logic for when to stop sim f(thresholds_met) -> bool

        • t0 (float, optional) – Starting time for simulation in seconds (default: 0.0)

        • @@ -1102,7 +1105,6 @@

          PrognosticsModelfloat, optional) – maximum time that the model will be simulated forward (s), e.g., horizon = 1000

        • first_output (OutputContainer, optional) – First measured output, needed to initialize state for some classes. Can be omitted for classes that don’t use this

        • x (StateContainer, optional) – initial state, e.g., x= m.StateContainer({‘x1’: 10, ‘x2’: -5.3})

        • -
        • thresholds_met_eqn (abc.Callable, optional) – custom equation to indicate logic for when to stop sim f(thresholds_met) -> bool

        • print (bool, optional) –

          toggle intermediate printing, e.g., print = True

          e.g., m.simulate_to_threshold(eqn, z, dt=0.1, save_pts=[1, 2])

        • diff --git a/docs/api_ref/progpy/SimResult.html b/docs/api_ref/progpy/SimResult.html index 81be5eb7..6c1b2168 100644 --- a/docs/api_ref/progpy/SimResult.html +++ b/docs/api_ref/progpy/SimResult.html @@ -9,7 +9,7 @@ - SimResult — ProgPy Python Packages 1.7 documentation + SimResult — ProgPy Python Packages 1.8 documentation @@ -32,7 +32,6 @@ - @@ -47,6 +46,7 @@ + @@ -153,6 +153,7 @@
        • CompositeModel
        • DataModel
        • Datasets
        • +
        • Discrete States
        • EnsembleModel
        • Included Models
        • LinearModel
        • @@ -170,6 +171,7 @@

        +
      • Troubleshooting Guide
      • Release Notes
      • Glossary
      • Developers Guide & Project Plan
          diff --git a/docs/api_ref/progpy/StateEstimator.html b/docs/api_ref/progpy/StateEstimator.html index 47bc3f50..aee12a8d 100644 --- a/docs/api_ref/progpy/StateEstimator.html +++ b/docs/api_ref/progpy/StateEstimator.html @@ -9,7 +9,7 @@ - State Estimators — ProgPy Python Packages 1.7 documentation + State Estimators — ProgPy Python Packages 1.8 documentation @@ -32,11 +32,11 @@ - + @@ -46,8 +46,8 @@ - + @@ -155,6 +155,7 @@
        • CompositeModel
        • DataModel
        • Datasets
        • +
        • Discrete States
        • EnsembleModel
        • Included Models
        • LinearModel
        • @@ -172,6 +173,7 @@
      • +
      • Troubleshooting Guide
      • Release Notes
      • Glossary
      • Developers Guide & Project Plan
          @@ -339,7 +341,7 @@

          Contents

          State Estimators#

          -

          The State Estimator uses sensor information and a Prognostics Model to produce an estimate of system state (which can be used to estimate outputs, event_states, and performance metrics). This state estimate can either be used by itself or as input to a Predictor. A state estimator is typically run each time new information is available.

          +

          The State Estimator uses sensor information and a Prognostics Model to produce an estimate of system state (which can be used to estimate outputs, event_states, and performance metrics). This state estimate can either be used by itself or as input to a Predictor. A state estimator is typically run each time new information is available.

          Here’s an example of its use. In this example we use the unscented kalman filter state estimator and the ThrownObject model.

          >>> from progpy.models import ThrownObject
           >>> from progpy.state_estimators import UnscentedKalmanFilter
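A minimal sketch of how these two pieces are typically wired together is shown below. The UnscentedKalmanFilter(model, initial_state) constructor, the estimate(t, u, z) call, and the 1.85 m measurement are assumptions for illustration rather than quotes from this page.

from progpy.models import ThrownObject
from progpy.state_estimators import UnscentedKalmanFilter

m = ThrownObject()
filt = UnscentedKalmanFilter(m, m.initialize())  # assumed constructor: model + initial state

# Step the estimator with one hypothetical measurement of position 'x'
u = m.InputContainer({})
z = m.OutputContainer({'x': 1.85})
filt.estimate(0.1, u, z)

print(filt.x.mean)  # updated state estimate, an UncertainData object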
          diff --git a/docs/api_ref/progpy/ToEPredictionProfile.html b/docs/api_ref/progpy/ToEPredictionProfile.html
          index 7ba0cb83..b74f8a49 100644
          --- a/docs/api_ref/progpy/ToEPredictionProfile.html
          +++ b/docs/api_ref/progpy/ToEPredictionProfile.html
          @@ -9,7 +9,7 @@
               
               
           
          -    ToEPredictionProfile — ProgPy Python Packages 1.7 documentation
          +    ToEPredictionProfile — ProgPy Python Packages 1.8 documentation
             
             
             
          @@ -32,7 +32,6 @@
               
               
               
          -    
               
               
               
          @@ -48,6 +47,7 @@
               
               
               
          +    
               
               
               
          @@ -154,6 +154,7 @@
           
        • CompositeModel
        • DataModel
        • Datasets
        • +
        • Discrete States
        • EnsembleModel
        • Included Models
        • LinearModel
        • @@ -171,6 +172,7 @@
      • +
      • Troubleshooting Guide
      • Release Notes
      • Glossary
      • Developers Guide & Project Plan
          diff --git a/docs/api_ref/progpy/UncertainData.html b/docs/api_ref/progpy/UncertainData.html index d32d0be7..2ebc796d 100644 --- a/docs/api_ref/progpy/UncertainData.html +++ b/docs/api_ref/progpy/UncertainData.html @@ -9,7 +9,7 @@ - Uncertain Data — ProgPy Python Packages 1.7 documentation + Uncertain Data — ProgPy Python Packages 1.8 documentation @@ -32,11 +32,11 @@ - + @@ -46,9 +46,9 @@ - + @@ -156,6 +156,7 @@
        • CompositeModel
        • DataModel
        • Datasets
        • +
        • Discrete States
        • EnsembleModel
        • Included Models
        • LinearModel
        • @@ -173,6 +174,7 @@
      • +
      • Troubleshooting Guide
      • Release Notes
      • Glossary
      • Developers Guide & Project Plan
          @@ -340,7 +342,7 @@

          Contents

          Uncertain Data#

          -

          The progpy.uncertain_data package includes classes for representing data with uncertainty. All types of UncertainData can be operated on using the interface. Inidividual classes for representing uncertain data of different kinds are described below, in Implemented UncertainData Types.

          +

          The progpy.uncertain_data package includes classes for representing data with uncertainty. All types of UncertainData can be operated on using the interface. Individual classes for representing uncertain data of different kinds are described below, in Implemented UncertainData Types.
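For illustration, the hedged sketch below constructs one uncertain-data object and reads the interface properties described in this section. MultivariateNormalDist is assumed here to be one of the implemented types (see Implemented UncertainData Types below for the authoritative list), and the keys and numbers are arbitrary.

import numpy as np
from progpy.uncertain_data import MultivariateNormalDist  # assumed implemented type

d = MultivariateNormalDist(
    ['x', 'v'],                            # keys
    np.array([1.0, 20.0]),                 # mean for each key
    np.array([[0.1, 0.0], [0.0, 2.0]]))    # covariance matrix

print(d.mean)    # dict keyed by 'x' and 'v'
print(d.cov)     # 2x2 numpy array, in key order
print(d.median)  # dict; equal to the mean for a normal distribution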

          Interface#

          @@ -349,7 +351,7 @@

          Interface
          -abstract property cov#
          +abstract property cov: numpy.array#

The covariance matrix of the UncertainData distribution or samples in order of keys (i.e., cov[1][1] is the variance for key keys()[1])

          Returns
          @@ -412,7 +414,7 @@

          Interface
          -abstract property mean#
          +abstract property mean: dict#

          The mean of the UncertainData distribution or samples

          Returns
          @@ -430,7 +432,7 @@

          Interface
          -abstract property median#
          +abstract property median: dict#

          The median of the UncertainData distribution or samples

          Returns
          diff --git a/docs/api_ref/progpy/Utils.html b/docs/api_ref/progpy/Utils.html index 5000a714..8fe900ba 100644 --- a/docs/api_ref/progpy/Utils.html +++ b/docs/api_ref/progpy/Utils.html @@ -9,7 +9,7 @@ - Utils — ProgPy Python Packages 1.7 documentation + Utils — ProgPy Python Packages 1.8 documentation @@ -32,7 +32,6 @@ - @@ -47,10 +46,11 @@ + - + @@ -153,6 +153,7 @@
        • CompositeModel
        • DataModel
        • Datasets
        • +
        • Discrete States
        • EnsembleModel
        • Included Models
        • LinearModel
        • @@ -170,6 +171,7 @@

      • +
      • Troubleshooting Guide
      • Release Notes
      • Glossary
      • Developers Guide & Project Plan
          @@ -432,11 +434,11 @@

          Trajectory

          next

          -

          Release Notes

          +

          Troubleshooting Guide

          diff --git a/docs/auto_examples/benchmarking.html b/docs/auto_examples/benchmarking.html deleted file mode 100644 index 5f1e4964..00000000 --- a/docs/auto_examples/benchmarking.html +++ /dev/null @@ -1,463 +0,0 @@ - - - - - - - - - - - - <no title> — ProgPy Python Packages 1.7 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
          -
          - - - - -
          - - -

          Example benchmarking the computational efficiency of models.

          -
          from timeit import timeit
          -from progpy.models import BatteryCircuit
          -
          -def run_example():
          -    # Step 1: Create a model object
          -    batt = BatteryCircuit()
          -
          -    # Step 2: Define future loading function
          -    def future_loading(t, x=None):
          -        # Constant Loading
          -        return batt.InputContainer({'i': 2})
          -
          -    # Step 3: Benchmark simulation of 600 seconds
          -    print('Benchmarking...')
          -    def sim():
          -        results = batt.simulate_to(600, future_loading)
          -    time = timeit(sim, number=500)
          -
          -    # Print results
          -    print('Simulation Time: {} ms/sim'.format(time*2))
          -
          -# This allows the module to be executed directly
          -if __name__=='__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

          - -

          Gallery generated by Sphinx-Gallery

          - - -
          -
          -
          - - - - - - -Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. - - - \ No newline at end of file diff --git a/docs/auto_examples/custom_model.html b/docs/auto_examples/custom_model.html deleted file mode 100644 index d78d6e55..00000000 --- a/docs/auto_examples/custom_model.html +++ /dev/null @@ -1,551 +0,0 @@ - - - - - - - - - - - - <no title> — ProgPy Python Packages 1.7 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
          -
          - - - - -
          - - -

          Example building a custom model with LSTMStateTransitionModel.

          -

          For most cases, you will be able to use the standard LSTMStateTransitionModel.from_data class with configuration (see the LSTMStateTransitionModel class for more details). However, sometimes you might want to add custom layers, or other complex components. In that case, you will build a custom model and pass it into LSTMStateTransitionModel.

          -

          In this example, we generate fake data using the BatteryElectroChemEOD model. This is a case where we’re generating a surrogate model from the physics-based model. For cases where you’re generating a model from data (e.g., collected from a testbed or a real-world environment), you’ll replace that generated data with your own.

          -

          We build and fit a custom model using keras.layers. Finally, we compare performance to the standard format and the original model.

          -
          import matplotlib.pyplot as plt
          -import numpy as np
          -from tensorflow import keras
          -from tensorflow.keras import layers
          -
          -from progpy.data_models import LSTMStateTransitionModel
          -from progpy.models import BatteryElectroChemEOD
          -
          -def run_example():
          -    print('Generating data...')
          -    batt = BatteryElectroChemEOD()
          -    future_loading_eqns = [lambda t, x=None: batt.InputContainer({'i': 1+1.5*load}) for load in range(6)]
          -    # Generate data with different loading and step sizes
          -    # Adding the step size as an element of the output
          -    training_data = []
          -    input_data = []
          -    output_data = []
          -    for i in range(9):
          -        dt = i/3+0.25
          -        for loading_eqn in future_loading_eqns:
          -            d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt)
          -            u = np.array([np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float)
          -            z = d.outputs
          -            training_data.append((u, z))
          -            input_data.append(u)
          -            output_data.append(z)
          -
          -    # Step 2: Build standard model
          -    print("Building standard model...")
          -    m_batt = LSTMStateTransitionModel.from_data(
          -        inputs = input_data,
          -        outputs = output_data,
          -        window=12,
          -        epochs=30,
          -        units=64,  # Additional units given the increased complexity of the system
          -        input_keys = ['i', 'dt'],
          -        output_keys = ['t', 'v'])
          -
          -    # Step 3: Build custom model
          -    print('Building custom model...')
          -    (u_all, z_all) = LSTMStateTransitionModel.pre_process_data(training_data, window=12)
          -
          -    # Normalize
          -    n_inputs = len(training_data[0][0][0])
          -    u_mean = np.mean(u_all[:,0,:n_inputs], axis=0)
          -    u_std = np.std(u_all[:,0,:n_inputs], axis=0)
          -    # If there's no variation- dont normalize
          -    u_std[u_std == 0] = 1
          -    z_mean = np.mean(z_all, axis=0)
          -    z_std = np.std(z_all, axis=0)
          -    # If there's no variation- dont normalize
          -    z_std[z_std == 0] = 1
          -
          -    # Add output (since z_t-1 is last input)
          -    u_mean = np.hstack((u_mean, z_mean))
          -    u_std = np.hstack((u_std, z_std))
          -
          -    u_all = (u_all - u_mean)/u_std
          -    z_all = (z_all - z_mean)/z_std
          -
          -    # u_mean and u_std act on the column vector form (from inputcontainer)
          -    # so we need to transpose them to a column vector
          -    normalization = (u_mean[np.newaxis].T, u_std[np.newaxis].T, z_mean, z_std)
          -
          -    callbacks = [
          -        keras.callbacks.ModelCheckpoint("jena_sense.keras", save_best_only=True)
          -    ]
          -    inputs = keras.Input(shape=u_all.shape[1:])
          -    x = layers.Bidirectional(layers.LSTM(128))(inputs)
          -    x = layers.Dropout(0.1)(x)
          -    x = layers.Dense(z_all.shape[1] if z_all.ndim == 2 else 1)(x)
          -    model = keras.Model(inputs, x)
          -    model.compile(optimizer="rmsprop", loss="mse", metrics=["mae"])
          -    model.fit(u_all, z_all, epochs=30, callbacks = callbacks, validation_split = 0.1)
          -
          -    # Step 4: Build LSTMStateTransitionModel
          -    m_custom = LSTMStateTransitionModel(model,
          -        normalization=normalization,
          -        input_keys = ['i', 'dt'],
          -        output_keys = ['t', 'v']
          -    )
          -
          -    # Step 5: Simulate
          -    print('Simulating...')
          -    t_counter = 0
          -    x_counter = batt.initialize()
          -    def future_loading(t, x=None):
          -        return batt.InputContainer({'i': 3})
          -
          -    def future_loading2(t, x = None):
          -        nonlocal t_counter, x_counter
          -        z = batt.output(x_counter)
          -        z = m_batt.InputContainer({'i': 3, 't_t-1': z['t'], 'v_t-1': z['v'], 'dt': t - t_counter})
          -        x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter)
          -        t_counter = t
          -        return z
          -    data = batt.simulate_to_threshold(future_loading, dt=1, save_freq=1)
          -    results = m_batt.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1)
          -    results_custom = m_custom.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1)
          -
          -    # Step 6: Compare performance
          -    print('Comparing performance...')
          -    data.outputs.plot(title='original model', compact=False)
          -    results.outputs.plot(title='generated model', compact=False)
          -    results_custom.outputs.plot(title='custom model', compact=False)
          -    plt.show()
          -
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

          - -

          Gallery generated by Sphinx-Gallery

          - - -
          -
          -
          - - - - - - -Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. - - - \ No newline at end of file diff --git a/docs/auto_examples/dataset.html b/docs/auto_examples/dataset.html deleted file mode 100644 index 3c7c66ac..00000000 --- a/docs/auto_examples/dataset.html +++ /dev/null @@ -1,499 +0,0 @@ - - - - - - - - - - - - <no title> — ProgPy Python Packages 1.7 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
          -
          - - - - -
          - - -

          Example downloading and using a NASA prognostics dataset.

          -

          In this example, a battery dataset is downloaded from the NASA PCoE data repository. This dataset is then accessed and plotted.

          -
          DATASET_ID = 1
          -
          -def run_example():
          -    # Step 1: Download and import the dataset for a single battery
          -    # Note: This may take some time
          -    from progpy.datasets import nasa_battery
          -    print('Downloading... ', end='')
          -    (desc, data) = nasa_battery.load_data(DATASET_ID)
          -    print('done')
          -
          -    # We recommend saving the dataset to disk for future use
          -    # This way you dont have to download it each time
          -    import pickle
          -    pickle.dump((desc, data), open(f'dataset_{DATASET_ID}.pkl', 'wb'))
          -
          -    # Step 2: Access the dataset description
          -    print(f'\nDataset {DATASET_ID}')
          -    print(desc['description'])
          -    print(f'Procedure: {desc["procedure"]}')
          -
          -    # Step 3: Access the dataset data
          -    # Data is in format [run_id][time][variable]
          -    # For the battery the variables are
          -    #    0: relativeTime (since beginning of run)
          -    #    1: current (amps)
          -    #    2: voltage
          -    #    3: temperature (°C)
          -    # so that data[a][b, 3] is the temperature at time index b (relative to the start of the run) for run a
          -    print(f'\nNumber of runs: {len(data)}')
          -    print(f'\nAnalyzing run 4')
          -    print(f'number of time indices: {len(data[4])}')
          -    print(f"Details of run 4: {desc['runs'][4]}")
          -
          -    # Plot the run
          -    import matplotlib.pyplot as plt
          -    plt.figure()
          -    plt.subplot(2, 1, 1)
          -    plt.plot(data[4][:, 0], data[4][:, 1])
          -    plt.ylabel('Current (A)')
          -
          -    plt.subplot(2, 1, 2)
          -    plt.plot(data[4][:, 0], data[4][:, 2])
          -    plt.ylabel('Voltage (V)')
          -    plt.xlabel('Time (s)')
          -    plt.title('Run 4')
          -
          -    # Graph all reference discharge profiles
          -    indices = [i for i, x in enumerate(desc['runs']) if 'reference discharge' in x['desc'] and 'rest' not in x['desc']]
          -    plt.figure()
          -    for i in indices:
          -        plt.plot(data[i][:, 0], data[i][:, 2], label=f"Run {i}")
          -    plt.title('Reference discharge profiles')
          -    plt.xlabel('Time (s)')
          -    plt.ylabel('Voltage (V)')
          -    plt.show()
          -
          -# This allows the module to be executed directly
          -if __name__=='__main__':
          -    run_example()
          -
          -
          -
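
          The example above recommends pickling the downloaded dataset so it does not have to be fetched again. A minimal sketch (assuming the dataset_1.pkl file written above already exists on disk) of reloading it in a later session:

          import pickle

          DATASET_ID = 1

          # Reload the previously saved (desc, data) tuple instead of re-downloading
          with open(f'dataset_{DATASET_ID}.pkl', 'rb') as f:
              (desc, data) = pickle.load(f)

          print(desc['description'])
          print(f'Number of runs: {len(data)}')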

          Total running time of the script: ( 0 minutes 0.000 seconds)

          Gallery generated by Sphinx-Gallery

diff --git a/docs/auto_examples/derived_params.html b/docs/auto_examples/derived_params.html
deleted file mode 100644
index f9795dff..00000000
--- a/docs/auto_examples/derived_params.html
+++ /dev/null
@@ -1,478 +0,0 @@
          <no title> — ProgPy Python Packages 1.7 documentation

          Example demonstrating ways to use the derived parameters feature for model building.

          -
          from progpy.models.thrown_object import ThrownObject
          -
          -def run_example():
          -    # For this example we will use the ThrownObject model from the new_model example.
          -    # We will extend that model to include a derived parameter
          -    # Let's assume that the throwing_speed was actually a function of thrower_height
          -    # (i.e., a taller thrower would throw the ball faster).
          -    # Here's how we would implement that
          -
          -    # Step 1: Define a function for the relationship between thrower_height and throwing_speed.
          -    def update_thrown_speed(params):
          -        return {
          -            'throwing_speed': params['thrower_height'] * 21.85
          -        }  # Assumes throwing_speed is a linear function of height
          -    # Note: one or more parameters can be changed in these functions; whatever parameters are changed are returned in the dictionary
          -
          -    # Step 2: Define the param callbacks
          -    ThrownObject.param_callbacks.update({
          -            'thrower_height': [update_thrown_speed]
          -        })  # Tell the derived callbacks feature to call this function when thrower_height changes.
          -    # Note: Usually we would define this method within the class
          -    #  for this example, we're doing it separately to improve readability
          -    # Note2: You can also have more than one function be called when a single parameter is changed.
          -    #  Do this by adding the additional callbacks to the list (e.g., 'thrower_height': [update_thrown_speed, other_fcn])
          -
          -    # Step 3: Use!
          -    obj = ThrownObject()
          -    print("Default Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format(obj.parameters['thrower_height'], obj.parameters['throwing_speed']))
          -
          -    # Now let's change the thrower_height
          -    print("changing height...")
          -    obj.parameters['thrower_height'] = 1.75  # Our thrower is 1.75 m tall
          -    print("\nUpdated Settings:\n\tthrower_height: {}\n\tthowing_speed: {}".format(obj.parameters['thrower_height'], obj.parameters['throwing_speed']))
          -    print("Notice how speed changed automatically with height")
          -
          -
          -# This allows the module to be executed directly
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -
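
          As Note2 in the example points out, several callbacks can be attached to a single parameter. A minimal sketch of that pattern, where other_fcn is a hypothetical second callback (not part of the original example):

          from progpy.models.thrown_object import ThrownObject

          def update_thrown_speed(params):
              # Same relationship as in the example above
              return {'throwing_speed': params['thrower_height'] * 21.85}

          def other_fcn(params):
              # Hypothetical second callback; it returns whichever parameters it changes
              return {'g': -9.81}

          # Both callbacks run whenever thrower_height is changed
          ThrownObject.param_callbacks.update({'thrower_height': [update_thrown_speed, other_fcn]})

          obj = ThrownObject()
          obj.parameters['thrower_height'] = 1.75
          print(obj.parameters['throwing_speed'])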

          Total running time of the script: ( 0 minutes 0.000 seconds)

          Gallery generated by Sphinx-Gallery

diff --git a/docs/auto_examples/dynamic_step_size.html b/docs/auto_examples/dynamic_step_size.html
deleted file mode 100644
index f54aa853..00000000
--- a/docs/auto_examples/dynamic_step_size.html
+++ /dev/null
@@ -1,488 +0,0 @@
          <no title> — ProgPy Python Packages 1.7 documentation

          Example demonstrating ways to use the dynamic step size feature. This feature allows users to define a time-step that changes with time or state.

          -
          import prog_models
          -from progpy.models.thrown_object import ThrownObject
          -
          -def run_example():
          -    print("EXAMPLE 1: dt of 1 until 8 sec, then 0.5\n\nSetting up...\n")
          -    # Step 1: Create instance of model
          -    m = ThrownObject()
          -
          -    # Step 2: Setup for simulation
          -    def future_load(t, x=None):
          -        return {}
          -
          -    # Step 3: Define dynamic step size function
          -    # This `next_time` function will specify what the next step of the simulation should be at any state and time.
          -    # f(t, x) -> dt
          -    def next_time(t, x):
          -        # In this example dt is a function of time. We will use a dt of 1 for the first 8 seconds, then 0.5
          -        if t < 8:
          -            return 1
          -        return 0.5
          -
          -    # Step 4: Simulate to impact
          -    # Here we're printing every time step so we can see the step size change
          -    print('\n\n------------------------------------------------')
          -    print('Simulating to threshold\n\n')
          -    (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, save_freq=1e-99, print=True, dt=next_time, threshold_keys=['impact'])
          -
          -    # Example 2
          -    print("EXAMPLE 2: dt of 1 until impact event state 0.5, then 0.25 \n\nSetting up...\n")
          -
          -    # Step 3: Define dynamic step size function
          -    # This `next_time` function will specify what the next step of the simulation should be at any state and time.
          -    # f(t, x) -> dt
          -    def next_time(t, x):
          -        # In this example dt is a function of state. Uses a dt of 1 until impact event state 0.5, then 0.25
          -        event_state = m.event_state(x)
          -        if event_state['impact'] < 0.5:
          -            return 0.25
          -        return 1
          -
          -    # Step 4: Simulate to impact
          -    # Here we're printing every time step so we can see the step size change
          -    print('\n\n------------------------------------------------')
          -    print('Simulating to threshold\n\n')
          -    (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, save_freq=1e-99, print=True, dt=next_time, threshold_keys=['impact'])
          -
          -# This allows the module to be executed directly
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -
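
          The two cases above key the step size off time and off state separately; the same mechanism also supports mixing both criteria. A minimal illustrative sketch (not part of the original example), using the same ThrownObject model:

          from progpy.models.thrown_object import ThrownObject

          m = ThrownObject()

          def next_time(t, x):
              # Coarse 1-second steps early on; finer 0.25-second steps once either
              # 8 seconds have elapsed or the impact event state drops below 0.5
              if t < 8 and m.event_state(x)['impact'] > 0.5:
                  return 1
              return 0.25

          (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(
              lambda t, x=None: {}, dt=next_time, save_freq=1e-99, threshold_keys=['impact'])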

          Total running time of the script: ( 0 minutes 0.000 seconds)

          Gallery generated by Sphinx-Gallery

diff --git a/docs/auto_examples/events.html b/docs/auto_examples/events.html
deleted file mode 100644
index a5b5dd9a..00000000
--- a/docs/auto_examples/events.html
+++ /dev/null
@@ -1,521 +0,0 @@
          <no title> — ProgPy Python Packages 1.7 documentation

          Example further illustrating the concept of ‘events’ which generalizes EOL.

          -

          ‘Events’ is the term used to describe something to be predicted. Generally in the PHM community these are referred to as End of Life (EOL). However, they can be much more.

          -

          In the prog_models package, events can be anything that needs to be predicted. Events can represent End of Life (EOL), End of Mission (EOM), warning thresholds, or any Event of Interest (EOI).

          -

          This example demonstrates how events can be used in your applications.
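
          The warning events defined below are produced by linearly rescaling the EOD event state so that the chosen warning threshold maps to 0 while a fully charged battery still maps to 1. A quick standalone check of that rescaling (threshold value taken from the example):

          YELLOW_THRESH = 0.15

          def rescale(eod_event_state, thresh):
              # Linear map: event state == thresh -> 0, event state == 1 -> 1
              return (eod_event_state - thresh) / (1 - thresh)

          print(rescale(1.0, YELLOW_THRESH))   # 1.0: fully charged, no warning
          print(rescale(0.15, YELLOW_THRESH))  # 0.0: yellow warning threshold reached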

          -
          from progpy.models import BatteryElectroChemEOD
          -
          -def run_example():
          -    # Example: Warning thresholds
          -    # In this example we will use the battery model
          -    # We of course are interested in end of discharge, but for this example we
          -    # have a requirement that says the battery must not fall below 5% State of Charge (SOC)
          -    # Note: SOC is the event state for the End of Discharge (EOD) event
          -    # Event states, like SOC, go between 0 and 1, where 1 is healthy and 0 means the event has occurred.
          -    # So, 5% SOC corresponds to an 'EOD' event state of 0.05
          -    # Additionally, we have two warning thresholds (yellow and red)
          -
          -    YELLOW_THRESH = 0.15
          -    RED_THRESH = 0.1
          -    THRESHOLD = 0.05
          -
          -    # Step 1: Extend the battery model to define the additional events
          -    class MyBatt(BatteryElectroChemEOD):
          -        events = BatteryElectroChemEOD.events + ['EOD_warn_yellow', 'EOD_warn_red', 'EOD_requirement_threshold']
          -
          -        def event_state(self, state):
          -            # Get event state from parent
          -            event_state = super().event_state(state)
          -
          -            # Add yellow, red, and failure states by scaling EOD state
          -            # Here we scale so that each new event state reaches 0 at its associated threshold SOC, while an SOC of 1 still maps to an event state of 1
          -            # For example, for yellow we want EOD_warn_yellow to be 1 when SOC is 1, and 0 when SOC is YELLOW_THRESH or lower
          -            event_state['EOD_warn_yellow'] = (event_state['EOD']-YELLOW_THRESH)/(1-YELLOW_THRESH)
          -            event_state['EOD_warn_red'] = (event_state['EOD']-RED_THRESH)/(1-RED_THRESH)
          -            event_state['EOD_requirement_threshold'] = (event_state['EOD']-THRESHOLD)/(1-THRESHOLD)
          -
          -            # Return
          -            return event_state
          -
          -        def threshold_met(self, x):
          -            # Get threshold met from parent
          -            t_met =  super().threshold_met(x)
          -
          -            # Add yellow and red states from event_state
          -            event_state = self.event_state(x)
          -            t_met['EOD_warn_yellow'] = event_state['EOD_warn_yellow'] <= 0
          -            t_met['EOD_warn_red'] = event_state['EOD_warn_red'] <= 0
          -            t_met['EOD_requirement_threshold'] = event_state['EOD_requirement_threshold'] <= 0
          -
          -            return t_met
          -
          -    # Step 2: Use it
          -    m = MyBatt()
          -
          -    # 2a: Setup model
          -    def future_loading(t, x=None):
          -        # Variable (piece-wise) future loading scheme
          -        # For a battery, future loading is in terms of current 'i' in amps.
          -        if (t < 600):
          -            i = 2
          -        elif (t < 900):
          -            i = 1
          -        elif (t < 1800):
          -            i = 4
          -        elif (t < 3000):
          -            i = 2
          -        else:
          -            i = 3
          -        return m.InputContainer({'i': i})
          -
          -    # 2b: Simulate to threshold
          -    simulated_results = m.simulate_to_threshold(future_loading, threshold_keys=['EOD'], print = True)
          -
          -    # 2c: Plot results
          -    simulated_results.event_states.plot()
          -    import matplotlib.pyplot as plt
          -    plt.show()
          -
          -# This allows the module to be executed directly
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

          Gallery generated by Sphinx-Gallery

diff --git a/docs/auto_examples/full_lstm_model.html b/docs/auto_examples/full_lstm_model.html
deleted file mode 100644
index 9a8005e8..00000000
--- a/docs/auto_examples/full_lstm_model.html
+++ /dev/null
@@ -1,553 +0,0 @@
          <no title> — ProgPy Python Packages 1.7 documentation

          Example building a full model with events and thresholds using LSTMStateTransitionModel.

          -

          In this example, we generate fake data using the ThrownObject model. This is a case where we’re generating a surrogate model from the physics-based model. For cases where you’re generating a model from data (e.g., collected from a testbed or a real-world environment), you’ll replace that generated data with your own.

          -

          We then create a subclass of the LSTMStateTransitionModel, defining the event_state and threshold equations as a function of output. We use the generated model and compare to the original model.
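
          Schematically, the pattern used below is: subclass LSTMStateTransitionModel, declare the events of interest, and compute event_state and threshold_met from the learned model's output. A minimal skeleton (names such as my_event and some_output are placeholders, not part of the example):

          from progpy.data_models import LSTMStateTransitionModel

          class MyLSTMModel(LSTMStateTransitionModel):
              events = ['my_event']  # events this data-driven model should report

              def event_state(self, x):
                  z = MyLSTMModel.output(self, x)  # event state is computed from the output
                  return {'my_event': max(z['some_output'], 0)}

              def threshold_met(self, x):
                  z = MyLSTMModel.output(self, x)
                  return {'my_event': z['some_output'] <= 0}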

          -
          import matplotlib.pyplot as plt
          -import numpy as np
          -
          -from progpy.data_models import LSTMStateTransitionModel
          -from progpy.models import ThrownObject
          -
          -def run_example():
          -    # -----------------------------------------------------
          -    # Method 1 - manual definition
          -    # In this example we complete the models by manually defining event_state
          -    # and threshold_met as functions of output.
          -    # -----------------------------------------------------
          -    TIMESTEP = 0.01
          -    m = ThrownObject()
          -    def future_loading(t, x=None):
          -        return m.InputContainer({})  # No input for thrown object
          -
          -    # Step 1: Generate additional data
          -    # We will use data generated above, but we also want data at additional timesteps
          -    print('Generating data...')
          -    data = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP, dt=TIMESTEP)
          -    data_half = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2)
          -    data_quarter = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4)
          -    data_twice = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2)
          -    data_four = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*4, dt=TIMESTEP*4)
          -
          -    # Step 2: Data Prep
          -    # We need to add the timestep as an input
          -    u = np.array([[TIMESTEP] for _ in data.inputs])
          -    u_half = np.array([[TIMESTEP/2] for _ in data_half.inputs])
          -    u_quarter = np.array([[TIMESTEP/4] for _ in data_quarter.inputs])
          -    u_twice = np.array([[TIMESTEP*2] for _ in data_twice.inputs])
          -    u_four = np.array([[TIMESTEP*4] for _ in data_four.inputs])
          -
          -    # In this case we are saying that velocity is directly measurable,
          -    # unlike the original model. This is necessary to calculate the events.
          -    # Since the outputs will then match the states, we pass in the states below
          -
          -    u_data = [u, u_half, u_quarter, u_twice, u_four]
          -    z_data = [data.states, data_half.states, data_quarter.states, data_twice.states, data_four.states]
          -
          -    # Step 3: Create model
          -    print('Creating model...')
          -
          -    # Create a subclass of LSTMStateTransitionModel,
          -    # overriding event-related methods and members
          -    class LSTMThrownObject(LSTMStateTransitionModel):
          -        events = [
          -            'falling', # Event- object is falling
          -            'impact' # Event- object has impacted ground
          -        ]
          -
          -        def initialize(self, u=None, z=None):
          -            # Add logic required for thrown object
          -            self.max_x = 0.0
          -            return super().initialize(u, z)
          -
          -        def event_state(self, x):
          -            # Using class name instead of self allows the class to be subclassed
          -            z = LSTMThrownObject.output(self, x)
          -            # Logic from ThrownObject.event_state, using output instead of state
          -            self.max_x = max(self.max_x, z['x'])  # Maximum altitude
          -            return {
          -                'falling': max(z['v']/self.parameters['throwing_speed'],0),  # Throwing speed is max speed
          -                'impact': max(z['x']/self.max_x,0)  # 1 until falling begins, then it's fraction of height
          -            }
          -
          -        def threshold_met(self, x):
          -            z = LSTMThrownObject.output(self, x)
          -            # Logic from ThrownObject.threshold_met, using output instead of state
          -            return {
          -                'falling': z['v'] < 0,
          -                'impact': z['x'] <= 0
          -            }
          -
          -    # Step 4: Generate Model
          -    print('Building model...')
          -    m2 = LSTMThrownObject.from_data(
          -        inputs=u_data,
          -        outputs=z_data,
          -        window=4,
          -        epochs=30,
          -        input_keys = ['dt'],
          -        output_keys = m.states)
          -
          -    # Step 5: Simulate with model
          -    t_counter = 0
          -    x_counter = m.initialize()
          -    def future_loading3(t, x = None):
          -        nonlocal t_counter, x_counter
          -        z = m2.InputContainer({'x_t-1': x_counter['x'], 'v_t-1': x_counter['v'], 'dt': t - t_counter})
          -        x_counter = m.next_state(x_counter, future_loading(t), t - t_counter)
          -        t_counter = t
          -        return z
          -
          -    # Use new dt, not used in training
          -    # Using a dt not used in training will demonstrate the model's
          -    # ability to handle different timesteps not part of training set
          -    data = m.simulate_to_threshold(future_loading, threshold_keys='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3)
          -    results3 = m2.simulate_to_threshold(future_loading3, threshold_keys='impact', dt=TIMESTEP*3, save_freq=TIMESTEP*3)
          -
          -    # Step 6: Compare Results
          -    print('Comparing results...')
          -    print('Predicted impact time:')
          -    print('\tOriginal: ', data.times[-1])
          -    print('\tLSTM: ', results3.times[-1])
          -    data.outputs.plot(title='original model')
          -    results3.outputs.plot(title='generated model')
          -    plt.show()
          -
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

          Gallery generated by Sphinx-Gallery

diff --git a/docs/auto_examples/future_loading.html b/docs/auto_examples/future_loading.html
deleted file mode 100644
index 359ca76a..00000000
--- a/docs/auto_examples/future_loading.html
+++ /dev/null
@@ -1,608 +0,0 @@
          <no title> — ProgPy Python Packages 1.7 documentation

          Example demonstrating ways to use future loading.
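
          Every variant below follows the same contract: a callable that takes the current time (and optionally the state) and returns an InputContainer of loads. The simplest possible version, a constant load, looks like this (a minimal sketch using the same BatteryCircuit model as the example):

          from progpy.models import BatteryCircuit

          m = BatteryCircuit()

          def future_loading(t, x=None):
              # Constant 2 amp draw, independent of time and state
              return m.InputContainer({'i': 2})

          simulated_results = m.simulate_to_threshold(future_loading, save_freq=100, dt=2)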

          -
          from progpy.models import BatteryCircuit
          -from statistics import mean
          -from numpy.random import normal
          -
          -def run_example():
          -    m = BatteryCircuit()
          -
          -    ## Example 1: Variable loading
          -    def future_loading(t, x=None):
          -        # Variable (piece-wise) future loading scheme
          -        if (t < 600):
          -            i = 2
          -        elif (t < 900):
          -            i = 1
          -        elif (t < 1800):
          -            i = 4
          -        elif (t < 3000):
          -            i = 2
          -        else:
          -            i = 3
          -        return m.InputContainer({'i': i})
          -
          -    # Simulate to threshold
          -    options = {
          -        'save_freq': 100,  # Frequency at which results are saved
          -        'dt': 2  # Timestep
          -    }
          -    simulated_results = m.simulate_to_threshold(future_loading, **options)
          -
          -    # Now lets plot the inputs and event_states
          -    simulated_results.inputs.plot(ylabel = 'Variable Load Current (amps)')
          -    simulated_results.event_states.plot(ylabel = 'Variable Load Event State')
          -
          -    ## Example 2: Moving Average loading
          -    # This is useful in cases where you are running recurring simulations, and are measuring the actual load on the system,
          -    # but don't have a good way of predicting it, and you expect loading to be steady
          -
          -    def future_loading(t, x=None):
          -        return future_loading.load
          -    future_loading.load = m.InputContainer({key : 0 for key in m.inputs})
          -
          -    # Lets define another function to handle the moving average logic
          -    window = 10 # Number of elements in window
          -    def moving_avg(i):
          -        for key in m.inputs:
          -            moving_avg.loads[key].append(i[key])
          -            if len(moving_avg.loads[key]) > window:
          -                del moving_avg.loads[key][0]  # Remove first item
          -
          -        # Update future loading eqn
          -        future_loading.load = {key : mean(moving_avg.loads[key]) for key in m.inputs}
          -    moving_avg.loads = {key : [] for key in m.inputs}
          -
          -    # OK, we've setup the logic of the moving average.
          -    # Now lets say you have some measured loads to add
          -    measured_loads = [10, 11.5, 12.0, 8, 2.1, 1.8, 1.99, 2.0, 2.01, 1.89, 1.92, 2.01, 2.1, 2.2]
          -
          -    # We're going to feed these into the future loading eqn
          -    for load in measured_loads:
          -        moving_avg({'i': load})
          -
          -    # Now the future_loading eqn is set up to use the moving average of what's been seen
          -    # Simulate to threshold
          -    simulated_results = m.simulate_to_threshold(future_loading, **options)
          -
          -    # Now lets plot the inputs and event_states
          -    simulated_results.inputs.plot(ylabel = 'Moving Average Current (amps)')
          -    simulated_results.event_states.plot(ylabel = 'Moving Average Event State')
          -
          -    # In this case, this estimate is wrong because loading will not be steady, but at least it would give you an approximation.
          -
          -    # If more measurements are received, the user could estimate the moving average here and then run a new simulation.
          -
          -    ## Example 3: Gaussian Distribution
          -    # In this example we will still be doing a variable loading like the first option, but we are going to use a
          -    # gaussian distribution for each input.
          -
          -    def future_loading(t, x=None):
          -        # Variable (piece-wise) future loading scheme
          -        if (t < 600):
          -            i = 2
          -        elif (t < 900):
          -            i = 1
          -        elif (t < 1800):
          -            i = 4
          -        elif (t < 3000):
          -            i = 2
          -        else:
          -            i = 3
          -        return m.InputContainer({'i': normal(i, future_loading.std)})
          -    future_loading.std = 0.2
          -
          -    # Simulate to threshold
          -    simulated_results = m.simulate_to_threshold(future_loading, **options)
          -
          -    # Now lets plot the inputs and event_states
          -    simulated_results.inputs.plot(ylabel = 'Variable Gaussian Current (amps)')
          -    simulated_results.event_states.plot(ylabel = 'Variable Gaussian Event State')
          -
          -    # Example 4: Gaussian noise increasing with time
          -    # Here we combine the moving average loading from Example 2 with Gaussian noise whose standard
          -    # deviation grows with time. This is realistic because the further from the current time you predict,
          -    # the more uncertainty there is in your prediction.
          -
          -    def future_loading(t, x=None):
          -        std = future_loading.base_std + future_loading.std_slope * (t - future_loading.t)
          -        return {key : normal(future_loading.load[key], std) for key in future_loading.load.keys()}
          -    future_loading.load = {key : 0 for key in m.inputs}
          -    future_loading.base_std = 0.001
          -    future_loading.std_slope = 1e-4
          -    future_loading.t = 0
          -
          -    # Lets define another function to handle the moving average logic
          -    window = 10  # Number of elements in window
          -    def moving_avg(i):
          -        for key in m.inputs:
          -            moving_avg.loads[key].append(i[key])
          -            if len(moving_avg.loads[key]) > window:
          -                del moving_avg.loads[key][0]  # Remove first item
          -
          -        # Update future loading eqn
          -        future_loading.load = {key : mean(moving_avg.loads[key]) for key in m.inputs}
          -    moving_avg.loads = {key : [] for key in m.inputs}
          -
          -    # OK, we've setup the logic of the moving average.
          -    # Now lets say you have some measured loads to add
          -    measured_loads = [10, 11.5, 12.0, 8, 2.1, 1.8, 1.99, 2.0, 2.01, 1.89, 1.92, 2.01, 2.1, 2.2]
          -
          -    # We're going to feed these into the future loading eqn
          -    for load in measured_loads:
          -        moving_avg({'i': load})
          -
          -    # Simulate to threshold
          -    simulated_results = m.simulate_to_threshold(future_loading, **options)
          -
          -    # Now lets plot the inputs and event_states
          -    simulated_results.inputs.plot(ylabel = 'Moving Average Current (amps)')
          -    simulated_results.event_states.plot(ylabel = 'Moving Average Event State')
          -
          -    # In this example future_loading.t has to be updated with current time before each prediction.
          -
          -    # Example 5: Function of state
          -    # Here we're pretending that the input is a function of SOC: current draw increases as SOC decreases (i.e., as EOD approaches).
          -
          -    def future_loading(t, x=None):
          -        if x is not None:
          -            event_state = future_loading.event_state(x)
          -            return m.InputContainer({'i': future_loading.start + (1-event_state['EOD']) * future_loading.slope})  # default
          -        return m.InputContainer({'i': future_loading.start})
          -    future_loading.t = 0
          -    future_loading.event_state = m.event_state
          -    future_loading.slope = 2  # difference between input with EOD = 1 and 0.
          -    future_loading.start = 0.5
          -
          -    # Simulate to threshold
          -    simulated_results = m.simulate_to_threshold(future_loading, **options)
          -
          -    # Now lets plot the inputs and event_states
          -    simulated_results.inputs.plot(ylabel = 'State-Dependent Current (amps)')
          -    simulated_results.event_states.plot(ylabel = 'State-Dependent Event State')
          -
          -
          -    # Show plots
          -    import matplotlib.pyplot as plt
          -    plt.show()
          -
          -# This allows the module to be executed directly
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

          Gallery generated by Sphinx-Gallery

diff --git a/docs/auto_examples/generate_surrogate.html b/docs/auto_examples/generate_surrogate.html
deleted file mode 100644
index cceece25..00000000
--- a/docs/auto_examples/generate_surrogate.html
+++ /dev/null
@@ -1,599 +0,0 @@
          <no title> — ProgPy Python Packages 1.7 documentation

          Example of generating a Dynamic Mode Decomposition surrogate model using the battery model
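
          At its core the workflow is: simulate the high-fidelity model under one or more training load profiles, call generate_surrogate, then use the returned surrogate exactly like the original model. A stripped-down sketch (a single constant training load for brevity; the example below uses richer piecewise profiles):

          from progpy.models import BatteryElectroChemEOD as Battery

          batt = Battery()

          def training_load(t, x=None):
              return batt.InputContainer({'i': 2})  # simplified constant-load training profile

          # Train a DMD surrogate from simulated training data ...
          surrogate = batt.generate_surrogate([training_load], save_freq=1, dt=0.1)

          # ... then use it like any other prognostics model
          results = surrogate.simulate_to_threshold(training_load, save_freq=1)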

          -
          from progpy.models import BatteryElectroChemEOD as Battery
          -
          -import matplotlib.pyplot as plt
          -
          -def run_example():
          -    ### Example 1: Standard DMD Application
          -    ## Step 1: Create a model object
          -    batt = Battery()
          -
          -    ## Step 2: Define future loading functions for training data
          -    # Here, we define two specific loading profiles. These could also be generated programmatically, for as many loading profiles as desired
          -    def future_loading_1(t, x=None):
          -        # Variable (piece-wise) future loading scheme
          -        if (t < 500):
          -            i = 3
          -        elif (t < 1000):
          -            i = 2
          -        elif (t < 1500):
          -            i = 0.5
          -        else:
          -            i = 4.5
          -        return batt.InputContainer({'i': i})
          -
          -    def future_loading_2(t, x=None):
          -        # Variable (piece-wise) future loading scheme
          -        if (t < 300):
          -            i = 2
          -        elif (t < 800):
          -            i = 3.5
          -        elif (t < 1300):
          -            i = 4
          -        elif (t < 1600):
          -            i = 1.5
          -        else:
          -            i = 5
          -        return batt.InputContainer({'i': i})
          -
          -    load_functions = [future_loading_1, future_loading_2]
          -
          -    ## Step 3: generate surrogate model
          -    # Simulation options for training data and surrogate model generation
          -    # Note: here dt is less than save_freq. This means the model will iterate forward multiple steps per saved point.
          -    # This is commonly done to ensure accuracy.
          -    options_surrogate = {
          -        'save_freq': 1, # For DMD, this value is the time step for which the surrogate model is generated
          -        'dt': 0.1, # For DMD, this value is the time step of the training data
          -        'trim_data_to': 0.7 # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model
          -    }
          -
          -    # Set noise in Prognostics Model, default for surrogate model is also this value
          -    batt.parameters['process_noise'] = 0
          -
          -    # Generate surrogate model
          -    surrogate = batt.generate_surrogate(load_functions,**options_surrogate)
          -
          -    ## Step 4: Use surrogate model
          -    # Simulation options for implementation of surrogate model
          -    options_sim = {
          -        'save_freq': 1 # Frequency at which results are saved, or equivalently time step in results
          -    }
          -
          -    # Define loading profile
          -    def future_loading(t, x=None):
          -        if (t < 600):
          -            i = 3
          -        elif (t < 1000):
          -            i = 2
          -        elif (t < 1500):
          -            i = 1.5
          -        else:
          -            i = 4
          -        return batt.InputContainer({'i': i})
          -
          -    # Simulate to threshold using DMD approximation
          -    simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim)
          -
          -    # Calculate Error
          -    MSE = batt.calc_error(simulated_results.times, simulated_results.inputs, simulated_results.outputs)
          -    print('Example 1 MSE:',MSE)
          -    # Not a very good approximation
          -
          -    # Plot results
          -    simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 1 Input')
          -    simulated_results.outputs.plot(ylabel = 'Predicted Outputs (temperature and voltage)',title='Example 1 Predicted Outputs')
          -    simulated_results.event_states.plot(ylabel = 'Predicted State of Charge', title='Example 1 Predicted SOC')
          -
          -    # To visualize the accuracy of the approximation, run the high-fidelity model
          -    options_hf = {
          -        'dt': 0.1,
          -        'save_freq': 1,
          -    }
          -    high_fidelity_results = batt.simulate_to_threshold(future_loading,**options_hf)
          -
          -    # Save voltage results to compare
          -    voltage_dmd = [simulated_results.outputs[iter1]['v'] for iter1 in range(len(simulated_results.times))]
          -    voltage_hf = [high_fidelity_results.outputs[iter2]['v'] for iter2 in range(len(high_fidelity_results.times))]
          -
          -    plt.subplots()
          -    plt.plot(simulated_results.times,voltage_dmd,'-b',label='DMD approximation')
          -    plt.plot(high_fidelity_results.times, voltage_hf,'--r',label='High fidelity result')
          -    plt.legend()
          -    plt.title('Comparing DMD approximation to high-fidelity model results')
          -
          -    ### Example 2: Add process_noise to the surrogate model
          -    # Without re-generating the surrogate model, we can re-define the process_noise to be higher than the high-fidelity model (since the surrogate model is less accurate)
          -    surrogate.parameters['process_noise'] = 1e-04
          -    surrogate.parameters['process_noise_dist'] = 'normal'
          -
          -    # Simulate to threshold using DMD approximation
          -    simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim)
          -
          -    # Plot results
          -    simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 2 Input')
          -    simulated_results.outputs.plot(keys=['v'],ylabel = 'Predicted Voltage (volts)', title='Example 2 Predicted Outputs')
          -    simulated_results.event_states.plot(ylabel = 'Predicted State of Charge', title='Example 2 Predicted SOC')
          -
          -    ### Example 3: Generate surrogate model with a subset of internal states, inputs, and/or outputs
          -    # Note: we use the same loading profiles as defined in Ex. 1
          -
          -    ## Generate surrogate model
          -    # Simulation options for training data and surrogate model generation
          -    options_surrogate = {
          -        'save_freq': 1, # For DMD, this value is the time step for which the surrogate model is generated
          -        'dt': 0.1, # For DMD, this value is the time step of the training data
          -        'trim_data_to': 1, # Value between 0 and 1 that determines the fraction of data resulting from simulate_to_threshold that is used to train DMD surrogate model
          -        'state_keys': ['Vsn','Vsp','tb'], # Define internal states to be included in surrogate model
          -        'output_keys': ['v'] # Define outputs to be included in surrogate model
          -    }
          -
          -    # Set noise in Prognostics Model, default for surrogate model is also this value
          -    batt.parameters['process_noise'] = 0
          -
          -    # Generate surrogate model
          -    surrogate = batt.generate_surrogate(load_functions,**options_surrogate)
          -
          -    ## Use surrogate model
          -    # The surrogate model can now be used anywhere the original model is used. It is interchangeable with the original model.
          -    # The surrogate model results will be faster but less accurate than the original model.
          -
          -    # Simulation options for implementation of surrogate model
          -    options_sim = {
          -        'save_freq': 1 # Frequency at which results are saved, or equivalently time step in results
          -    }
          -
          -    # Simulate to threshold using DMD approximation
          -    simulated_results = surrogate.simulate_to_threshold(future_loading,**options_sim)
          -
          -    # Calculate Error
          -    MSE = batt.calc_error(simulated_results.times, simulated_results.inputs, simulated_results.outputs)
          -    print('Example 3 MSE:',MSE)
          -
          -    # Plot results
          -    simulated_results.inputs.plot(ylabel = 'Current (amps)',title='Example 3 Input')
          -    simulated_results.outputs.plot(ylabel = 'Outputs (voltage)',title='Example 3 Predicted Output')
          -    simulated_results.event_states.plot(ylabel = 'State of Charge',title='Example 3 Predicted SOC')
          -    plt.show()
          -
          -# This allows the module to be executed directly
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

          Gallery generated by Sphinx-Gallery

diff --git a/docs/auto_examples/growth.html b/docs/auto_examples/growth.html
deleted file mode 100644
index e2c570a2..00000000
--- a/docs/auto_examples/growth.html
+++ /dev/null
@@ -1,524 +0,0 @@
          <no title> — ProgPy Python Packages 1.7 documentation

          Example demonstrating the Paris Law Crack Growth Equation
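
          For context, the Paris law models fatigue crack growth per load cycle as da/dN = C(ΔK)^m with ΔK = k_max - k_min; the 'c' and 'm' parameters estimated below presumably play the roles of C and m. A quick numeric illustration (C and m values here are chosen only for illustration, not taken from the model):

          def paris_law_rate(k_min, k_max, C, m):
              # Crack growth per load cycle: da/dN = C * (delta K)^m
              dK = k_max - k_min
              return C * dK ** m

          print(paris_law_rate(k_min=12, k_max=24, C=1e-10, m=3.5))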

          -
          from progpy.models.paris_law import ParisLawCrackGrowth
          -import matplotlib.pyplot as plt
          -import csv
          -import os
          -
          -def run_example():
          -    # Step 1: Create a model object
          -    m = ParisLawCrackGrowth(process_noise = 0)
          -
          -    # Step 2: Define future loading function
          -    def future_loading(t, x=None):
          -        # Variable (piece-wise) future loading scheme
          -        # Inputs are ['k_min', 'k_max']
          -        if (t < 500):
          -            k_min = 12
          -            k_max = 24
          -        elif (t < 750):
          -            k_min = 8
          -            k_max = 32
          -        else:
          -            k_min = 0
          -            k_max = 28
          -        return m.InputContainer({'k_min': k_min, 'k_max': k_max})
          -
          -    # Step 3: Estimate parameters
          -    # We do not know the model parameters for this system,
          -    # so we need to estimate it using data collected from the system
          -    # First we have to import some data from the real system
          -    # This is what we use to estimate parameters
          -    times = []
          -    inputs = []
          -    outputs = []
          -
          -    # Find the file path
          -    csv_dir = os.path.join(os.path.dirname(__file__), 'growth.csv')
          -
          -    # Read the csv file
          -    try:
          -        with open(csv_dir, newline='') as csvfile:
          -            data = csv.reader(csvfile, delimiter=',', quotechar='|' , quoting=csv.QUOTE_NONNUMERIC)
          -            for row in data:
          -                times.append(row[0])
          -                inputs.append({'k_min': row[1], 'k_max': row[2]})
          -                outputs.append({'c_l': row[3]})
          -    except FileNotFoundError:
          -        print("No data file found")
          -
          -    # Estimates the model parameters
          -    keys = ['c', 'm']
          -
          -    print('Model configuration before')
          -    for key in keys:
          -        print("-", key, m.parameters[key])
          -    print(' Error: ', m.calc_error(times, inputs, outputs, dt=10))
          -
          -    m.estimate_params([(times, inputs, outputs)], keys, dt=10)
          -
          -    print('\nOptimized configuration')
          -    for key in keys:
          -        print("-", key, m.parameters[key])
          -    print(' Error: ', m.calc_error(times, inputs, outputs, dt=10))
          -
          -    # Step 4: Simulate to threshold
          -    print('\n\n------------------------------------------------')
          -    print('Simulating to threshold\n\n')
          -    options = {
          -        'save_freq': 10, # Frequency at which results are saved
          -        'dt': 10, # Timestep
          -        'print': True,
          -        'horizon': 1e5, # Horizon
          -    }
          -
          -    (times, inputs, _, outputs, event_states) = m.simulate_to_threshold(future_loading, **options)
          -
          -    # Step 5: Plot Results
          -    # crack length
          -    # plot event state
          -
          -    inputs.plot(ylabel='Stress Intensity')
          -    event_states.plot(ylabel= 'CGF')
          -    outputs.plot(ylabel= {'c_l': "Crack Length"}, compact= False)
          -    plt.show()
          -
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

          Gallery generated by Sphinx-Gallery

diff --git a/docs/auto_examples/index.html b/docs/auto_examples/index.html
deleted file mode 100644
index 099df423..00000000
--- a/docs/auto_examples/index.html
+++ /dev/null
@@ -1,489 +0,0 @@
          Example Gallery — ProgPy Python Packages 1.7 documentation

          Example Gallery

diff --git a/docs/auto_examples/linear_model.html b/docs/auto_examples/linear_model.html
deleted file mode 100644
index 9658f421..00000000
--- a/docs/auto_examples/linear_model.html
+++ /dev/null
@@ -1,541 +0,0 @@
          <no title> — ProgPy Python Packages 1.7 documentation

          This example shows the use of the LinearModel class, a subclass of PrognosticsModel for models that can be described as a linear time series.

          -

          The model is used in a simulation, and the state is printed every second
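
          Concretely, a LinearModel is defined by matrices satisfying dx/dt = Ax + Bu + E, z = Cx + D, and event_state = Fx + G (the same relations listed in the comments below); the ThrownObject here supplies A, C, and E, has no inputs, and sets F = None because event_state is overridden as a method instead.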

          -
          from prog_models import LinearModel
          -import numpy as np
          -
          -class ThrownObject(LinearModel):
          -    """
          -    Model that simulates an object thrown into the air without air resistance
          -
          -    Events (2)
          -        | falling: The object is falling
          -        | impact: The object has hit the ground
          -
          -    Inputs/Loading: (0)
          -
          -    States: (2)
          -        | x: Position in space (m)
          -        | v: Velocity in space (m/s)
          -
          -    Outputs/Measurements: (1)
          -        | x: Position in space (m)
          -
          -    Keyword Args
          -    ------------
          -        process_noise : Optional, float or Dict[str, float]
          -          Process noise (applied at dx/next_state).
          -          Can be number (e.g., .2) applied to every state, a dictionary of values for each
          -          state (e.g., {'x1': 0.2, 'x2': 0.3}), or a function (x) -> x
          -        process_noise_dist : Optional, String
          -          distribution for process noise (e.g., normal, uniform, triangular)
          -        measurement_noise : Optional, float or Dict[str, float]
          -          Measurement noise (applied in output eqn).
          -          Can be number (e.g., .2) applied to every output, a dictionary of values for each
          -          output (e.g., {'z1': 0.2, 'z2': 0.3}), or a function (z) -> z
          -        measurement_noise_dist : Optional, String
          -          distribution for measurement noise (e.g., normal, uniform, triangular)
          -        g : Optional, float
          -            Acceleration due to gravity (m/s^2). Default is -9.81 m/s^2 (standard gravity, pointing downward)
          -        thrower_height : Optional, float
          -            Height of the thrower (m). Default is 1.83 m
          -        throwing_speed : Optional, float
          -            Speed at which the ball is thrown (m/s). Default is 40 m/s
          -    """
          -
          -    inputs = []  # no inputs, no way to control
          -    states = [
          -        'x',     # Position (m)
          -        'v'      # Velocity (m/s)
          -        ]
          -    outputs = [
          -        'x'      # Position (m)
          -    ]
          -    events = [
          -        'falling', # Event- object is falling
          -        'impact' # Event- object has impacted ground
          -    ]
          -
          -    # These are the core of the linear model.
          -    # Linear models defined by the following equations:
          -    #   * dx/dt = Ax + Bu + E
          -    #   * z = Cx + D
          -    #   * event states = Fx + G
          -    A = np.array([[0, 1], [0, 0]]) # dx/dt = Ax + Bu + E
          -    E = np.array([[0], [-9.81]]) # Acceleration due to gravity (m/s^2)
          -    C = np.array([[1, 0]]) # z = Cx + D
          -    F = None # Will override method
          -
          -    # The Default parameters. Overwritten by passing parameters dictionary into constructor
          -    default_parameters = {
          -        'thrower_height': 1.83,  # m
          -        'throwing_speed': 40,  # m/s
          -        'g': -9.81  # Acceleration due to gravity in m/s^2
          -    }
          -
          -    def initialize(self, u=None, z=None):
          -        return self.StateContainer({
          -            'x': self.parameters['thrower_height'],  # Thrown, so initial altitude is height of thrower
          -            'v': self.parameters['throwing_speed']  # Velocity at which the ball is thrown - this guy is a professional baseball pitcher
          -            })
          -
-    # This is actually optional. Leaving threshold_met empty will use the event state to define thresholds.
          -    #  Threshold = Event State == 0. However, this implementation is more efficient, so we included it
          -    def threshold_met(self, x):
          -        return {
          -            'falling': x['v'] < 0,
          -            'impact': x['x'] <= 0
          -        }
          -
          -    def event_state(self, x):
          -        x_max = x['x'] + np.square(x['v'])/(-self.parameters['g']*2) # Use speed and position to estimate maximum height
          -        return {
          -            'falling': np.maximum(x['v']/self.parameters['throwing_speed'],0),  # Throwing speed is max speed
          -            'impact': np.maximum(x['x']/x_max,0) if x['v'] < 0 else 1  # 1 until falling begins, then it's fraction of height
          -        }
          -
          -def run_example():
          -    m = ThrownObject()
          -    def future_loading(t, x=None):
          -        return m.InputContainer({})  # No loading
          -    m.simulate_to_threshold(future_loading, print = True, save_freq=1, threshold_keys='impact', dt=0.1)
          -
          -# This allows the module to be executed directly
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -
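The noise keyword arguments listed in the docstring above are passed when the model is constructed. A minimal sketch, with illustrative values only (these particular numbers are not part of the original example):

    m_noisy = ThrownObject(
        process_noise=0.2,               # same standard deviation applied to every state
        process_noise_dist='uniform',    # named distribution, as described in the docstring
        measurement_noise={'x': 0.5})    # per-output dictionary form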

          Total running time of the script: ( 0 minutes 0.000 seconds)

Gallery generated by Sphinx-Gallery

-Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/lstm_model.html b/docs/auto_examples/lstm_model.html
deleted file mode 100644
index 1c31cb77..00000000
--- a/docs/auto_examples/lstm_model.html
+++ /dev/null
@@ -1,630 +0,0 @@
-<no title> — ProgPy Python Packages 1.7 documentation

Example building an LSTMStateTransitionModel from data. This is a simple example of how to use the LSTMStateTransitionModel class.

          -

In this example, we generate fake data using the ThrownObject model. This is a case where we're generating a surrogate model from the physics-based model. For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment), you'll replace that generated data with your own. We then use the generated model and compare it to the original model.

          -

          Finally, we repeat the exercise with data from the more complex BatteryElectroChemEOD model.
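If you already have recorded data (e.g., from a testbed) rather than a simulation result, the same from_data call used below can be given plain arrays. A minimal sketch with hypothetical placeholder arrays (u_recorded and z_recorded are stand-ins, not part of the original example; this assumes from_data accepts arrays for outputs just as the later examples pass arrays for inputs):

    import numpy as np
    from progpy.data_models import LSTMStateTransitionModel

    u_recorded = np.random.rand(500, 1)   # shape (n_samples, n_inputs), e.g., measured current
    z_recorded = np.random.rand(500, 2)   # shape (n_samples, n_outputs), e.g., temperature and voltage
    m_recorded = LSTMStateTransitionModel.from_data(
        inputs=[u_recorded],              # one list entry per run
        outputs=[z_recorded],
        window=12,
        epochs=30,
        input_keys=['i'],
        output_keys=['t', 'v'])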

          -
          import matplotlib.pyplot as plt
          -import numpy as np
          -
          -from progpy.data_models import LSTMStateTransitionModel
          -from progpy.models import ThrownObject, BatteryElectroChemEOD
          -
          -def run_example():
          -    # -----------------------------------------------------
          -    # Example 1- set timestep
          -    # Here we will create a model for a specific timestep.
          -    # The model will only work with that timestep
          -    # This is useful if you know the timestep you would like to use
          -    # -----------------------------------------------------
          -    TIMESTEP = 0.01
          -
          -    # Step 1: Generate data
          -    # We'll use the ThrownObject model to generate data.
          -    # For cases where you're generating a model from data (e.g., collected from a testbed or a real-world environment),
          -    # you'll replace that generated data with your own.
          -    print('Generating data')
          -    m = ThrownObject()
          -
          -    def future_loading(t, x=None):
          -        return m.InputContainer({})  # No input for thrown object
          -
          -    data = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP, dt=TIMESTEP)
          -
          -    # Step 2: Generate model
          -    # We'll use the LSTMStateTransitionModel class to generate a model from the data.
          -    print('Building model...')
          -    m2 = LSTMStateTransitionModel.from_data(
          -        inputs = [data.inputs],
          -        outputs = [data.outputs],
          -        window=4,
          -        epochs=30,
          -        output_keys = ['x'])
          -
          -    # Step 3: Use model to simulate_to time of threshold
          -    print('Simulating with generated model...')
          -
          -    t_counter = 0
          -    x_counter = m.initialize()
          -    def future_loading2(t, x = None):
          -        # Future Loading is a bit complicated here
          -        # Loading for the resulting model includes the data inputs,
          -        # and the output from the last timestep
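-        # (The generated model exposes these prior outputs as inputs named '<output>_t-1';
-        #  they appear later in this example as 'x_t-1' and, for the battery, 't_t-1' / 'v_t-1'.)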
          -        nonlocal t_counter, x_counter
          -        z = m.output(x_counter)
          -        z = m2.InputContainer(z.matrix)
          -        x_counter = m.next_state(x_counter, future_loading(t), t - t_counter)
          -        t_counter = t
          -        return z
          -
          -    results2 = m2.simulate_to(data.times[-1], future_loading2, dt=TIMESTEP, save_freq=TIMESTEP)
          -
          -    # Step 4: Compare model to original model
          -    print('Comparing results...')
          -    data.outputs.plot(title='original model')
          -    results2.outputs.plot(title='generated model')
          -    plt.show()
          -
          -    # -----------------------------------------------------
          -    # Example 2- variable timestep
          -    # Here we will create a model to work with any timestep
          -    # We do this by adding timestep as a variable in the model
          -    # -----------------------------------------------------
          -
          -    # Step 1: Generate additional data
          -    # We will use data generated above, but we also want data at additional timesteps
          -    print('\n------------------------------------------\nExample 2...')
          -    print('Generating additional data...')
          -    data_half = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/2, dt=TIMESTEP/2)
          -    data_quarter = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP/4, dt=TIMESTEP/4)
          -    data_twice = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*2, dt=TIMESTEP*2)
          -    data_four = m.simulate_to_threshold(future_loading, threshold_keys='impact', save_freq=TIMESTEP*4, dt=TIMESTEP*4)
          -
          -    # Step 2: Data Prep
-    # We need to add the timestep as an input
          -    u = np.array([[TIMESTEP] for _ in data.inputs])
          -    u_half = np.array([[TIMESTEP/2] for _ in data_half.inputs])
          -    u_quarter = np.array([[TIMESTEP/4] for _ in data_quarter.inputs])
          -    u_twice = np.array([[TIMESTEP*2] for _ in data_twice.inputs])
          -    u_four = np.array([[TIMESTEP*4] for _ in data_four.inputs])
          -
          -    input_data = [u, u_half, u_quarter, u_twice, u_four]
          -    output_data = [data.outputs, data_half.outputs, data_quarter.outputs, data_twice.outputs, data_four.outputs]
          -
          -    # Step 3: Generate Model
          -    print('Building model...')
          -    m3 = LSTMStateTransitionModel.from_data(
          -        inputs = input_data,
          -        outputs = output_data,
          -        window=4,
          -        epochs=30,
          -        input_keys = ['dt'],
          -        output_keys = ['x'])
          -    # Note, since we're generating from a model, we could also have done this:
          -    # m3 = LSTMStateTransitionModel.from_model(
          -    #     m,
          -    #     [future_loading for _ in range(5)],
          -    #     dt = [TIMESTEP, TIMESTEP/2, TIMESTEP/4, TIMESTEP*2, TIMESTEP*4],
          -    #     window=4,
          -    #     epochs=30)
          -
          -    # Step 4: Simulate with model
          -    t_counter = 0
          -    x_counter = m.initialize()
          -    def future_loading3(t, x = None):
          -        nonlocal t_counter, x_counter
          -        z = m3.InputContainer({'x_t-1': x_counter['x'], 'dt': t - t_counter})
          -        x_counter = m.next_state(x_counter, future_loading(t), t - t_counter)
          -        t_counter = t
          -        return z
          -
          -    # Use new dt, not used in training
          -    # Using a dt not used in training will demonstrate the model's
          -    # ability to handle different timesteps not part of training set
          -    data = m.simulate_to(data.times[-1], future_loading, dt=TIMESTEP*3, save_freq=TIMESTEP*3)
          -    results3 = m3.simulate_to(data.times[-1], future_loading3, dt=TIMESTEP*3, save_freq=TIMESTEP*3)
          -
          -    # Step 5: Compare Results
          -    print('Comparing results...')
          -    data.outputs.plot(title='original model')
          -    results3.outputs.plot(title='generated model')
          -    plt.show()
          -
          -    # -----------------------------------------------------
          -    # Example 3- More complicated system
          -    # Here we will create a model for a more complicated system
          -    # For this example we will use the BatteryElectroChemEOD model
          -    # -----------------------------------------------------
          -    print('\n------------------------------------------\nExample 3...')
          -    print('Generating data...')
          -    batt = BatteryElectroChemEOD(process_noise = 0, measurement_noise=0)
          -    future_loading_eqns = [lambda t, x=None, load=load: batt.InputContainer({'i': 1+1.5*load}) for load in range(6)]
          -    # Generate data with different loading and step sizes
-    # Adding the step size as an element of the input
          -    input_data = []
          -    output_data = []
          -    for i in range(9):
          -        dt = i/3+0.25
          -        for loading_eqn in future_loading_eqns:
          -            d = batt.simulate_to_threshold(loading_eqn, save_freq=dt, dt=dt)
          -            input_data.append(np.array([np.hstack((u_i.matrix[:][0].T, [dt])) for u_i in d.inputs], dtype=float))
          -            output_data.append(d.outputs)
          -
          -    # Step 2: Generate Model
          -    print('Building model...')
          -    m_batt = LSTMStateTransitionModel.from_data(
          -        inputs = input_data,
          -        outputs = output_data,
          -        window=12,
          -        epochs=3,
          -        units=64,  # Additional units given the increased complexity of the system
          -        input_keys = ['i', 'dt'],
          -        output_keys = ['t', 'v'])
          -
          -    # Step 3: Simulate with model
          -    t_counter = 0
          -    x_counter = batt.initialize()
          -
          -    def future_loading(t, x=None):
          -        return batt.InputContainer({'i': 3})
          -
          -    def future_loading2(t, x = None):
          -        nonlocal t_counter, x_counter
          -        z = batt.output(x_counter)
          -        z = m_batt.InputContainer({'i': 3, 't_t-1': z['t'], 'v_t-1': z['v'], 'dt': t - t_counter})
          -        x_counter = batt.next_state(x_counter, future_loading(t), t - t_counter)
          -        t_counter = t
          -        return z
          -
          -    # Use a new dt, not used in training.
          -    # Using a dt not used in training will demonstrate the model's
          -    # ability to handle different timesteps not part of training set
          -    data = batt.simulate_to_threshold(future_loading, dt=1, save_freq=1)
          -    results = m_batt.simulate_to(data.times[-1], future_loading2, dt=1, save_freq=1)
          -
          -    # Step 5: Compare Results
          -    print('Comparing results...')
          -    data.outputs.plot(title='original model', compact=False)
          -    results.outputs.plot(title='generated model', compact=False)
          -    plt.show()
          -
          -    # This last example isn't a perfect fit, but it matches the behavior pretty well
          -    # Especially the voltage curve
          -
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

Gallery generated by Sphinx-Gallery

-Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/matrix_model.html b/docs/auto_examples/matrix_model.html
deleted file mode 100644
index 95dd46f9..00000000
--- a/docs/auto_examples/matrix_model.html
+++ /dev/null
@@ -1,536 +0,0 @@
-<no title> — ProgPy Python Packages 1.7 documentation

This example shows the use of the advanced feature - matrix models. Matrix models represent the state of the system using matrices instead of dictionaries. The provided model.StateContainer, InputContainer, and OutputContainer can be treated as dictionaries but use an underlying matrix. This is important for applications such as surrogate and machine-learned models, where the state is represented by a tensor and operations are performed as matrix operations. Simulation functions propagate the state using the matrix form, preventing the inefficiency of having to convert to and from dictionaries.

          -

In this example, a model is designed to simulate a thrown object using matrix notation (instead of dictionary notation as in the standard model). The implementation of the model is comparable to a standard model, except that it uses x.matrix, u.matrix, and z.matrix to compute matrix operations within each function.
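A quick sketch of the container behavior described above, assuming a ThrownObject instance like the one constructed in the example below (called thrown_object here); the dictionary-style view and the .matrix view refer to the same underlying data, with columns ordered as in the model's key lists (the comments in the code note this ordering for inputs):

    x = thrown_object.StateContainer({'x': 1.83, 'v': 40})
    position = x['x']       # dictionary-style access
    vector = x.matrix       # underlying column vector used in the matrix equations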

          -
          def run_example():
          -    from prog_models import PrognosticsModel
          -    import numpy as np
          -
          -    # Define the model
          -    class ThrownObject(PrognosticsModel):
          -        # Define the model properties, this is exactly the same as for a regular model.
          -
          -        inputs = []  # no inputs, no way to control
          -        states = [
          -            'x',    # Position (m)
          -            'v'    # Velocity (m/s)
          -            ]
          -        outputs = [
          -            'x'     # Position (m)
          -        ]
          -        events = [
          -            'falling',  # Event- object is falling
          -            'impact'    # Event- object has impacted ground
          -        ]
          -
          -        is_vectorized = True
          -
          -        # The Default parameters. Overwritten by passing parameters dictionary into constructor
          -        default_parameters = {
          -            'thrower_height': 1.83,  # m
          -            'throwing_speed': 40,  # m/s
          -            'g': -9.81,  # Acceleration due to gravity in m/s^2
          -            'process_noise': 0.0  # amount of noise in each step
          -        }
          -
          -        # Define the model equations
          -        def initialize(self, u = None, z = None):
          -            # Note: states are returned using StateContainer
          -            return self.StateContainer({
          -                'x': self.parameters['thrower_height'],
          -                'v': self.parameters['throwing_speed']})
          -
          -        def next_state(self, x, u, dt):
          -            # Here we will use the matrix version for each variable
          -            # Note: x.matrix is a column vector
          -            # Note: u.matrix is a column vector
          -            #   and u.matrix is in the order of model.inputs, above
          -
          -            A = np.array([[0, 1], [0, 0]])  # State transition matrix
          -            B = np.array([[0], [self.parameters['g']]])  # Acceleration due to gravity
          -            x.matrix += (np.matmul(A, x.matrix) + B) * dt
          -
          -            return x
          -
          -        def output(self, x):
-        # Note- states can still be accessed as a dictionary
          -            return self.OutputContainer({'x': x['x']})
          -
-        # This is actually optional. Leaving threshold_met empty will use the event state to define thresholds.
          -        #  Threshold = Event State == 0. However, this implementation is more efficient, so we included it
          -        def threshold_met(self, x):
          -            return {
          -                'falling': x['v'] < 0,
          -                'impact': x['x'] <= 0
          -            }
          -
          -        def event_state(self, x):
          -            x_max = x['x'] + np.square(x['v'])/(-self.parameters['g']*2) # Use speed and position to estimate maximum height
          -            x_max = np.where(x['v'] > 0, x['x'], x_max) # 1 until falling begins
          -            return {
          -                'falling': np.maximum(x['v']/self.parameters['throwing_speed'],0),  # Throwing speed is max speed
          -                'impact': np.maximum(x['x']/x_max,0)  # then it's fraction of height
          -            }
          -
          -    # Now we can use the model
          -    # Create the model
          -    thrown_object = ThrownObject()
          -
          -    # Use the model
          -    x = thrown_object.initialize()
          -    print('State at 0.1 seconds: ', thrown_object.next_state(x, {}, 0.1))
          -
          -    # But you can also initialize state directly, like so:
          -    x = thrown_object.StateContainer({'x': 1.93, 'v': 40})
          -    print('State at 0.1 seconds: ', thrown_object.next_state(x, None, 0.1))
          -
          -    # Now lets use it for simulation.
          -    def future_loading(t, x=None):
          -        return thrown_object.InputContainer({})
          -
          -    thrown_object.simulate_to_threshold(
          -        future_loading,
          -        print = True,
          -        threshold_keys = 'impact',
          -        dt = 0.1,
          -        save_freq = 1)
          -
          -# This allows the module to be executed directly
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

Gallery generated by Sphinx-Gallery

-Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/model_gen.html b/docs/auto_examples/model_gen.html
deleted file mode 100644
index 33e4ebdf..00000000
--- a/docs/auto_examples/model_gen.html
+++ /dev/null
@@ -1,515 +0,0 @@
-<no title> — ProgPy Python Packages 1.7 documentation

          Example generating models from constituent parts.

          -

          The model used for this example is that of an object thrown into the air, predicting the impact event

          -
          # Deriv prog model was selected because the model can be described as x' = x + dx*dt
          -from prog_models import PrognosticsModel
          -
          -def run_example():
          -    # Step 1: Define keys
          -    keys = {
          -        'inputs': [], # no inputs, no way to control
          -        'states': [
          -            'x', # Position (m)
          -            'v'  # Velocity (m/s)
          -            ],
          -        'outputs': [ # Anything we can measure
          -            'x' # Position (m)
          -        ],
          -        'events': [
          -            'falling', # Event- object is falling
          -            'impact' # Event- object has impacted ground
          -        ]
          -    }
          -
          -    thrower_height = 1.83 # m
          -    throwing_speed = 40 # m/s
          -    # Step 2: Define initial state
          -    def initialize(u, z):
          -        return {
          -            'x': thrower_height, # Thrown, so initial altitude is height of thrower
-            'v': throwing_speed # Velocity at which the ball is thrown - this guy is a professional baseball pitcher
          -            }
          -
          -    # Step 3: Define dx equation
          -    def dx(x, u):
          -        return {
          -            'x': x['v'],
          -            'v': -9.81 # Acceleration of gravity
          -        }
          -
-    # Step 4: Define equation for calculating output/measurement
          -    def output(x):
          -        return {
          -            'x': x['x']
          -        }
          -
-    # Step 5: Define threshold equation
          -    def threshold_met(x):
          -        return {
          -            'falling': x['v'] < 0,
          -            'impact': x['x'] <= 0
          -        }
          -
-    # Step 6 (optional): Define event state equation- measurement of how close you are to threshold (0-1)
          -    def event_state(x):
          -        event_state.max_x = max(event_state.max_x, x['x']) # Maximum altitude
          -        return {
          -            'falling': max(x['v']/throwing_speed,0), # Throwing speed is max speed
          -            'impact': max(x['x']/event_state.max_x,0) # 1 until falling begins, then it's fraction of height
          -        }
          -    event_state.max_x = 0
          -
-    # Step 7: Generate model
          -    m = PrognosticsModel.generate_model(keys, initialize, output, event_state_eqn = event_state, threshold_eqn=threshold_met, dx_eqn=dx)
          -
-    # Step 8: Setup for simulation
          -    def future_load(t, x=None):
          -        return {}
          -
-    # Step 9: Simulate to impact
          -    event = 'impact'
          -    simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt = 0.005, save_freq=1, print = True)
          -
          -    # Print flight time
          -    print('The object hit the ground in {} seconds'.format(round(simulated_results.times[-1],2)))
          -
          -# This allows the module to be executed directly
          -if __name__=='__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

Gallery generated by Sphinx-Gallery

-Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/new_model.html b/docs/auto_examples/new_model.html
deleted file mode 100644
index 07b0af76..00000000
--- a/docs/auto_examples/new_model.html
+++ /dev/null
@@ -1,551 +0,0 @@
-<no title> — ProgPy Python Packages 1.7 documentation

          Example defining and testing a new model.

          -
          from prog_models import PrognosticsModel
          -
          -
          -class ThrownObject(PrognosticsModel):
          -    """
-    Model that simulates an object thrown into the air without air resistance
          -    """
          -
          -    inputs = [] # no inputs, no way to control
          -    states = [
          -        'x', # Position (m)
          -        'v'  # Velocity (m/s)
          -        ]
          -    outputs = [ # Anything we can measure
          -        'x' # Position (m)
          -    ]
          -    events = [
          -        'falling', # Event- object is falling
          -        'impact' # Event- object has impacted ground
          -    ]
          -
          -    # The Default parameters. Overwritten by passing parameters dictionary into constructor
          -    default_parameters = {
          -        'thrower_height': 1.83,  # m
          -        'throwing_speed': 40,  # m/s
          -        'g': -9.81,  # Acceleration due to gravity in m/s^2
          -        'process_noise': 0.0  # amount of noise in each step
          -    }
          -
          -    def initialize(self, u, z):
          -        self.max_x = 0.0
          -        return self.StateContainer({
          -            'x': self.parameters['thrower_height'],  # Thrown, so initial altitude is height of thrower
          -            'v': self.parameters['throwing_speed']  # Velocity at which the ball is thrown - this guy is a professional baseball pitcher
          -            })
          -
          -    def dx(self, x, u):
          -        return self.StateContainer({'x': x['v'],
          -                'v': self.parameters['g']})  # Acceleration of gravity
          -
          -    def output(self, x):
          -        return self.OutputContainer({'x': x['x']})
          -
-    # This is actually optional. Leaving threshold_met empty will use the event state to define thresholds.
          -    #  Threshold = Event State == 0. However, this implementation is more efficient, so we included it
          -    def threshold_met(self, x):
          -        return {
          -            'falling': x['v'] < 0,
          -            'impact': x['x'] <= 0
          -        }
          -
          -    def event_state(self, x):
          -        self.max_x = max(self.max_x, x['x'])  # Maximum altitude
          -        return {
          -            'falling': max(x['v']/self.parameters['throwing_speed'],0),  # Throwing speed is max speed
          -            'impact': max(x['x']/self.max_x,0)  # 1 until falling begins, then it's fraction of height
          -        }
          -
          -def run_example():
          -    # Demo model
          -    # Step 1: Create instance of model
          -    m = ThrownObject()
          -
          -    # Step 2: Setup for simulation
          -    def future_load(t, x=None):
          -        return m.InputContainer({})  # No inputs, no way to control
          -
          -    # Step 3: Simulate to impact
          -    event = 'impact'
          -    simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1, print = True)
          -
          -    # Print flight time
          -    print('The object hit the ground in {} seconds'.format(round(simulated_results.times[-1],2)))
          -
          -    # OK, now lets compare performance on different heavenly bodies.
-    # This requires that we update the configuration
          -    grav_moon = -1.62
          -
          -    # The first way to change the configuration is to pass in your desired config into construction of the model
          -    m = ThrownObject(g = grav_moon)
          -    simulated_moon_results = m.simulate_to_threshold(future_load, threshold_keys=[event], options={'dt':0.005, 'save_freq':1})
          -
          -    grav_mars = -3.711
          -    # You can also update the parameters after it's constructed
          -    m.parameters['g'] = grav_mars
          -    simulated_mars_results = m.simulate_to_threshold(future_load, threshold_keys=[event], options={'dt':0.005, 'save_freq':1})
          -
          -    grav_venus = -8.87
          -    m.parameters['g'] = grav_venus
          -    simulated_venus_results = m.simulate_to_threshold(future_load, threshold_keys=[event], options={'dt':0.005, 'save_freq':1})
          -
          -    print('Time to hit the ground: ')
          -    print('\tvenus: {}s'.format(round(simulated_venus_results.times[-1],2)))
          -    print('\tearth: {}s'.format(round(simulated_results.times[-1],2)))
          -    print('\tmars: {}s'.format(round(simulated_mars_results.times[-1],2)))
          -    print('\tmoon: {}s'.format(round(simulated_moon_results.times[-1],2)))
          -
          -    # We can also simulate until any event is met by neglecting the threshold_keys argument
          -    simulated_results = m.simulate_to_threshold(future_load, options={'dt':0.005, 'save_freq':1})
          -    threshs_met = m.threshold_met(simulated_results.states[-1])
          -    for (key, met) in threshs_met.items():
          -        if met:
-            event_occurred = key
-    print('\nThe event that occurred first: ', event_occurred)
-    # It falls before it hits the ground, obviously
          -
          -    # Metrics can be analyzed from the simulation results. For example: monotonicity
          -    print('\nMonotonicity: ', simulated_results.event_states.monotonicity())
          -
          -# This allows the module to be executed directly
          -if __name__=='__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

Gallery generated by Sphinx-Gallery

-Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/noise.html b/docs/auto_examples/noise.html
deleted file mode 100644
index e8efebb8..00000000
--- a/docs/auto_examples/noise.html
+++ /dev/null
@@ -1,546 +0,0 @@
-<no title> — ProgPy Python Packages 1.7 documentation

          Example demonstrating approaches for adding and handling model noise

          -
          import matplotlib.pyplot as plt
          -
          -from progpy.models.thrown_object import ThrownObject
          -
          -def run_example():
          -    # Define future loading
          -    def future_load(t=None, x=None):
          -        # The thrown object model has no inputs- you cannot load the system (i.e., affect it once it's in the air)
          -        # So we return an empty input container
          -        return m.InputContainer({})
          -
          -    # Define configuration for simulation
          -    config = {
          -        'threshold_keys': 'impact', # Simulate until the thrown object has impacted the ground
          -        'dt': 0.005, # Time step (s)
          -        'save_freq': 0.5, # Frequency at which results are saved (s)
          -    }
          -
          -    # Define a function to print the results - will be used later
          -    def print_results(simulated_results):
          -        # Print results
          -        print('states:')
          -        for (t,x) in zip(simulated_results.times, simulated_results.states):
          -            print('\t{:.2f}s: {}'.format(t, x))
          -
          -        print('outputs:')
          -        for (t,x) in zip(simulated_results.times, simulated_results.outputs):
          -            print('\t{:.2f}s: {}'.format(t, x))
          -
          -        print('\nimpact time: {:.2f}s'.format(simulated_results.times[-1]))
          -        # The simulation stopped at impact, so the last element of times is the impact time
          -
          -        # Plot results
          -        simulated_results.states.plot()
          -
          -    # Ex1: No noise
          -    m = ThrownObject(process_noise = False)
          -    simulated_results = m.simulate_to_threshold(future_load, **config)
          -    print_results(simulated_results)
          -    plt.title('Ex1: No noise')
          -
          -    # Ex2: with noise - same noise applied to every state
          -    process_noise = 15
-    m = ThrownObject(process_noise = process_noise)  # Noise with a std of 15 applied to every state
-    print('\nExample with the same noise applied to every state')
          -    simulated_results = m.simulate_to_threshold(future_load, **config)
          -    print_results(simulated_results)
          -    plt.title('Ex2: Basic Noise')
          -
          -    # Ex3: noise- more noise on position than velocity
          -    process_noise = {'x': 30, 'v': 1}
          -    m = ThrownObject(process_noise = process_noise)
          -    print('\nExample with more noise on position than velocity')
          -    simulated_results = m.simulate_to_threshold(future_load, **config)
          -    print_results(simulated_results)
          -    plt.title('Ex3: More noise on position')
          -
          -    # Ex4: noise- Ex3 but uniform
          -    process_noise_dist = 'uniform'
          -    model_config = {'process_noise_dist': process_noise_dist, 'process_noise': process_noise}
          -    m = ThrownObject(**model_config)
-    print('\nExample with uniform noise distribution')
          -    simulated_results = m.simulate_to_threshold(future_load, **config)
          -    print_results(simulated_results)
          -    plt.title('Ex4: Ex3 with uniform dist')
          -
          -    # Ex5: noise- Ex3 but triangle
          -    process_noise_dist = 'triangular'
          -    model_config = {'process_noise_dist': process_noise_dist, 'process_noise': process_noise}
          -    m = ThrownObject(**model_config)
          -    print('\nExample with triangular process noise')
          -    simulated_results = m.simulate_to_threshold(future_load, **config)
          -    print_results(simulated_results)
          -    plt.title('Ex5: Ex3 with triangular dist')
          -
          -    # Ex6: Measurement noise
          -    # Everything we've done with process noise, we can also do with measurement noise.
          -    # Just use 'measurement_noise' and 'measurement_noise_dist'
          -    measurement_noise = {'x': 20}  # For each output
          -    measurement_noise_dist = 'uniform'
          -    model_config = {'measurement_noise_dist': measurement_noise_dist, 'measurement_noise': measurement_noise}
          -    m = ThrownObject(**model_config)
          -    print('\nExample with measurement noise')
          -    print('- Note: outputs are different than state- this is the application of measurement noise')
          -    simulated_results = m.simulate_to_threshold(future_load, **config)
          -    print_results(simulated_results)
          -    plt.title('Ex6: Measurement noise')
          -
          -    # Ex7: OK, now for something a little more complicated. Let's try proportional noise on v only (more variation when it's going faster)
          -    # This can be used to do custom or more complex noise distributions
          -    def apply_proportional_process_noise(self, x, dt = 1):
          -        x['v'] -= dt*0.5*x['v']
          -        return x
          -    model_config = {'process_noise': apply_proportional_process_noise}
          -    m = ThrownObject(**model_config)
          -    print('\nExample with proportional noise on velocity')
          -    simulated_results = m.simulate_to_threshold(future_load, **config)
          -    print_results(simulated_results)
          -    plt.title('Ex7: Proportional noise on velocity')
          -
          -    print('\nNote: If you would like noise to be applied in a repeatable manner, set the numpy random seed to a fixed value')
          -    print('e.g., numpy.random.seed(42)')
          -    plt.show()
          -
          -# This allows the module to be executed directly
          -if __name__=='__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

Gallery generated by Sphinx-Gallery

-Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/param_est.html b/docs/auto_examples/param_est.html
deleted file mode 100644
index 9725b187..00000000
--- a/docs/auto_examples/param_est.html
+++ /dev/null
@@ -1,484 +0,0 @@
-<no title> — ProgPy Python Packages 1.7 documentation

          Example demonstrating the model parameter estimation feature.

          -
          from progpy.models.thrown_object import ThrownObject
          -
          -def run_example():
          -    # Step 1: Build the model with your best guess in parameters
          -    # Here we're guessing that the thrower is 20 meters tall. Obviously not true!
          -    # Let's see if parameter estimation can fix this
          -    m = ThrownObject(thrower_height=20)
          -
          -    # Step 2: Collect data from the use of the system. Let's pretend we threw the ball once, and collected position measurements
          -    times = [0, 1, 2, 3, 4, 5, 6, 7, 8]
          -    inputs = [{}]*9
          -    outputs = [
          -        {'x': 1.83},
          -        {'x': 36.95},
          -        {'x': 62.36},
          -        {'x': 77.81},
          -        {'x': 83.45},
          -        {'x': 79.28},
          -        {'x': 65.3},
          -        {'x': 41.51},
          -        {'x': 7.91},
          -    ]
          -
          -    # Step 3: Identify the parameters to be estimated
          -    keys = ['thrower_height', 'throwing_speed']
          -
          -    # Printing state before
          -    print('Model configuration before')
          -    for key in keys:
          -        print("-", key, m.parameters[key])
          -    print(' Error: ', m.calc_error(times, inputs, outputs, dt=1e-4))
          -
          -    # Step 4: Run parameter estimation with data
          -    m.estimate_params([(times, inputs, outputs)], keys, dt=0.01)
          -
          -    # Print result
          -    print('\nOptimized configuration')
          -    for key in keys:
          -        print("-", key, m.parameters[key])
          -    print(' Error: ', m.calc_error(times, inputs, outputs, dt=1e-4))
          -
-    # Sure enough- parameter estimation determined that the thrower's height wasn't 20 m; instead it was closer to 1.9 m, a much more reasonable height!
          -
          -if __name__=='__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

Gallery generated by Sphinx-Gallery

-Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/sensitivity.html b/docs/auto_examples/sensitivity.html
deleted file mode 100644
index 30c4908f..00000000
--- a/docs/auto_examples/sensitivity.html
+++ /dev/null
@@ -1,485 +0,0 @@
-<no title> — ProgPy Python Packages 1.7 documentation

          Example performing a sensitivity analysis on a new model.

          -
          # Deriv prog model was selected because the model can be described as x' = x + dx*dt
          -from progpy.models.thrown_object import ThrownObject
          -import numpy as np
          -
          -def run_example():
          -    # Demo model
          -    # Step 1: Create instance of model
          -    m = ThrownObject()
          -
          -    # Step 2: Setup for simulation
          -    def future_load(t, x=None):
          -        return m.InputContainer({})
          -
          -    # Step 3: Setup range on parameters considered
          -    thrower_height_range = np.arange(1.2, 2.1, 0.1)
          -
          -    # Step 4: Sim for each
          -    event = 'impact'
          -    eods = np.empty(len(thrower_height_range))
          -    for (i, thrower_height) in zip(range(len(thrower_height_range)), thrower_height_range):
          -        m.parameters['thrower_height'] = thrower_height
          -        simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt =1e-3, save_freq =10)
          -        eods[i] = simulated_results.times[-1]
          -
          -    # Step 5: Analysis
          -    print('For a reasonable range of heights, impact time is between {} and {}'.format(round(eods[0],3), round(eods[-1],3)))
          -    sensitivity = (eods[-1]-eods[0])/(thrower_height_range[-1] - thrower_height_range[0])
          -    print('  - Average sensitivity: {} s per cm height'.format(round(sensitivity/100, 6)))
          -    print("  - It seems impact time is not very sensitive to thrower's height")
          -
          -    # Now lets repeat for throw speed
          -    throw_speed_range = np.arange(20, 40, 1)
          -    eods = np.empty(len(throw_speed_range))
          -    for (i, throw_speed) in zip(range(len(throw_speed_range)), throw_speed_range):
          -        m.parameters['throwing_speed'] = throw_speed
          -        simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], options={'dt':1e-3, 'save_freq':10})
          -        eods[i] = simulated_results.times[-1]
          -
          -    print('\nFor a reasonable range of throwing speeds, impact time is between {} and {}'.format(round(eods[0],3), round(eods[-1],3)))
          -    sensitivity = (eods[-1]-eods[0])/(throw_speed_range[-1] - throw_speed_range[0])
          -    print('  - Average sensitivity: {} s per m/s speed'.format(round(sensitivity/100, 6)))
          -    print("  - It seems impact time is much more dependent on throwing speed")
          -
          -# This allows the module to be executed directly
          -if __name__=='__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

Gallery generated by Sphinx-Gallery

-Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/sim.html b/docs/auto_examples/sim.html
deleted file mode 100644
index a8ce1cd1..00000000
--- a/docs/auto_examples/sim.html
+++ /dev/null
@@ -1,491 +0,0 @@
-<no title> — ProgPy Python Packages 1.7 documentation

Example of a battery being simulated for a set period of time and then until a threshold is met.

          -
          from progpy.models import BatteryCircuit as Battery
          -# VVV Uncomment this to use Electro Chemistry Model VVV
          -# from progpy.models import BatteryElectroChem as Battery
          -
          -def run_example():
          -    # Step 1: Create a model object
          -    batt = Battery()
          -
          -    # Step 2: Define future loading function
          -    def future_loading(t, x=None):
          -        # Variable (piece-wise) future loading scheme
          -        if (t < 600):
          -            i = 2
          -        elif (t < 900):
          -            i = 1
          -        elif (t < 1800):
          -            i = 4
          -        elif (t < 3000):
          -            i = 2
          -        else:
          -            i = 3
          -        return batt.InputContainer({'i': i})
          -    # simulate for 200 seconds
          -    print('\n\n------------------------------------------------')
          -    print('Simulating for 200 seconds\n\n')
          -    simulated_results = batt.simulate_to(200, future_loading, print = True, progress = True)
          -
          -    # Simulate to threshold
          -    print('\n\n------------------------------------------------')
          -    print('Simulating to threshold\n\n')
          -    options = {
          -        'save_freq': 100, # Frequency at which results are saved
          -        'dt': 2, # Timestep
          -        'print': True,
          -        'progress': True
          -    }
          -    simulated_results = batt.simulate_to_threshold(future_loading, **options)
          -
          -    # Alternately, you can set a max step size and allow step size to be adjusted automatically
          -    options['dt'] = ('auto', 2)  # set step size automatically, with a max of 2 seconds
          -    options['save_freq'] = 201  # Save every 201 seconds
-    options['save_pts'] = [250, 772, 1023]  # Special points we would like to see reported
          -    simulated_results = batt.simulate_to_threshold(future_loading, **options)
-    # Note that even though the max step size is 2, the odd save points are still hit exactly; dt is adjusted automatically to capture the save points
          -
          -    # You can also change the integration method. For example:
          -    options['integration_method'] = 'rk4'  # Using Runge-Kutta 4th order
          -    simulated_results_rk4 = batt.simulate_to_threshold(future_loading, **options)
          -
          -# This allows the module to be executed directly
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

          Total running time of the script: ( 0 minutes 0.000 seconds)

Gallery generated by Sphinx-Gallery

-Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/sim_battery_eol.html b/docs/auto_examples/sim_battery_eol.html
deleted file mode 100644
index 0ee1b070..00000000
--- a/docs/auto_examples/sim_battery_eol.html
+++ /dev/null
@@ -1,487 +0,0 @@
-<no title> — ProgPy Python Packages 1.7 documentation

          Example of a battery being simulated until End of Life (EOL). Battery capacity decreases with use. In this case, EOL is defined as when the battery capacity falls below some acceptable threshold (i.e., what we define as useful capacity).

          -
          import matplotlib.pyplot as plt
          -
          -from progpy.models import BatteryElectroChem as Battery
          -
          -def run_example():
          -    # Step 1: Create a model object
          -    batt = Battery()
          -
          -    # Step 2: Define future loading function
          -    # Here we're using a function designed to charge until 0.95,
          -    # then discharge until 0.05
          -    load = 1
          -
          -    def future_loading(t, x=None):
          -        nonlocal load
          -
          -        # Rule for loading after initialization
          -        if x is not None:
          -            # Current event state in the form {'EOD': <(0, 1)>, 'InsufficientCapacity': <(0, 1)>}
          -            event_state = batt.event_state(x)
          -            if event_state["EOD"] > 0.95:
          -                load = 1  # Discharge
          -            elif event_state["EOD"] < 0.05:
          -                load = -1  # Charge
          -        # Rule for loading at initialization
          -        return batt.InputContainer({'i': load})
          -
          -    # Step 3: Simulate to Capacity is insufficient Threshold
          -    print('\n\n------------------------------------------------')
          -    print('Simulating to threshold\n\n')
          -    options = {
          -        'save_freq': 1000,  # Frequency at which results are saved
          -        'dt': 2,  # Timestep
          -        'threshold_keys': ['InsufficientCapacity'],  # Simulate to InsufficientCapacity
          -        'print': True
          -    }
          -    simulated_results = batt.simulate_to_threshold(future_loading, **options)
          -
          -    # Step 4: Plot Results
          -    simulated_results.inputs.plot(ylabel='Current drawn (amps)')
          -    simulated_results.event_states.plot(ylabel='Event States', labels={'EOD': 'State of Charge (SOC)', 'InsufficientCapacity': 'State of Health (SOH)'})
          -    plt.ylim([0, 1])
          -
          -    plt.show()
          -
          -# This allows the module to be executed directly
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

- Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/sim_powertrain.html b/docs/auto_examples/sim_powertrain.html
deleted file mode 100644
index 5c0e2639..00000000
--- a/docs/auto_examples/sim_powertrain.html
+++ /dev/null
@@ -1,462 +0,0 @@
- <no title> — ProgPy Python Packages 1.7 documentation

          Example of a powertrain being simulated for a set amount of time.

          -
          from progpy.models import Powertrain, ESC, DCMotor
          -
          -def run_example():
          -    # Create a model object
          -    esc = ESC()
          -    motor = DCMotor()
          -    powertrain = Powertrain(esc, motor)
          -
          -    # Define future loading function - 100% duty all the time
          -    def future_loading(t, x=None):
          -        return powertrain.InputContainer({
          -            'duty': 1,
          -            'v': 23
          -        })
          -
          -    # Simulate to threshold
          -    print('\n\n------------------------------------------------')
          -    print('Simulating to threshold\n\n')
          -    simulated_results = powertrain.simulate_to(2, future_loading, dt=2e-5, save_freq=0.1, print=True)
          -
          -# This allows the module to be executed directly
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

- Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/sim_pump.html b/docs/auto_examples/sim_pump.html
deleted file mode 100644
index 9f8ae469..00000000
--- a/docs/auto_examples/sim_pump.html
+++ /dev/null
@@ -1,492 +0,0 @@
- <no title> — ProgPy Python Packages 1.7 documentation

          Example of a centrifugal pump being simulated until threshold is met.

          -
          from progpy.models import CentrifugalPump
          -
          -def run_example():
          -    # Step 1: Setup Pump
          -    pump = CentrifugalPump(process_noise= 0)
          -    pump.parameters['x0']['wA'] = 0.01  # Set Wear Rate
          -
          -    # Step 2: Setup Future Loading
          -    cycle_time = 3600
          -    def future_loading(t, x=None):
          -        t = t % cycle_time
          -        if t < cycle_time/2.0:
          -            V = 471.2389
          -        elif t < cycle_time/2 + 100:
          -            V = 471.2389 + (t-cycle_time/2)
          -        elif t < cycle_time - 100:
          -            V = 571.2389
          -        else:
          -            V = 471.2398 - (t-cycle_time)
          -
          -        return pump.InputContainer({
          -            'Tamb': 290,
          -            'V': V,
          -            'pdisch': 928654,
          -            'psuc': 239179,
          -            'wsync': V * 0.8
          -        })
          -
          -    # Step 3: Sim
          -    first_output = pump.output(pump.initialize(future_loading(0),{}))
          -    config = {
          -        'horizon': 1e5,
          -        'save_freq': 1e3,
          -        'print': True
          -    }
          -    simulated_results = pump.simulate_to_threshold(future_loading, first_output, **config)
          -
          -    # Step 4: Plot Results
          -    from progpy.visualize import plot_timeseries
          -    plot_timeseries(simulated_results.times, simulated_results.inputs, options={'compact': False, 'title': 'Inputs',
          -                                                    'xlabel': 'time', 'ylabel':{lbl: lbl for lbl in pump.inputs}})
          -    plot_timeseries(simulated_results.times, simulated_results.states, options={'compact': False, 'title': 'States', 'xlabel': 'time', 'ylabel': ''})
          -    plot_timeseries(simulated_results.times, simulated_results.outputs, options={'compact': False, 'title': 'Outputs', 'xlabel': 'time', 'ylabel': ''})
          -    plot_timeseries(simulated_results.times, simulated_results.event_states, options={'compact': False, 'title': 'Events', 'xlabel': 'time', 'ylabel': ''})
          -    thresholds_met = [pump.threshold_met(x) for x in simulated_results.states]
          -    plot_timeseries(simulated_results.times, thresholds_met, options={'compact': True, 'title': 'Events', 'xlabel': 'time', 'ylabel': ''}, legend = {'display': True})
          -
          -    import matplotlib.pyplot as plt
          -    plt.show()
          -
          -# This allows the module to be executed directly
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

- Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/sim_valve.html b/docs/auto_examples/sim_valve.html
deleted file mode 100644
index 09f531b5..00000000
--- a/docs/auto_examples/sim_valve.html
+++ /dev/null
@@ -1,507 +0,0 @@
- <no title> — ProgPy Python Packages 1.7 documentation

          Example of a pneumatic valve being simulated until threshold is met.

          -
          from progpy.models.pneumatic_valve import PneumaticValve
          -
          -def run_example():
          -    # Create a model object
          -    valv = PneumaticValve(process_noise= 0)
          -
          -    # Define future loading function
          -    cycle_time = 20
          -    def future_loading(t, x=None):
          -            t = t % cycle_time
          -            if t < cycle_time/2:
          -                return valv.InputContainer({
          -                    'pL': 3.5e5,
          -                    'pR': 2.0e5,
          -                    # Open Valve
          -                    'uTop': False,
          -                    'uBot': True
          -                })
          -            return valv.InputContainer({
          -                'pL': 3.5e5,
          -                'pR': 2.0e5,
          -                # Close Valve
          -                'uTop': True,
          -                'uBot': False
          -            })
          -
          -    # Simulate to threshold
          -    print('\n\n------------------------------------------------')
          -    print('Simulating to threshold\n\n')
          -    # Configure options
          -    config = {
          -        'dt': 0.01,
          -        'horizon': 800,
          -        'save_freq': 60,
          -        'print': True,
          -        'progress': True,
          -    }
          -    # Set wear parameter for spring to 1
          -    valv.parameters['x0']['wk'] = 1
          -
          -    # Define first measured output. This is needed by the simulate_to_threshold method to initialize state
          -    first_output = valv.output(valv.initialize(future_loading(0)))
          -    # Simulate
          -    simulated_results = valv.simulate_to_threshold(future_loading, first_output, **config)
          -
          -    # Simulate to threshold again but with a different wear mode
          -    print('\n\n------------------------------------------------')
          -    print('Simulating to threshold\n\n')
          -    # Configure options
          -    config = {
          -        'dt': 0.01,
          -        'horizon': 800,
          -        'save_freq': 60,
          -        'print': True,
          -        'progress': True
          -    }
          -    # Reset wear parameter for spring to 0, set wear parameter for friction to 1
          -    valv.parameters['x0']['wk'] = 0
          -    valv.parameters['x0']['wr'] = 1
          -
          -    # Define first measured output. This is needed by the simulate_to_threshold method to initialize state
          -    first_output = valv.output(valv.initialize(future_loading(0)))
          -    # Simulate
          -    simulated_results = valv.simulate_to_threshold(future_loading, first_output, **config)
          -
          -# This allows the module to be executed directly
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

- Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/state_limits.html b/docs/auto_examples/state_limits.html
deleted file mode 100644
index f890d780..00000000
--- a/docs/auto_examples/state_limits.html
+++ /dev/null
@@ -1,501 +0,0 @@
- <no title> — ProgPy Python Packages 1.7 documentation

          Example demonstrating when and how to identify model state limits.

          -
          from progpy.models.thrown_object import ThrownObject
          -from math import inf
          -
          -def run_example():
          -    # Demo model
          -    # Step 1: Create instance of model (without drag)
          -    m = ThrownObject( cd = 0 )
          -
          -    # Step 2: Setup for simulation
          -    def future_load(t, x=None):
          -        return {}
          -
          -    # add state limits
          -    m.state_limits = {
          -        # object may not go below ground height
          -        'x': (0, inf),
          -
          -        # object may not exceed the speed of light
          -        'v': (-299792458, 299792458)
          -    }
          -
          -    # Step 3: Simulate to impact
          -    event = 'impact'
          -    simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1)
          -
          -    # Print states
          -    print('Example 1')
          -    for i, state in enumerate(simulated_results.states):
          -        print(f'State {i}: {state}')
          -    print()
          -
          -    # Let's try setting x to a number outside of its bounds
          -    x0 = m.initialize(u = {}, z = {})
          -    x0['x'] = -1
          -
          -    simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=1, x = x0)
          -
          -    # Print states
          -    print('Example 2')
          -    for i, state in enumerate(simulated_results.states):
          -        print('State ', i, ': ', state)
          -    print()
          -
          -    # Let's see what happens when the object's speed approaches its limit
          -    x0 = m.initialize(u = {}, z = {})
          -    x0['x'] = 1000000000
          -    x0['v'] = 0
          -    m.parameters['g'] = -50000000
          -
          -    print('Example 3')
          -    simulated_results = m.simulate_to_threshold(future_load, threshold_keys=[event], dt=0.005, save_freq=0.3, x = x0, print = True, progress = False)
          -
          -    # Note that the limits can also be applied manually using the apply_limits function
          -    print('limiting states')
          -    x = {'x': -5, 'v': 3e8}  # Too fast and below the ground
          -    print('\t Pre-limit: {}'.format(x))
          -    x = m.apply_limits(x)
          -    print('\t Post-limit: {}'.format(x))
          -
          -# This allows the module to be executed directly
          -if __name__=='__main__':
          -    run_example()
          -
          -
          -

- Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/vectorized.html b/docs/auto_examples/vectorized.html
deleted file mode 100644
index 0bfec2ce..00000000
--- a/docs/auto_examples/vectorized.html
+++ /dev/null
@@ -1,468 +0,0 @@
- <no title> — ProgPy Python Packages 1.7 documentation

          Example using simulate_to_threshold with vectorized states. In this example, we are using the thrown_object model to simulate multiple thrown objects.

          -
          from progpy.models.thrown_object import ThrownObject
          -from numpy import array, all
          -
          -def run_example():
          -    # Step 1: Setup object
          -    m = ThrownObject()
          -    def future_load(t, x=None):
          -        return {}  # No load for thrown objects
          -
          -    # Step 2: Setup vectorized initial state
          -    # For this example we are saying there are 4 throwers of various strengths and heights
          -    first_state = {
          -        'x': array([1.75, 1.8, 1.85, 1.9]),
          -        'v': array([35, 39, 22, 47])
          -    }
          -
          -    # Step 3: Simulate to threshold
          -    # Here we are simulating till impact using the first state defined above
          -    (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(future_load, x = first_state, threshold_keys=['impact'], print = True, dt=0.1, save_freq=2)
          -
          -    # Now lets do the same thing but only stop when all hit the ground
          -    def thresholds_met_eqn(thresholds_met):
          -        return all(thresholds_met['impact'])  # Stop when all impact ground
          -
          -    simulated_results = m.simulate_to_threshold(future_load, x = first_state, thresholds_met_eqn=thresholds_met_eqn, print = True, dt=0.1, save_freq=2)
          -
          -# This allows the module to be executed directly
          -if __name__=='__main__':
          -    run_example()
          -
          -
          -

- Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/auto_examples/visualize.html b/docs/auto_examples/visualize.html
deleted file mode 100644
index 2b8617d6..00000000
--- a/docs/auto_examples/visualize.html
+++ /dev/null
@@ -1,471 +0,0 @@
- <no title> — ProgPy Python Packages 1.7 documentation

          Example demonstrating the Visualization Module.

          -
          import matplotlib.pyplot as plt
          -from progpy.visualize import plot_timeseries
          -from progpy.models.thrown_object import ThrownObject
          -
          -def run_example():
          -    print('Visualize Module Example')
          -    m = ThrownObject()
          -
          -    # Step 2: Setup for simulation
          -    def future_load(t, x=None):
          -        return {}
          -
          -    # Step 3: Simulate to impact
          -    event = 'impact'
          -    options={'dt':0.005, 'save_freq':1}
          -    simulated_results = m.simulate_to_threshold(future_load,
          -                                                threshold_keys=[event],
          -                                                **options)
          -
          -
          -    # Display states
          -    # ==============
          -    plot_timeseries(simulated_results.times, simulated_results.states,
          -                          options = {'compact': False, 'suptitle': 'state evolution', 'title': True,
          -                                     'xlabel': 'time', 'ylabel': {'x': 'position', 'v': 'velocity'}, 'display_labels': 'minimal'},
          -                          legend  = {'display': True, 'display_at_subplot': 'all'} )
          -    plot_timeseries(simulated_results.times, simulated_results.states, options = {'compact': True, 'suptitle': 'state evolution', 'title': 'example title',
          -                                                    'xlabel': 'time', 'ylabel':'position'})
          -    plt.show()
          -
          -if __name__ == '__main__':
          -    run_example()
          -
          -
          -

- Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
\ No newline at end of file
diff --git a/docs/dev_guide.html b/docs/dev_guide.html
index a93a911a..2ec2ad59 100644
--- a/docs/dev_guide.html
+++ b/docs/dev_guide.html
@@ -9,7 +9,7 @@
- Developers Guide & Project Plan — ProgPy Python Packages 1.7 documentation
+ Developers Guide & Project Plan — ProgPy Python Packages 1.8 documentation
          @@ -330,12 +332,20 @@

          Glossary
          composite model#

          A model consisting of multiple inter-related Prognostics Models, where the input of one model is a function of the output or state of another. This is a tool for representing system-of-systems. Composite models are implemented using the progpy.CompositeModel class.

          +
          continuous model#

          A model where state transition is continuous. Continuous models define the dx equation for state transition.

          +
          +
          continuous state#

          A system state representation that can vary smoothly over a continuous range of values. Continuous states are initialized using a floating point number.

          +
          controller#

          A closed loop future loading method. Calculates future loading as a function of state, like the progpy.loading.controllers.LQR controller used by the progpy.models.aircraft_model.SmallRotorcraft model.

          data-driven model#

          A model where the behavior is learned from data. In ProgPy, data-driven models derive from the parent class progpy.data_models.DataModel. A common example of data-driven models is models using neural networks (e.g., progpy.data_models.LSTMStateTransitionModel).

          direct-prediction model#

          A model where the time of event is directly estimated from the current state and/or future load, instead of predicted through simulation to threshold. These are implemented using the progpy.PrognosticsModel.time_to_event() method.

          +
          discrete model#

          A model where state transition is discrete. Discrete models define the next_state equation for state transition.

          +
          +
          discrete state#

          A system state representation that can only occupy one of a finite set of predefined values. Transitions between discrete states occur based on defined logic or triggering events. Discrete states are initialized using the function progpy.create_discrete_state().

          +
          event#

          Something that can be predicted (e.g., system failure). An event has either occurred or not. See also: threshold

          event state#

          Progress towards event occurring. Defined as a number where an event state of 0 indicates the event has occurred and 1 indicates no progress towards the event (i.e., fully healthy operation for a failure event). For a gradually occurring event (e.g., discharge) the number will progress from 1 to 0 as the event nears. In prognostics, event state is frequently called “State of Health”.

diff --git a/docs/guide.html b/docs/guide.html
index e8e8402c..536fb9ca 100644
--- a/docs/guide.html
+++ b/docs/guide.html
@@ -9,7 +9,7 @@
- ProgPy Guide — ProgPy Python Packages 1.7 documentation
+ ProgPy Guide — ProgPy Python Packages 1.8 documentation
          @@ -347,6 +349,7 @@

      • Installing ProgPy
      • Troubleshooting Guide

diff --git a/docs/npr7150.html b/docs/npr7150.html
index 02192389..5f6caf92 100644
--- a/docs/npr7150.html
+++ b/docs/npr7150.html
@@ -9,7 +9,7 @@
- NPR 7150 NASA Software Engineering Requirements — ProgPy Python Packages 1.7 documentation
+ NPR 7150 NASA Software Engineering Requirements — ProgPy Python Packages 1.8 documentation
diff --git a/docs/objects.inv b/docs/objects.inv
index 5c54cfad..0aec1393 100644
Binary files a/docs/objects.inv and b/docs/objects.inv differ
diff --git a/docs/prog_algs_guide.html b/docs/prog_algs_guide.html
index e0c620cb..8eaac220 100644
--- a/docs/prog_algs_guide.html
+++ b/docs/prog_algs_guide.html
@@ -9,7 +9,7 @@
- State Estimation and Prediction Guide — ProgPy Python Packages 1.7 documentation
+ State Estimation and Prediction Guide — ProgPy Python Packages 1.8 documentation
          @@ -388,6 +390,7 @@

      • Installing ProgPy
      • Troubleshooting Guide

          Summary#

          @@ -543,7 +546,7 @@

          State Estimation -Example -

          Where x0 is the initial state as an UncertainData object (often the output of state estimation), future_loading is a function defining future loading as a function of state and time, and config is a dictionary of any additional configuration parameters, specific to the predictor being used. See Predictors for options available for each predictor

          +

          Where x0 is the initial state as an UncertainData object (often the output of state estimation), future_loading is a function defining future loading as a function of state and time, and config is a dictionary of any additional configuration parameters specific to the predictor being used. See Predictors for options available for each predictor.

          The result of the predict method is a named tuple with the following members:

          • times: array of times for each savepoint such that times[i] corresponds to inputs.snapshot(i)

          • @@ -597,7 +600,7 @@

          • event_states: progpy.predictors.Prediction object containing predicted event states at each savepoint such that event_states.snapshot(i) corresponds to times[i]

          • time_of_event: progpy.uncertain_data.UncertainData object containing the predicted Time of Event (ToE) for each event. Additionally, final state at time of event is saved at time_of_event.final_state -> progpy.uncertain_data.UncertainData for each event

          -

          The stepsize and times at which results are saved can be defined like in a simulation. See Simulation.

          +

          The stepsize and times at which results are saved can be defined like in a simulation. See Simulation.
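          For illustration, below is a minimal sketch of a prediction call. It assumes a model m, an initial state x0 from a state estimator, and a future_loading function have already been defined; the MonteCarlo predictor and the dt/save_freq settings are used here only as one example of the pattern, not as the required choice.

          >>> from progpy.predictors import MonteCarlo
          >>> mc = MonteCarlo(m)
          >>> results = mc.predict(x0, future_loading, dt=0.1, save_freq=1)
          >>> toe = results.time_of_event  # UncertainData: predicted Time of Event for each event
          >>> print(toe.mean)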

          @@ -696,7 +699,7 @@

          State Estimation Scatter Plot
          @@ -735,17 +738,13 @@

          Predicted Future States#

          Predicted future states, inputs, outputs, and event states come in the form of a progpy.predictors.Prediction object. Predictions store distributions of predicted future values at multiple future times. Predictions contain a number of tools for analyzing the results, some of which are described below:

          • mean: Estimate the mean value at each time. The result is a list of dictionaries such that prediction.mean[i] corresponds to times[i]

          • -
          • -
            monotonicity: Given a single prediction, for each event: go through all predicted states and compare those to the next one.

            Calculates monotonicity for each event key using its associated mean value in UncertainData 4 3

            -
            -
            -
          • +
          • monotonicity: Given a single prediction, for each event: go through all predicted states and compare those to the next one. Calculates monotonicity for each event key using its associated mean value in UncertainData. 4 3

          Time of Event (ToE)#

          Time of Event is also stored as an object of type progpy.uncertain_data.UncertainData, so the analysis functions described in State Estimation are also available for a ToE estimate. See State Estimation or progpy.uncertain_data.UncertainData documentation for details.

          -

          In addition to these standard UncertainData metrics, Probability of Success (PoS) is an important metric for prognostics. Probability of Success is the probability that an event will not occur before a defined time. For example, in aeronautics, PoS might be the probability that no failure will occur before end of mission.

          +

          In addition to these standard UncertainData metrics, Probability of Success (PoS) is an important metric for prognostics. Probability of Success is the probability that an event will not occur before a defined time. For example, in aeronautics, PoS might be the probability that no failure will occur before end of mission.

          Below is an example calculating probability of success:

          >>> from progpy.metrics import prob_success
           >>> ps = prob_success(some_distribution, end_of_mission)
          @@ -754,14 +753,14 @@ 

          Time of Event (ToE)

          ToE Prediction Profile#

          -

          A progpy.predictors.ToEPredictionProfile contains Time of Event (ToE) predictions performed at multiple points. ToEPredictionProfile is frequently used to evaluate the prognostic quality for a given prognostic solution. It contains a number of methods to help with this, including:

          +

          A progpy.predictors.ToEPredictionProfile contains Time of Event (ToE) predictions performed at multiple points. ToEPredictionProfile is frequently used to evaluate the prognostic quality for a given prognostic solution. It contains a number of methods to help with this, including:

          • alpha_lambda: Whether the prediction falls within specified limits at particular times with respect to a performance measure 1 2

          • cumulate_relative_accuracy: The sum of the relative accuracies of each prediction, given a ground truth

          • monotonicity: The monotonicity of the prediction series 4 3

          • prognostic_horizon: The difference between a time \(t_i\), when the predictions meet specified performance criteria, and the time corresponding to the true Time of Event (ToE), for each event 1 2

          -

          A ToEPredictionProfile also contains a plot method (profile.plot(...)), which looks like this:

          +

          A ToEPredictionProfile also contains a plot method (profile.plot(...)), which looks like this:

          _images/alpha_chart.png

          This chart shows the distribution of estimated RUL (y-axis) at different prediction times (x-axis) in red. The ground truth and an alpha bound around the ground truth is displayed in green.

diff --git a/docs/prog_models_guide.html b/docs/prog_models_guide.html
index 77f6b9bb..a46576c4 100644
--- a/docs/prog_models_guide.html
+++ b/docs/prog_models_guide.html
@@ -9,7 +9,7 @@
- Modeling and Sim Guide — ProgPy Python Packages 1.7 documentation
+ Modeling and Sim Guide — ProgPy Python Packages 1.8 documentation
          @@ -339,18 +341,18 @@

          Contents

      • Building New Models
      • -
      • Using provided models
      • +
      • Using Provided Models
      • Simulation
      • Parameter Estimation
      • Visualizing Results
      • Combination Models
      • Other Examples
      • -
      • Tips
      • +
      • Tips & Best Practices
      • References
      • @@ -389,6 +391,7 @@

      • Installing ProgPy
      • Troubleshooting Guide

        Getting Started#

        @@ -410,12 +413,27 @@

        States

        \(x(t+dt) = f(t, x(t), u(t), dt, \Theta)\)

        where \(x(t)\) is the state at time \(t\), \(u(t)\) is the input at time \(t\), \(dt\) is the stepsize, and \(\Theta\) are the model parameters.

        -

        In a ProgPy model, this state transition can be represented one of two ways, either discrete or continuous, depending on the nature of state transition. In the case of continuous models, state transition behavior is defined by defining the first derivative, using the progpy.PrognosticsModel.dx() method. For discrete models, state transition behavior is defined using the progpy.PrognosticsModel.next_state() method. The continuous state transition behavior is recommended, because defining the first derivative enables some approaches that rely on that information.

        +

        In a ProgPy model, this state transition can be represented one of two ways, either discrete or continuous, depending on the nature of state transition. In the case of continuous models, state transition behavior is defined by defining the first derivative, using the progpy.PrognosticsModel.dx() method. For discrete models, state transition behavior is defined using the progpy.PrognosticsModel.next_state() method. The continuous state transition behavior is recommended, because defining the first derivative enables some approaches that rely on that information.

        _images/next_state.png _images/dx.png +

        States can also be discrete or continuous. Discrete states are those which can only exist in a finite set of values. Continuous states are initialized with a number and discrete states are initialized using the function progpy.create_discrete_state(), like the examples below. Each discrete state represents a unique condition or mode, and transitions between states are governed by defined rules or events, providing clarity and predictability in state management.

        +
        >>> from progpy import create_discrete_state
        +>>> ValveState = create_discrete_state(2, ["open", "closed"])
        +>>> x["valve"] = ValveState.open
        +
        +
        +
        >>> from progpy import create_discrete_state
        +>>> GearState = create_discrete_state(5, transition="sequential")
        +>>> x["gear"] = GearState(1)
        +
        +
        +
        +

        Note

        +

        Discrete states are different from discrete models. Discrete models are models where the state transition is discrete, whereas discrete states are states where the state itself is discrete. Discrete models may have continuous states.

        +

        See the noise section in the example below for details on how to configure process and measurement noise in ProgPy.

        @@ -580,9 +595,9 @@

        Future Loading#

        return m.InputContainer({'input1': ...})

        See example below for details on how to provide future loading information in ProgPy.

        +

        See the future loading section in the example below for details on how to provide future loading information in ProgPy.

        @@ -594,12 +609,12 @@

        General Notes#

        ProgPy provides a framework for building new models. Generally, models can be divided into three basic categories: physics-based models, data-driven models, and hybrid models. Additionally, models can rely on state-transition for prediction, or they can use what is called direct-prediction. These two categories are described below.

        -

        State-transition Models#

        +

        State-Transition Models#

        -

        New physics-based models are constructed by subclassing progpy.PrognosticsModel as illustrated in the first example. To generate a new model, create a new class for your model that inherits from this class. Alternatively, you can copy the template prog_model_template.ProgModelTemplate, replacing the methods with logic defining your specific model. The analysis and simulation tools defined in progpy.PrognosticsModel will then work with your new model.

        +

        New physics-based models are constructed by subclassing progpy.PrognosticsModel as illustrated in the first example. To generate a new model, create a new class for your model that inherits from this class. Alternatively, you can copy the template prog_model_template.ProgModelTemplate, replacing the methods with logic defining your specific model. The analysis and simulation tools defined in progpy.PrognosticsModel will then work with your new model.

        For simple linear models, users can choose to subclass the simpler progpy.LinearModel class, as illustrated in the second example. Some methods and algorithms only function on linear models.
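        To make the subclassing pattern concrete, below is a minimal sketch of a hypothetical state-transition model. The class MyTank, its states, and its parameter values are invented for illustration only; see the ProgPy examples for complete, validated models.

        >>> from progpy import PrognosticsModel
        >>> class MyTank(PrognosticsModel):
        ...     # Hypothetical tank that drains at a rate set by the commanded valve opening
        ...     inputs = ['valve_cmd']
        ...     states = ['level']
        ...     outputs = ['level_meas']
        ...     events = ['empty']
        ...     default_parameters = {'x0': {'level': 10.0}, 'drain_rate': 0.1}
        ...
        ...     def dx(self, x, u):
        ...         # Continuous state transition: return the first derivative of each state
        ...         return self.StateContainer({'level': -self.parameters['drain_rate'] * u['valve_cmd']})
        ...
        ...     def output(self, x):
        ...         return self.OutputContainer({'level_meas': x['level']})
        ...
        ...     def event_state(self, x):
        ...         # 1 = full, 0 = the 'empty' event has occurred
        ...         return {'empty': max(x['level'] / self.parameters['x0']['level'], 0)}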

        -

        Direct-prediction models#

        -

        Direct-prediction models are models that estimate time of event directly from the current state and future load, instead of being predicted through state transition. When models are pure direct-prediction models, future states cannot be predicted. See example below for more information.

        +

        Direct-Prediction Models#

        +

        Direct-prediction models are models that estimate time of event directly from the current state and future load, instead of being predicted through state transition. When models are pure direct-prediction models, future states cannot be predicted. See the direct models section in the example below for more information.

        Using Data#

        -

        Wether you’re using data-driven, physics-based, expert knowledge, or some hybrid approach, building and validating a model requires data. In the case of data-driven approaches, data is used to train and validate the model. In the case of physics-based, data is used to estimate parameters (see Parameter Estimation) and validate the model.

        +

        Whether you’re using data-driven, physics-based, expert knowledge, or some hybrid approach, building and validating a model requires data. In the case of data-driven approaches, data is used to train and validate the model. In the case of physics-based, data is used to estimate parameters (see Parameter Estimation) and validate the model.

        ProgPy includes some example datasets. See ProgPy Datasets and the example below for details.
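        For example, a dataset can be loaded as sketched below. This assumes the nasa_battery loader and signature described on the ProgPy Datasets page; an internet connection is needed the first time the data is downloaded.

        >>> from progpy.datasets import nasa_battery
        >>> desc, data = nasa_battery.load_data(1)  # assumed loader; returns a description and the run data
        >>> print(desc)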

        @@ -828,10 +826,10 @@

        Parameter Estimation#

        >>> m.estimate_params([run1_data, run2_data], params_to_estimate, dt=0.01)

        See the example below for more details

        +

        See the example below for more details.

        Note

        -

        Parameters are changes in-place, so the model on which estimate_params is called, is now tuned to match the data

        +

        Parameters are changed in-place, so the model on which estimate_params is called is now tuned to match the data.

        @@ -846,15 +844,10 @@

        Visualizing Results

        Combination Models#

        -

        There are two methods in progpy through which multiple models can be combined and used together: composite models and ensemble models, described below.

        -

        For more details, see:

        -
        -
        +

        There are two methods in progpy through which multiple models can be combined and used together: composite models and ensemble models, described below. For more details, see the example below.

        +

        06. Combining Models

        -

        Composite models are used to represent the behavior of a system of interconnected systems. Each system is represented by its own model. These models are combined into a single composite model which behaves as a single model. When definiting the composite model the user provides a discription of any connections between the state or output of one model and the input of another. For example,

        +

        Composite models are used to represent the behavior of a system of interconnected systems. Each system is represented by its own model. These models are combined into a single composite model which behaves as a single model. When defining the composite model, the user provides a description of any connections between the state or output of one model and the input of another. For example,

        >>> m = CompositeModel(
         >>>     models = [model1, model2],
         >>>     connections = [
        @@ -888,18 +881,18 @@ 

        Other Examples#

        examples.sensitivity
        -examples.serialization

        Tips#

        +
        +

        Tips & Best Practices#

        • If you’re only doing diagnostics without prognostics, just define a next_state equation with no change of state and don’t perform prediction (see the sketch after this list). The state estimator can still be used to estimate whether any of the events have occurred.

        • -
        • Sudden event’s use a binary event state (1=healthy, 0=failed).

        • +
        • Sudden events use a binary event state (1=healthy, 0=failed).

        • You can predict as many events as you would like. Sometimes one event must happen before another; in this case, the event occurrence for event 1 can be part of the equation for event 2 (‘event 2’: event_1 and [OTHER LOGIC]).

        • +
        • Minimize the number of state variables whenever possible

        • +
        • Whenever possible, if calculations don’t include state or inputs, include values as parameters or derived parameters instead of calculating them within state transition

        • +
        • Use consistent units throughout the model

        • +
        • Document all assumptions and limitations
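        The first tip can be illustrated with a short sketch. The class MyDiagnosticModel below is hypothetical and intentionally minimal: next_state returns the state unchanged, and a state estimator is expected to update the state from measurements.

        >>> from progpy import PrognosticsModel
        >>> class MyDiagnosticModel(PrognosticsModel):
        ...     # Hypothetical diagnostics-only model: no state transition is modeled
        ...     inputs = ['u']
        ...     states = ['health']
        ...     outputs = ['z']
        ...     events = ['failure']
        ...     default_parameters = {'x0': {'health': 1.0}}
        ...
        ...     def next_state(self, x, u, dt):
        ...         return x  # state does not change; the state estimator tracks it instead
        ...
        ...     def output(self, x):
        ...         return self.OutputContainer({'z': x['health']})
        ...
        ...     def event_state(self, x):
        ...         return {'failure': x['health']}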

        @@ -980,18 +973,18 @@

      • diff --git a/docs/prog_server_guide.html b/docs/prog_server_guide.html index 70a7468d..bc33bb42 100644 --- a/docs/prog_server_guide.html +++ b/docs/prog_server_guide.html @@ -9,7 +9,7 @@ - prog_server Guide — ProgPy Python Packages 1.7 documentation + prog_server Guide — ProgPy Python Packages 1.8 documentation @@ -32,11 +32,11 @@ - + @@ -46,8 +46,8 @@ - + @@ -155,6 +155,7 @@
diff --git a/docs/py-modindex.html b/docs/py-modindex.html
index 5c278c7f..98e41f22 100644
--- a/docs/py-modindex.html
+++ b/docs/py-modindex.html
@@ -8,7 +8,7 @@
- Python Module Index — ProgPy Python Packages 1.7 documentation
+ Python Module Index — ProgPy Python Packages 1.8 documentation
          @@ -286,25 +286,11 @@

          Python Module Index

          - o | p
diff --git a/docs/releases.html b/docs/releases.html
index 4e02f8aa..18510e60 100644
--- a/docs/releases.html
+++ b/docs/releases.html
@@ -9,7 +9,7 @@
- Release Notes — ProgPy Python Packages 1.7 documentation
+ Release Notes — ProgPy Python Packages 1.8 documentation
            @@ -322,45 +324,54 @@

            Contents

        •  
          - o
          - online_prog -
          - option_scoring -
           
          p