diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..c6e9e36a2 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ + +version: 2 +updates: + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "monthly" \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fbf806c13..54147a822 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,72 +2,53 @@ name: JWQL CI on: [push, pull_request] -jobs: +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true - JWQL-CI: +defaults: + run: + shell: bash -l {0} - name: Python - ${{ matrix.python-version }} +jobs: + check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v4 + with: + python-version: "3.x" + - run: pip install bandit + - run: bandit ./jwql/ -c .bandit + + test: + name: test (Python ${{ matrix.python-version }}, ${{ matrix.os }}) runs-on: ${{ matrix.os }} strategy: max-parallel: 5 + fail-fast: false matrix: os: [ubuntu-latest, macos-latest] - python-version: [3.8, 3.9] + python-version: [3.9, "3.10"] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - - uses: conda-incubator/setup-miniconda@v2 + - uses: mamba-org/provision-with-micromamba@v15 with: - activate-environment: jwql-${{ matrix.python-version }} + environment-file: ./environment_python_${{ matrix.python-version }}.yml + cache-env: true + cache-downloads: true - - name: Bandit Check - uses: jpetrucciani/bandit-check@master - with: - path: "./jwql/" - bandit_flags: "-c .bandit" - continue-on-error: false - if: runner.os == 'Linux' + - run: pip install -e .[test] - - name: Build jwql conda environment and run tests - shell: bash -l {0} - run: | + - run: conda env export - echo "Setting useful environment variables:" - echo " " - export PYTHONVERSION=${{ matrix.python-version }} - export MAJOR="$(echo $PYTHONVERSION | head -c 1)" - export MINOR="$(echo $PYTHONVERSION | tail -c 2)" - echo "PYTHONVERSION: $PYTHONVERSION" - - echo " " - echo "Installing jwql conda environment" - echo " " - cd $RUNNER_WORKSPACE/ - cd jwql/ - $CONDA/bin/conda env update -f environment_python_${MAJOR}_${MINOR}.yml -n jwql-${{ matrix.python-version }} - - echo " " - echo "Installing jwql package" - echo " " - python setup.py develop - - echo " " - echo "Testing package installation" - echo " " - python -c "import jwql; print('Version: ' + jwql.__version__); print('Path: ' + jwql.__path__[0])" - - # echo " " - # echo "Reinstall numpy to fix numpy.core.multiarray error" - # echo " " - pip install -U numpy + - uses: supercharge/redis-github-action@1.4.0 + with: + redis-version: 5.0 + if: runner.os == 'Linux' - echo " " - echo "The conda environment being used:" - echo " " - $CONDA/bin/conda env export + - run: python -c "import jwql; print('Version ' + jwql.__version__); print('Path ' + jwql.__path__[0])" - echo " " - echo "Running pytests" - echo " " - pytest jwql/tests/ + - run: pytest jwql/tests/ diff --git a/.gitignore b/.gitignore index 33e46db5d..64320a824 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,8 @@ *.ipynb_checkpoints/ config.json .DS_Store +.idea +.coverage* build/ dist/ jwql.egg-info/ diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 668472f84..069f15a23 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -4,17 +4,29 @@ # Required version: 2 # Set the version of Python and other tools you might need +# Explicit sqlalchemy install using post_install in 
order to get around the RTD +# command pip install --upgrade --upgrade-strategy eager +# This command upgrades all packages listed in rtd_requirements.txt. This was causing +# a problem when upgrading to sqlalchemy >=2.0, which has large changes from versions +# <2.0, and was causing jwql to crash. build: - os: ubuntu-20.04 + os: ubuntu-22.04 tools: python: "3.9" + jobs: + post_install: + - pip install sqlalchemy==1.4.46 + # Build documentation in the docs/ directory with Sphinx sphinx: configuration: docs/source/conf.py + fail_on_warning: false # If using Sphinx, optionally build your docs in additional formats such as PDF # formats: # - pdf # Optionally declare the Python requirements required to build your docs python: install: - - requirements: rtd_requirements.txt \ No newline at end of file + - requirements: rtd_requirements.txt + - method: pip + path: . diff --git a/CHANGES.rst b/CHANGES.rst index 304f1dbde..2452df232 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,12 @@ +Unreleased +========== + +Web Application +~~~~~~~~~~~~~~~ + +- Dark and Bad Pixel Monitors delete unnecessary outputs to save disk space (#933) + + 1.1.1 (2022-04-05) ================== diff --git a/MANIFEST.in b/MANIFEST.in index 478c9f4d7..20b6bed98 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,7 +4,7 @@ include LICENSE include CHANGES.rst include environment_python_3_7.yml include environment_python_3_8.yml -include setup.py +include pyproject.toml include requirements.txt include rtd_requirements.txt include jwql/example_config.json diff --git a/README.md b/README.md index 2a1e36a3f..377005a11 100644 --- a/README.md +++ b/README.md @@ -44,12 +44,12 @@ Getting `jwql` up and running on your own computer requires four steps, detailed ### Prerequisites -It is highly suggested that contributors have a working installation of `anaconda` or `miniconda` for Python 3.8. Downloads and installation instructions are available here: +It is highly suggested that contributors have a working installation of `anaconda` or `miniconda` for Python 3.9+. Downloads and installation instructions are available here: - [Miniconda](https://conda.io/miniconda.html) - [Anaconda](https://www.continuum.io/downloads) -Requirements for contributing to the `jwql` package will be included in the `jwql` `conda` environment, which is included in our installation instructions below. Further package requirements will be provided for `jwql` by a `setup.py` script included in the repository. +Requirements for contributing to the `jwql` package will be included in the `jwql` `conda` environment, which is included in our installation instructions below. Further package requirements will be provided for `jwql` by a `pyproject.toml` file included in the repository. ### Clone the `jwql` repo @@ -83,16 +83,16 @@ source activate base/root **Note:** If you have added a step activating conda to your default terminal/shell (e.g. the `.bashrc`, `.zshrc`, or `.profile` file) then you don't need to do the above step.
-Lastly, create the `jwql` environment via one of the `environment.yml` files (currently `environment_python_3_8.yml`, for python 3.8, and `environment_python_3.9.yml`, for python 3.9, are supported by `jwql`): +Lastly, create the `jwql` environment via one of the `environment.yml` files (currently `environment_python_3.9.yml`, for python 3.9, and `environment_python_3.10.yml`, for python 3.10, are supported by `jwql`): ``` -conda env create -f environment_python_3_8.yml +conda env create -f environment_python_3.9.yml ``` or ``` -conda env create -f environment_python_3.9.yml +conda env create -f environment_python_3.10.yml ``` ### Configuration File @@ -154,10 +154,11 @@ Any questions about the `jwql` project or its software can be directed to `jwql@ - Mees Fix (Technical Lead, INS) [@mfixstsci](https://github.com/mfixstsci) - Misty Cracraft (INS) [@cracraft](https://github.com/cracraft) - Mike Engesser (INS) [@mengesser](https://github.com/mengesser) -- Shannon Osborne (INS) [@shanosborne](https://github.com/shanosborne) - Maria Pena-Guerrero [@penaguerrero](https://github.com/penaguerrero) - Ben Sunnquist (INS) [@bsunnquist](https://github.com/bsunnquist) - Brian York (INS) [@york-stsci](https://github.com/york-stsci) +- Bradley Sappington (INS) [@bradleysappington](https://github.com/bradleysappington) +- Melanie Clarke (INS) [@melanieclarke](https://github.com/melanieclarke) ## Past Development Team Members - Matthew Bourque (INS) [@bourque](https://github.com/bourque) @@ -168,7 +169,7 @@ Any questions about the `jwql` project or its software can be directed to `jwql@ - Sara Ogaz (DMD) [@SaOgaz](https://github.com/SaOgaz) - Catherine Martlin (INS) [@catherine-martlin](https://github.com/catherine-martlin) - Johannes Sahlmann (INS) [@Johannes-Sahlmann](https://github.com/johannes-sahlmann) - +- Shannon Osborne (INS) [@shanosborne](https://github.com/shanosborne) ## Acknowledgments: - Faith Abney (DMD) diff --git a/docs/source/common_monitors.rst b/docs/source/common_monitors.rst index b67bb5d2e..cb16375ef 100644 --- a/docs/source/common_monitors.rst +++ b/docs/source/common_monitors.rst @@ -20,6 +20,12 @@ dark_monitor.py :members: :undoc-members: +edb_telemetry_monitor.py +------------------------ +.. automodule:: jwql.instrument_monitors.common_monitors.edb_telemetry_monitor + :members: + :undoc-members: + readnoise_monitor.py -------------------- .. automodule:: jwql.instrument_monitors.common_monitors.readnoise_monitor diff --git a/docs/source/conf.py b/docs/source/conf.py index 3dccd9d1f..367964e7e 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -82,7 +82,7 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = 'en' # This is a fix for warnings because of sphinx-autodoc interaction for classes, # however it removes method table from the docs. diff --git a/docs/source/jwql_monitors.rst b/docs/source/jwql_monitors.rst index 55446ea97..42221e004 100644 --- a/docs/source/jwql_monitors.rst +++ b/docs/source/jwql_monitors.rst @@ -25,9 +25,3 @@ monitor_filesystem.py .. automodule:: jwql.jwql_monitors.monitor_filesystem :members: :undoc-members: - -monitor_mast.py ---------------- -.. 
automodule:: jwql.jwql_monitors.monitor_mast - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/tests.rst b/docs/source/tests.rst index 68020cfc3..576a126e9 100644 --- a/docs/source/tests.rst +++ b/docs/source/tests.rst @@ -50,6 +50,12 @@ test_edb.py :members: :undoc-members: +test_edb_telemetry_monitor.py +----------------------------- +.. automodule:: jwql.tests.test_edb_telemetry_monitor + :members: + :undoc-members: + test_instrument_properties.py ----------------------------- .. automodule:: jwql.tests.test_instrument_properties @@ -68,9 +74,9 @@ test_logging_functions.py :members: :undoc-members: -test_monitor_mast.py +test_mast_utils.py -------------------- -.. automodule:: jwql.tests.test_monitor_mast +.. automodule:: jwql.tests.test_mast_utils :members: :undoc-members: diff --git a/docs/source/utils.rst b/docs/source/utils.rst index 856caed01..3956b7afc 100644 --- a/docs/source/utils.rst +++ b/docs/source/utils.rst @@ -2,12 +2,6 @@ utils ***** -anomaly_query_config.py ------------------------ -.. automodule:: jwql.utils.anomaly_query_config - :members: - :undoc-members: - calculations.py --------------- .. automodule:: jwql.utils.calculations @@ -90,4 +84,4 @@ utils.py -------- .. automodule:: jwql.utils.utils :members: - :undoc-members: \ No newline at end of file + :undoc-members: diff --git a/docs/source/website.rst b/docs/source/website.rst index 530f367ae..6f0da9b31 100644 --- a/docs/source/website.rst +++ b/docs/source/website.rst @@ -26,12 +26,6 @@ data_containers.py :members: :undoc-members: -db.py ------ -.. automodule:: jwql.website.apps.jwql.db - :members: - :undoc-members: - forms.py -------- .. automodule:: jwql.website.apps.jwql.forms diff --git a/environment_python_3.10.yml b/environment_python_3.10.yml new file mode 100644 index 000000000..9f2ed409b --- /dev/null +++ b/environment_python_3.10.yml @@ -0,0 +1,73 @@ +# This file describes a conda environment that can be to install jwql +# +# Run the following command to set up this environment: +# $ conda env create -f environment_python_3.10.yml +# +# The environment name can be overridden with the following command: +# $ conda env create -n -f environment_python_3.10.yml +# +# Run the following command to activate the environment: +# $ source activate jwql-3.10 +# +# To deactivate the environment run the following command: +# $ source deactivate +# +# To remove the environment entirely, run the following command: +# $ conda env remove -n jwql-3.10 + +name: jwql-3.10 + +channels: + - conda-forge + - defaults + +dependencies: + - astropy=5.3.4 + - beautifulsoup4=4.12.2 + - bokeh=2.4.3 + - celery=5.3.4 + - cryptography=41.0.4 + - django=4.2.6 + - inflection=0.5.1 + - ipython=8.16.1 + - jinja2=3.1.2 + - jsonschema=4.19.1 + - matplotlib=3.8.0 + - nodejs=20.8.0 + - numpy=1.25.2 + - numpydoc=1.5.0 + - pandas=2.1.1 + - pip=23.2.1 + - postgresql=15.4 + - psycopg2=2.9.7 + - pytest=7.4.2 + - pytest-cov=4.1.0 + - pytest-mock=3.11.1 + - python=3.10.12 + - pyyaml=6.0.1 + - redis=5.0.0 + - ruff=0.0.292 + - scipy=1.9.3 + - setuptools=68.2.2 + - sphinx=7.2.6 + - sphinx_rtd_theme=1.3.0 + - sqlalchemy=2.0.21 + - twine=4.0.2 + - wtforms=3.0.1 + + - pip: + - astroquery==0.4.6 + - bandit==1.7.5 + - jwst==1.12.3 + - pysiaf==0.20.0 + - pysqlite3==0.5.2 + - pyvo==1.4.2 + - redis==5.0.0 + - selenium==4.13.0 + - stdatamodels==1.8.3 + - stsci_rtd_theme==1.0.0 + - vine==5.0.0 + - git+https://github.com/spacetelescope/jwst_reffiles + + # Current package + - -e . 
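The new environment files above are pinned per Python minor version and named environment_python_<major>.<minor>.yml, which is the same pattern the updated CI workflow interpolates via environment-file: ./environment_python_${{ matrix.python-version }}.yml. As an illustration only (this helper is hypothetical and not part of the patch), the matching file for the running interpreter could be located like this:

    import sys
    from pathlib import Path

    def pinned_env_file(repo_root="."):
        # Hypothetical helper: build the environment filename the same way the CI
        # matrix does, e.g. environment_python_3.10.yml for a 3.10 interpreter.
        name = f"environment_python_{sys.version_info.major}.{sys.version_info.minor}.yml"
        path = Path(repo_root) / name
        if not path.is_file():
            raise FileNotFoundError(f"No pinned environment file found for this Python: {name}")
        return path

A helper like this would only matter for local tooling; in CI the filename comes directly from the job matrix.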
diff --git a/environment_python_3.9.yml b/environment_python_3.9.yml new file mode 100644 index 000000000..4f4159244 --- /dev/null +++ b/environment_python_3.9.yml @@ -0,0 +1,73 @@ +# This file describes a conda environment that can be to install jwql +# +# Run the following command to set up this environment: +# $ conda env create -f environment_python_3.9.yml +# +# The environment name can be overridden with the following command: +# $ conda env create -n -f environment_python_3.9.yml +# +# Run the following command to activate the environment: +# $ source activate jwql-3.9 +# +# To deactivate the environment run the following command: +# $ source deactivate +# +# To remove the environment entirely, run the following command: +# $ conda env remove -n jwql-3.9 + +name: jwql-3.9 + +channels: + - conda-forge + - defaults + +dependencies: + - astropy=5.3.3 + - beautifulsoup4=4.12.2 + - bokeh=2.4.3 + - celery=5.3.4 + - cryptography=41.0.4 + - django=4.2.5 + - inflection=0.5.1 + - ipython=8.16.1 + - jinja2=3.1.2 + - jsonschema=4.19.1 + - matplotlib=3.8.0 + - nodejs=20.8.0 + - numpy=1.25.2 + - numpydoc=1.5.0 + - pandas=2.1.1 + - pip=23.2.1 + - postgresql=15.4 + - psycopg2=2.9.7 + - pytest=7.4.2 + - pytest-cov=4.1.0 + - pytest-mock=3.11.1 + - python=3.9.17 + - pyyaml=6.0.1 + - redis=5.0.0 + - ruff=0.0.292 + - scipy=1.9.3 + - setuptools=68.2.2 + - sphinx=7.2.6 + - sphinx_rtd_theme=1.3.0 + - sqlalchemy=2.0.21 + - twine=4.0.2 + - wtforms=3.0.1 + + - pip: + - astroquery==0.4.6 + - bandit==1.7.5 + - jwst==1.12.3 + - pysiaf==0.20.0 + - pysqlite3==0.5.2 + - pyvo==1.4.2 + - redis==5.0.0 + - selenium==4.13.0 + - stdatamodels==1.8.3 + - stsci_rtd_theme==1.0.0 + - vine==5.0.0 + - git+https://github.com/spacetelescope/jwst_reffiles + + # Current package + - -e . diff --git a/environment_python_3_8.yml b/environment_python_3_8.yml deleted file mode 100644 index 201a23558..000000000 --- a/environment_python_3_8.yml +++ /dev/null @@ -1,68 +0,0 @@ -# This file describes a conda environment that can be to install jwql -# -# Run the following command to set up this environment: -# $ conda env create -f environment_python_3_8.yml -# -# The environment name can be overridden with the following command: -# $ conda env create -n -f environment_python_3_8.yml -# -# Run the following command to activate the environment: -# $ source activate jwql-3.8 -# -# To deactivate the environment run the following command: -# $ source deactivate -# -# To remove the environment entirely, run the following command: -# $ conda env remove -n jwql-3.8 - -name: jwql-3.8 - -channels: - - defaults - -dependencies: - - astropy=5.0.2 - - bokeh=2.4.2 - - beautifulsoup4=4.10.0 - - codecov=2.1.11 - - cryptography=36.0.0 - - django=3.2.5 - - flake8=3.9.2 - - inflection=0.5.1 - - ipython=7.29.0 - - jinja2=3.0.2 - - jsonschema=3.2.0 - - matplotlib=3.4.2 - - nodejs=10.13.0 - - numpy=1.20.3 - - numpydoc=1.1.0 - - pandas=1.3.4 - - pip=21.0.1 - - postgresql=12.2 - - psycopg2=2.8.6 - - pytest=6.2.4 - - pytest-cov=3.0.0 - - python=3.8.5 - - pyyaml=5.4.1 - - scipy=1.7.1 - - setuptools=52.0.0 - - sphinx=4.2.0 - - sphinx_rtd_theme=0.4.3 - - sqlalchemy=1.4.27 - - twine=3.4.1 - - wtforms=2.3.3 - - - pip: - - asdf==2.11.0 - - astroquery==0.4.5 - - bandit==1.7.4 - - crds==11.10.0 - - jwst==1.3.3 - - pysiaf==0.11.0 - - pysqlite3==0.4.3 - - stdatamodels==0.2.4 - - stsci_rtd_theme==0.0.2 - - git+https://github.com/spacetelescope/jwst_reffiles - - # Current package - - -e . 
diff --git a/environment_python_3_9.yml b/environment_python_3_9.yml deleted file mode 100644 index 5dbeb075f..000000000 --- a/environment_python_3_9.yml +++ /dev/null @@ -1,68 +0,0 @@ -# This file describes a conda environment that can be to install jwql -# -# Run the following command to set up this environment: -# $ conda env create -f environment_python_3_9.yml -# -# The environment name can be overridden with the following command: -# $ conda env create -n -f environment_python_3_9.yml -# -# Run the following command to activate the environment: -# $ source activate jwql-3.9 -# -# To deactivate the environment run the following command: -# $ source deactivate -# -# To remove the environment entirely, run the following command: -# $ conda env remove -n jwql-3.9 - -name: jwql-3.9 - -channels: - - defaults - -dependencies: - - astropy=4.3.1 - - bokeh=2.4.2 - - beautifulsoup4=4.9.3 - - codecov=2.1.11 - - cryptography=36.0.0 - - django=3.1.7 - - flake8=3.9.2 - - inflection=0.5.1 - - ipython=7.29.0 - - jinja2=3.0.2 - - jsonschema=3.2.0 - - matplotlib=3.4.2 - - nodejs=10.13.0 - - numpy=1.20.3 - - numpydoc=1.1.0 - - pandas=1.3.4 - - pip=21.0.1 - - postgresql=12.2 - - psycopg2=2.8.6 - - pytest=6.2.4 - - pytest-cov=3.0.0 - - python=3.9.11 - - pyyaml=5.4.1 - - scipy=1.7.1 - - setuptools=52.0.0 - - sphinx=4.2.0 - - sphinx_rtd_theme=0.4.3 - - sqlalchemy=1.4.27 - - twine=3.4.1 - - wtforms=2.3.3 - - - pip: - - asdf==2.8.3 - - astroquery==0.4.5 - - bandit==1.7.1 - - crds==11.5.2 - - jwst==1.3.3 - - pysiaf==0.11.0 - - pysqlite3==0.4.3 - - stdatamodels==0.2.4 - - stsci_rtd_theme==0.0.2 - - git+https://github.com/spacetelescope/jwst_reffiles - - # Current package - - -e . diff --git a/jwql/__init__.py b/jwql/__init__.py index c9bbfc74f..d1949b6ee 100644 --- a/jwql/__init__.py +++ b/jwql/__init__.py @@ -1,22 +1,13 @@ -import os -import pkg_resources +from importlib.metadata import version from jwql.utils import utils -module_path = pkg_resources.resource_filename('jwql', '') -setup_path = os.path.normpath(os.path.join(module_path, '../setup.py')) - +__version__ = version('jwql') try: - with open(setup_path) as f: - data = f.readlines() - - for line in data: - if 'VERSION =' in line: - __version__ = line.split(' ')[-1].replace("'", "").strip() config_version = utils.get_config()['jwql_version'] if __version__ != config_version: - print("Warning: config file JWQL version is {}, while JWQL is using {}".format(config_version, __version__)) + print(f"Warning: config file JWQL version is {config_version}, " + f"while JWQL is using {__version__}") except FileNotFoundError: - print('Could not determine jwql version') - __version__ = '0.0.0' + print('Could not determine jwql config version') diff --git a/jwql/database/clone_tables.py b/jwql/database/clone_tables.py new file mode 100644 index 000000000..9258aa284 --- /dev/null +++ b/jwql/database/clone_tables.py @@ -0,0 +1,109 @@ +#! /usr/bin/env python + +"""Clone a table (or tables) from the ``jwqldb`` databases. In particular, this script +supports cloning a table (or tables) into ``pandas`` frames, and supports writing a +``pandas`` frame into a database table. The intent of the script is to let a table (or all +the tables related to a particular monitor) from production to be copied to either the +test or dev database, to allow for local testing that can match the state of the +production database. 
+ +Note that, before using this script, you must ensure that the source and destination +table(s) have the same columns and, if you want an exact copy, the destination table +should be empty. For the above reasons, unless you have a compelling reason not to do so, +you should run the ``reset_database.py`` script before running this script. + +The intended workflow is that you first read a set of tables from production:: + + python clone_tables.py read -m bad_pixel + +Then, after moving the pandas file(s) to the appropriate server, write them:: + + python clone_tables.py write -m bad_pixel + +*The read and write steps are separate commands because they are not intended to be run on the same server or as the +same user.* + +Authors +------- + + - Brian York + +Use +--- + + This script is intended to be used in the command line: + :: + + python clone_tables.py {write|read} [-i instrument] [-m monitor] [-t tables] + +Dependencies +------------ + + Users must have a ``config.json`` configuration file with a proper + ``connection_string`` key that points to the ``jwqldb`` database. + The ``connection_string`` format is + ``postgresql+psycopg2://user:password@host:port/database``. +""" + +import argparse +import os +import sys + +import pandas + +from jwql.database import database_interface +from jwql.database.database_interface import INSTRUMENT_TABLES, MONITOR_TABLES + + +if __name__ == '__main__': + + ins_help = "Instrument tables to clone" + mon_help = "Monitor tables to clone" + tab_help = "Individual tables to clone (comma-separated list)" + action_help = "Action to take (read or write)" + parser = argparse.ArgumentParser(description='Clone JWQL database tables') + parser.add_argument('action', metavar='ACTION', type=str, help=action_help) + parser.add_argument('-i', '--instrument', metavar='INSTRUMENT', type=str, + help=ins_help, default=None, dest='instrument') + parser.add_argument('-m', '--monitor', metavar='MONITOR', type=str, + help=mon_help, default=None, dest='monitor') + parser.add_argument('-t', '--table', metavar='TABLE', type=str, + help=tab_help, default=None, dest='table') + args = parser.parse_args() + + if args.instrument is not None: + instrument = args.instrument.lower() + active_tables = INSTRUMENT_TABLES[instrument] + elif args.monitor is not None: + monitor = args.monitor.lower() + active_tables = MONITOR_TABLES[monitor] + elif args.table is not None: + table_list = args.table.split(",") + active_tables = [] + for table in table_list: + if hasattr(database_interface, table): + active_tables.append(getattr(database_interface, table)) + else: + print("ERROR: Unknown table {}. Skipping.".format(table)) + if len(active_tables) == 0: + print("ERROR: No tables selected.") + sys.exit(1) + else: + print("ERROR: Must specify one (and only one) of instrument, monitor, or table") + sys.exit(1) + + action = args.action.lower() + if action == 'read': # read tables to pandas + for table in active_tables: + frame = database_interface.session.query(table).data_frame + frame.to_csv(table.__tablename__+'.csv.gz') + elif action == 'write': # write pandas to tables + for table in active_tables: + file_name = table.__tablename__+'.csv.gz' + if not os.path.isfile(file_name): + print("ERROR: Archive {} not found.
Skipping.".format(file_name)) + continue + frame = pandas.read_csv(file_name) + frame.to_sql(table.__tablename__, database_interface.engine) + else: + print("ERROR: Unknown action {}".format(action)) diff --git a/jwql/database/database_interface.py b/jwql/database/database_interface.py old mode 100755 new mode 100644 index 08d775149..8f660209c --- a/jwql/database/database_interface.py +++ b/jwql/database/database_interface.py @@ -28,6 +28,7 @@ - Bryan Hilbert - Misty Cracraft - Sara Ogaz + - Maria Pena-Guerrero Use --- @@ -62,6 +63,7 @@ import socket import pandas as pd +from sqlalchemy import BigInteger from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import create_engine @@ -73,11 +75,10 @@ from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Time +from sqlalchemy import text from sqlalchemy import UniqueConstraint -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import sessionmaker +from sqlalchemy.orm import declarative_base, sessionmaker from sqlalchemy.orm.query import Query -from sqlalchemy.sql import text from sqlalchemy.types import ARRAY from jwql.utils.constants import ANOMALIES_PER_INSTRUMENT @@ -92,7 +93,7 @@ @property def data_frame(self): """Method to return a ``pandas.DataFrame`` of the results""" - + # NOTE: this requires pandas>=2 if sqlalchemy>=2 return pd.read_sql(self.statement, self.session.bind) @@ -134,10 +135,10 @@ def load_connection(connection_string): https://github.com/spacetelescope/acsql/blob/master/acsql/database/database_interface.py """ engine = create_engine(connection_string, echo=False) - base = declarative_base(engine) + base = declarative_base() Session = sessionmaker(bind=engine) session = Session() - meta = MetaData(engine) + meta = MetaData() return session, base, engine, meta @@ -152,6 +153,23 @@ def load_connection(connection_string): session, base, engine, meta = load_connection(SETTINGS['connection_string']) +class FilesystemCharacteristics(base): + """ORM for table containing instrument-specific lists of the number of + obervations corresponding to various instrument characteristics (e.g. + filters) + """ + + # Name the table + __tablename__ = 'filesystem_characteristics' + + # Define the columns + id = Column(Integer, primary_key=True, nullable=False) + date = Column(DateTime, nullable=False) + instrument = Column(Enum(*JWST_INSTRUMENT_NAMES, name='instrument_name_enum'), nullable=False) + filter_pupil = Column(ARRAY(String, dimensions=1)) + obs_per_filter_pupil = Column(ARRAY(Integer, dimensions=1)) + + class FilesystemGeneral(base): """ORM for the general (non instrument specific) filesystem monitor table""" @@ -287,6 +305,7 @@ def get_monitor_columns(data_dict, table_name): # Define column types data_type_dict = {'integer': Integer(), + 'bigint': BigInteger(), 'string': String(), 'float': Float(precision=32), 'decimal': Float(precision='13,8'), @@ -354,6 +373,28 @@ def get_monitor_table_constraints(data_dict, table_name): return data_dict +def get_unique_values_per_column(table, column_name): + """Return a list of the unique values from a particular column in the + given table. + + Parameters + ---------- + table : sqlalchemy.orm.decl_api.DeclarativeMeta + SQL table to be searched. (e.g. 
table = eval('NIRCamDarkPixelStats')) + + column_name : str + Column name within the table to query + + Returns + ------- + distinct_colvals : list + List of unique values in the given column + """ + colvals = session.query(eval(f'table.{column_name}')).distinct() + distinct_colvals = [eval(f'x.{column_name}') for x in colvals] + return sorted(distinct_colvals) + + def monitor_orm_factory(class_name): """Create a ``SQLAlchemy`` ORM Class for a ``jwql`` instrument monitor. @@ -389,16 +430,6 @@ class : obj return type(class_name, (base,), data_dict) -def set_read_permissions(): - """Set read permissions for db tables""" - - db_username = SETTINGS['database']['user'] - db_username = '_'.join(db_username.split('_')[:-1]) - db_account = '{}_read'.format(db_username) - command = 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO {};'.format(db_account) - engine.execute(command) - - # Create tables from ORM factory NIRCamAnomaly = anomaly_orm_factory('nircam_anomaly') NIRISSAnomaly = anomaly_orm_factory('niriss_anomaly') @@ -446,6 +477,100 @@ def set_read_permissions(): MIRIReadnoiseStats = monitor_orm_factory('miri_readnoise_stats') FGSReadnoiseQueryHistory = monitor_orm_factory('fgs_readnoise_query_history') FGSReadnoiseStats = monitor_orm_factory('fgs_readnoise_stats') +NIRCamEDBDailyStats = monitor_orm_factory('nircam_edb_daily_stats') +NIRCamEDBBlockStats = monitor_orm_factory('nircam_edb_blocks_stats') +NIRCamEDBTimeIntervalStats = monitor_orm_factory('nircam_edb_time_interval_stats') +NIRCamEDBEveryChangeStats = monitor_orm_factory('nircam_edb_every_change_stats') +MIRIEDBDailyStats = monitor_orm_factory('miri_edb_daily_stats') +MIRIEDBBlockStats = monitor_orm_factory('miri_edb_blocks_stats') +MIRIEDBTimeIntervalStats = monitor_orm_factory('miri_edb_time_interval_stats') +MIRIEDBEveryChangeStats = monitor_orm_factory('miri_edb_every_change_stats') +NIRISSEDBDailyStats = monitor_orm_factory('niriss_edb_daily_stats') +NIRISSEDBBlockStats = monitor_orm_factory('niriss_edb_blocks_stats') +NIRISSEDBTimeIntervalStats = monitor_orm_factory('niriss_edb_time_interval_stats') +NIRISSEDBEveryChangeStats = monitor_orm_factory('niriss_edb_every_change_stats') +FGSEDBDailyStats = monitor_orm_factory('fgs_edb_daily_stats') +FGSEDBBlockStats = monitor_orm_factory('fgs_edb_blocks_stats') +FGSEDBTimeIntervalStats = monitor_orm_factory('fgs_edb_time_interval_stats') +FGSEDBEveryChangeStats = monitor_orm_factory('fgs_edb_every_change_stats') +NIRSpecEDBDailyStats = monitor_orm_factory('nirspec_edb_daily_stats') +NIRSpecEDBBlockStats = monitor_orm_factory('nirspec_edb_blocks_stats') +NIRSpecEDBTimeIntervalStats = monitor_orm_factory('nirspec_edb_time_interval_stats') +NIRSpecEDBEveryChangeStats = monitor_orm_factory('nirspec_edb_every_change_stats') +NIRCamCosmicRayQueryHistory = monitor_orm_factory('nircam_cosmic_ray_query_history') +NIRCamCosmicRayStats = monitor_orm_factory('nircam_cosmic_ray_stats') +MIRICosmicRayQueryHistory = monitor_orm_factory('miri_cosmic_ray_query_history') +MIRICosmicRayStats = monitor_orm_factory('miri_cosmic_ray_stats') +NIRISSCosmicRayQueryHistory = monitor_orm_factory('niriss_cosmic_ray_query_history') +NIRISSCosmicRayStats = monitor_orm_factory('niriss_cosmic_ray_stats') +FGSCosmicRayQueryHistory = monitor_orm_factory('fgs_cosmic_ray_query_history') +FGSCosmicRayStats = monitor_orm_factory('fgs_cosmic_ray_stats') +NIRSpecCosmicRayQueryHistory = monitor_orm_factory('nirspec_cosmic_ray_query_history') +NIRSpecCosmicRayStats = monitor_orm_factory('nirspec_cosmic_ray_stats') 
+NIRSpecTAQueryHistory = monitor_orm_factory('nirspec_ta_query_history') +NIRSpecTAStats = monitor_orm_factory('nirspec_ta_stats') + +INSTRUMENT_TABLES = { + 'nircam': [NIRCamDarkQueryHistory, NIRCamDarkPixelStats, NIRCamDarkDarkCurrent, + NIRCamBiasQueryHistory, NIRCamBiasStats, NIRCamBadPixelQueryHistory, + NIRCamBadPixelStats, NIRCamReadnoiseQueryHistory, NIRCamReadnoiseStats, + NIRCamAnomaly, NIRCamCosmicRayQueryHistory, NIRCamCosmicRayStats, + NIRCamEDBDailyStats, NIRCamEDBBlockStats, NIRCamEDBTimeIntervalStats, + NIRCamEDBEveryChangeStats], + 'niriss': [NIRISSDarkQueryHistory, NIRISSDarkPixelStats, NIRISSDarkDarkCurrent, + NIRISSBiasQueryHistory, NIRISSBiasStats, NIRISSBadPixelQueryHistory, + NIRISSBadPixelStats, NIRISSReadnoiseQueryHistory, NIRISSReadnoiseStats, + NIRISSAnomaly, NIRISSCosmicRayQueryHistory, NIRISSCosmicRayStats, + NIRISSEDBDailyStats, NIRISSEDBBlockStats, NIRISSEDBTimeIntervalStats, + NIRISSEDBEveryChangeStats], + 'miri': [MIRIDarkQueryHistory, MIRIDarkPixelStats, MIRIDarkDarkCurrent, + MIRIBadPixelQueryHistory, MIRIBadPixelStats, MIRIReadnoiseQueryHistory, + MIRIReadnoiseStats, MIRIAnomaly, MIRICosmicRayQueryHistory, MIRICosmicRayStats, + MIRIEDBDailyStats, MIRIEDBBlockStats, MIRIEDBTimeIntervalStats, + MIRIEDBEveryChangeStats], + 'nirspec': [NIRSpecDarkQueryHistory, NIRSpecDarkPixelStats, NIRSpecDarkDarkCurrent, + NIRSpecBiasQueryHistory, NIRSpecBiasStats, NIRSpecBadPixelQueryHistory, + NIRSpecBadPixelStats, NIRSpecReadnoiseQueryHistory, NIRSpecReadnoiseStats, + NIRSpecAnomaly, NIRSpecTAQueryHistory, NIRSpecTAStats, + NIRSpecCosmicRayQueryHistory, NIRSpecCosmicRayStats, + NIRSpecEDBDailyStats, NIRSpecEDBBlockStats, NIRSpecEDBTimeIntervalStats, + NIRSpecEDBEveryChangeStats], + 'fgs': [FGSDarkQueryHistory, FGSDarkPixelStats, FGSDarkDarkCurrent, + FGSBadPixelQueryHistory, FGSBadPixelStats, FGSReadnoiseQueryHistory, + FGSReadnoiseStats, FGSAnomaly, FGSCosmicRayQueryHistory, FGSCosmicRayStats, + FGSEDBDailyStats, FGSEDBBlockStats, FGSEDBTimeIntervalStats, + FGSEDBEveryChangeStats]} + +MONITOR_TABLES = { + 'anomaly': [NIRCamAnomaly, NIRISSAnomaly, NIRSpecAnomaly, MIRIAnomaly, FGSAnomaly], + 'cosmic_ray': [NIRCamCosmicRayQueryHistory, NIRCamCosmicRayStats, + MIRICosmicRayQueryHistory, MIRICosmicRayStats, + NIRISSCosmicRayQueryHistory, NIRISSCosmicRayStats, + FGSCosmicRayQueryHistory, FGSCosmicRayStats, + NIRSpecCosmicRayQueryHistory, NIRSpecCosmicRayStats], + 'dark': [NIRCamDarkQueryHistory, NIRCamDarkPixelStats, NIRCamDarkDarkCurrent, + NIRISSDarkQueryHistory, NIRISSDarkPixelStats, NIRISSDarkDarkCurrent, + NIRSpecDarkQueryHistory, NIRSpecDarkPixelStats, NIRSpecDarkDarkCurrent, + MIRIDarkQueryHistory, MIRIDarkPixelStats, MIRIDarkDarkCurrent, + FGSDarkQueryHistory, FGSDarkPixelStats, FGSDarkDarkCurrent], + 'bias': [NIRCamBiasQueryHistory, NIRCamBiasStats, NIRISSBiasQueryHistory, + NIRISSBiasStats, NIRSpecBiasQueryHistory, NIRSpecBiasStats], + 'bad_pixel': [NIRCamBadPixelQueryHistory, NIRCamBadPixelStats, NIRISSBadPixelStats, + NIRISSBadPixelQueryHistory, FGSBadPixelQueryHistory, FGSBadPixelStats, + MIRIBadPixelQueryHistory, MIRIBadPixelStats, NIRSpecBadPixelQueryHistory, + NIRSpecBadPixelStats], + 'readnoise': [NIRCamReadnoiseQueryHistory, NIRCamReadnoiseStats, NIRISSReadnoiseStats, + NIRISSReadnoiseQueryHistory, NIRSpecReadnoiseQueryHistory, + NIRSpecReadnoiseStats, MIRIReadnoiseQueryHistory, MIRIReadnoiseStats, + FGSReadnoiseQueryHistory, FGSReadnoiseStats], + 'ta': [NIRSpecTAQueryHistory, NIRSpecTAStats], + 'edb': [NIRCamEDBDailyStats, NIRCamEDBBlockStats, 
NIRCamEDBTimeIntervalStats, + NIRCamEDBEveryChangeStats, NIRISSEDBDailyStats, NIRISSEDBBlockStats, + NIRISSEDBTimeIntervalStats, NIRISSEDBEveryChangeStats, MIRIEDBDailyStats, + MIRIEDBBlockStats, MIRIEDBTimeIntervalStats, MIRIEDBEveryChangeStats, + NIRSpecEDBDailyStats, NIRSpecEDBBlockStats, NIRSpecEDBTimeIntervalStats, + NIRSpecEDBEveryChangeStats, FGSEDBDailyStats, FGSEDBBlockStats, + FGSEDBTimeIntervalStats, FGSEDBEveryChangeStats]} if __name__ == '__main__': base.metadata.create_all(engine) diff --git a/jwql/database/monitor_table_definitions/fgs/fgs_cosmic_ray_query_history.txt b/jwql/database/monitor_table_definitions/fgs/fgs_cosmic_ray_query_history.txt new file mode 100644 index 000000000..bbea160bb --- /dev/null +++ b/jwql/database/monitor_table_definitions/fgs/fgs_cosmic_ray_query_history.txt @@ -0,0 +1,6 @@ +INSTRUMENT, string +APERTURE, string +START_TIME_MJD, float +END_TIME_MJD, float +FILES_FOUND, integer +RUN_MONITOR, bool \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/fgs/fgs_cosmic_ray_stats.txt b/jwql/database/monitor_table_definitions/fgs/fgs_cosmic_ray_stats.txt new file mode 100644 index 000000000..4d7a0415a --- /dev/null +++ b/jwql/database/monitor_table_definitions/fgs/fgs_cosmic_ray_stats.txt @@ -0,0 +1,8 @@ +APERTURE, string +SOURCE_FILE, string +OBS_START_TIME, datetime +OBS_END_TIME, datetime +JUMP_COUNT, integer +JUMP_RATE, float +MAGNITUDE, integer_array_1d +OUTLIERS, bigint_array_1d diff --git a/jwql/database/monitor_table_definitions/fgs/fgs_edb_blocks_stats.txt b/jwql/database/monitor_table_definitions/fgs/fgs_edb_blocks_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/fgs/fgs_edb_blocks_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/fgs/fgs_edb_daily_stats.txt b/jwql/database/monitor_table_definitions/fgs/fgs_edb_daily_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/fgs/fgs_edb_daily_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/fgs/fgs_edb_every_change_stats.txt b/jwql/database/monitor_table_definitions/fgs/fgs_edb_every_change_stats.txt new file mode 100644 index 000000000..18d5d91a2 --- /dev/null +++ b/jwql/database/monitor_table_definitions/fgs/fgs_edb_every_change_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIME, datetime_array_1d +MNEMONIC_VALUE, float_array_1d +MEDIAN, float +STDEV, float +DEPENDENCY_MNEMONIC, string +DEPENDENCY_VALUE, string +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/fgs/fgs_edb_mnemonics.txt b/jwql/database/monitor_table_definitions/fgs/fgs_edb_mnemonics.txt new file mode 100644 index 000000000..8839373c6 --- /dev/null +++ b/jwql/database/monitor_table_definitions/fgs/fgs_edb_mnemonics.txt @@ -0,0 +1,6 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file 
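Each of the monitor table definition files added above (and below) uses the same plain-text format: one "COLUMN_NAME, type" pair per line, with the type string mapped to a SQLAlchemy type through the data_type_dict in database_interface.get_monitor_columns (including the new bigint entry from this patch). As an illustrative sketch only, with a hypothetical function name, such a file could be parsed like this:

    from pathlib import Path

    def read_table_definition(path):
        # Split each "COLUMN_NAME, type_string" line on the first comma and
        # return (name, type_string) pairs, skipping blank lines.
        columns = []
        for line in Path(path).read_text().splitlines():
            line = line.strip()
            if not line:
                continue
            name, type_string = [token.strip() for token in line.split(",", 1)]
            columns.append((name, type_string))
        return columns

This sketch only illustrates the file format; the real mapping of type strings (including the array types such as float_array_1d and bigint_array_1d) to SQLAlchemy column types happens in get_monitor_columns.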
diff --git a/jwql/database/monitor_table_definitions/fgs/fgs_edb_time_interval_stats.txt b/jwql/database/monitor_table_definitions/fgs/fgs_edb_time_interval_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/fgs/fgs_edb_time_interval_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/miri/miri_cosmic_ray_query_history.txt b/jwql/database/monitor_table_definitions/miri/miri_cosmic_ray_query_history.txt new file mode 100644 index 000000000..bbea160bb --- /dev/null +++ b/jwql/database/monitor_table_definitions/miri/miri_cosmic_ray_query_history.txt @@ -0,0 +1,6 @@ +INSTRUMENT, string +APERTURE, string +START_TIME_MJD, float +END_TIME_MJD, float +FILES_FOUND, integer +RUN_MONITOR, bool \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/miri/miri_cosmic_ray_stats.txt b/jwql/database/monitor_table_definitions/miri/miri_cosmic_ray_stats.txt new file mode 100644 index 000000000..4d7a0415a --- /dev/null +++ b/jwql/database/monitor_table_definitions/miri/miri_cosmic_ray_stats.txt @@ -0,0 +1,8 @@ +APERTURE, string +SOURCE_FILE, string +OBS_START_TIME, datetime +OBS_END_TIME, datetime +JUMP_COUNT, integer +JUMP_RATE, float +MAGNITUDE, integer_array_1d +OUTLIERS, bigint_array_1d diff --git a/jwql/database/monitor_table_definitions/miri/miri_edb_blocks_stats.txt b/jwql/database/monitor_table_definitions/miri/miri_edb_blocks_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/miri/miri_edb_blocks_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/miri/miri_edb_daily_stats.txt b/jwql/database/monitor_table_definitions/miri/miri_edb_daily_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/miri/miri_edb_daily_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/miri/miri_edb_every_change_stats.txt b/jwql/database/monitor_table_definitions/miri/miri_edb_every_change_stats.txt new file mode 100644 index 000000000..18d5d91a2 --- /dev/null +++ b/jwql/database/monitor_table_definitions/miri/miri_edb_every_change_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIME, datetime_array_1d +MNEMONIC_VALUE, float_array_1d +MEDIAN, float +STDEV, float +DEPENDENCY_MNEMONIC, string +DEPENDENCY_VALUE, string +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/miri/miri_edb_mnemonics.txt b/jwql/database/monitor_table_definitions/miri/miri_edb_mnemonics.txt new file mode 100644 index 000000000..8839373c6 --- /dev/null +++ b/jwql/database/monitor_table_definitions/miri/miri_edb_mnemonics.txt @@ -0,0 +1,6 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d 
+DATA, float_array_1d +STDEV, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/miri/miri_edb_time_interval_stats.txt b/jwql/database/monitor_table_definitions/miri/miri_edb_time_interval_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/miri/miri_edb_time_interval_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_cosmic_ray_query_history.txt b/jwql/database/monitor_table_definitions/nircam/nircam_cosmic_ray_query_history.txt new file mode 100644 index 000000000..bbea160bb --- /dev/null +++ b/jwql/database/monitor_table_definitions/nircam/nircam_cosmic_ray_query_history.txt @@ -0,0 +1,6 @@ +INSTRUMENT, string +APERTURE, string +START_TIME_MJD, float +END_TIME_MJD, float +FILES_FOUND, integer +RUN_MONITOR, bool \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_cosmic_ray_stats.txt b/jwql/database/monitor_table_definitions/nircam/nircam_cosmic_ray_stats.txt new file mode 100644 index 000000000..4d7a0415a --- /dev/null +++ b/jwql/database/monitor_table_definitions/nircam/nircam_cosmic_ray_stats.txt @@ -0,0 +1,8 @@ +APERTURE, string +SOURCE_FILE, string +OBS_START_TIME, datetime +OBS_END_TIME, datetime +JUMP_COUNT, integer +JUMP_RATE, float +MAGNITUDE, integer_array_1d +OUTLIERS, bigint_array_1d diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_edb_blocks_stats.txt b/jwql/database/monitor_table_definitions/nircam/nircam_edb_blocks_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/nircam/nircam_edb_blocks_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_edb_daily_stats.txt b/jwql/database/monitor_table_definitions/nircam/nircam_edb_daily_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/nircam/nircam_edb_daily_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_edb_every_change_stats.txt b/jwql/database/monitor_table_definitions/nircam/nircam_edb_every_change_stats.txt new file mode 100644 index 000000000..18d5d91a2 --- /dev/null +++ b/jwql/database/monitor_table_definitions/nircam/nircam_edb_every_change_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIME, datetime_array_1d +MNEMONIC_VALUE, float_array_1d +MEDIAN, float +STDEV, float +DEPENDENCY_MNEMONIC, string +DEPENDENCY_VALUE, string +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_edb_time_interval_stats.txt b/jwql/database/monitor_table_definitions/nircam/nircam_edb_time_interval_stats.txt new file mode 100644 index 
000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/nircam/nircam_edb_time_interval_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/niriss/niriss_cosmic_ray_query_history.txt b/jwql/database/monitor_table_definitions/niriss/niriss_cosmic_ray_query_history.txt new file mode 100644 index 000000000..bbea160bb --- /dev/null +++ b/jwql/database/monitor_table_definitions/niriss/niriss_cosmic_ray_query_history.txt @@ -0,0 +1,6 @@ +INSTRUMENT, string +APERTURE, string +START_TIME_MJD, float +END_TIME_MJD, float +FILES_FOUND, integer +RUN_MONITOR, bool \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/niriss/niriss_cosmic_ray_stats.txt b/jwql/database/monitor_table_definitions/niriss/niriss_cosmic_ray_stats.txt new file mode 100644 index 000000000..4d7a0415a --- /dev/null +++ b/jwql/database/monitor_table_definitions/niriss/niriss_cosmic_ray_stats.txt @@ -0,0 +1,8 @@ +APERTURE, string +SOURCE_FILE, string +OBS_START_TIME, datetime +OBS_END_TIME, datetime +JUMP_COUNT, integer +JUMP_RATE, float +MAGNITUDE, integer_array_1d +OUTLIERS, bigint_array_1d diff --git a/jwql/database/monitor_table_definitions/niriss/niriss_edb_blocks_stats.txt b/jwql/database/monitor_table_definitions/niriss/niriss_edb_blocks_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/niriss/niriss_edb_blocks_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/niriss/niriss_edb_daily_stats.txt b/jwql/database/monitor_table_definitions/niriss/niriss_edb_daily_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/niriss/niriss_edb_daily_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/niriss/niriss_edb_every_change_stats.txt b/jwql/database/monitor_table_definitions/niriss/niriss_edb_every_change_stats.txt new file mode 100644 index 000000000..18d5d91a2 --- /dev/null +++ b/jwql/database/monitor_table_definitions/niriss/niriss_edb_every_change_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIME, datetime_array_1d +MNEMONIC_VALUE, float_array_1d +MEDIAN, float +STDEV, float +DEPENDENCY_MNEMONIC, string +DEPENDENCY_VALUE, string +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/niriss/niriss_edb_mnemonics.txt b/jwql/database/monitor_table_definitions/niriss/niriss_edb_mnemonics.txt new file mode 100644 index 000000000..8839373c6 --- /dev/null +++ b/jwql/database/monitor_table_definitions/niriss/niriss_edb_mnemonics.txt @@ -0,0 +1,6 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git 
a/jwql/database/monitor_table_definitions/niriss/niriss_edb_time_interval_stats.txt b/jwql/database/monitor_table_definitions/niriss/niriss_edb_time_interval_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/niriss/niriss_edb_time_interval_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_cosmic_ray_query_history.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_cosmic_ray_query_history.txt new file mode 100644 index 000000000..bbea160bb --- /dev/null +++ b/jwql/database/monitor_table_definitions/nirspec/nirspec_cosmic_ray_query_history.txt @@ -0,0 +1,6 @@ +INSTRUMENT, string +APERTURE, string +START_TIME_MJD, float +END_TIME_MJD, float +FILES_FOUND, integer +RUN_MONITOR, bool \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_cosmic_ray_stats.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_cosmic_ray_stats.txt new file mode 100644 index 000000000..4d7a0415a --- /dev/null +++ b/jwql/database/monitor_table_definitions/nirspec/nirspec_cosmic_ray_stats.txt @@ -0,0 +1,8 @@ +APERTURE, string +SOURCE_FILE, string +OBS_START_TIME, datetime +OBS_END_TIME, datetime +JUMP_COUNT, integer +JUMP_RATE, float +MAGNITUDE, integer_array_1d +OUTLIERS, bigint_array_1d diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_blocks_stats.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_blocks_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_blocks_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_daily_stats.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_daily_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_daily_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_every_change_stats.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_every_change_stats.txt new file mode 100644 index 000000000..18d5d91a2 --- /dev/null +++ b/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_every_change_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIME, datetime_array_1d +MNEMONIC_VALUE, float_array_1d +MEDIAN, float +STDEV, float +DEPENDENCY_MNEMONIC, string +DEPENDENCY_VALUE, string +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_mnemonics.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_mnemonics.txt new file mode 100644 index 000000000..8839373c6 --- /dev/null +++ 
b/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_mnemonics.txt @@ -0,0 +1,6 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_time_interval_stats.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_time_interval_stats.txt new file mode 100644 index 000000000..fea6791aa --- /dev/null +++ b/jwql/database/monitor_table_definitions/nirspec/nirspec_edb_time_interval_stats.txt @@ -0,0 +1,9 @@ +MNEMONIC, string +LATEST_QUERY, datetime +TIMES, datetime_array_1d +DATA, float_array_1d +STDEV, float_array_1d +MEDIAN, float_array_1d +MAX, float_array_1d +MIN, float_array_1d +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_ta_query_history.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_ta_query_history.txt new file mode 100644 index 000000000..c6deea152 --- /dev/null +++ b/jwql/database/monitor_table_definitions/nirspec/nirspec_ta_query_history.txt @@ -0,0 +1,8 @@ +INSTRUMENT, string +APERTURE, string +START_TIME_MJD, float +END_TIME_MJD, float +ENTRIES_FOUND, integer +FILES_FOUND, integer +RUN_MONITOR, bool +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nirspec/nirspec_ta_stats.txt b/jwql/database/monitor_table_definitions/nirspec/nirspec_ta_stats.txt new file mode 100644 index 000000000..e39239433 --- /dev/null +++ b/jwql/database/monitor_table_definitions/nirspec/nirspec_ta_stats.txt @@ -0,0 +1,33 @@ +UNCAL_FILENAME, string +APERTURE, string +DETECTOR, string +SUBARRAY, string +READ_PATTERN, string +NINTS, string +NGROUPS, string +EXPSTART, string +FULL_IMAGE_MEAN, float +FULL_IMAGE_STDDEV, float +FULL_IMAGE_N, float_array_1d +FULL_IMAGE_BIN_CENTERS, float_array_1d +DIFF_IMAGE_MEAN, float +DIFF_IMAGE_STDDEV, float +DIFF_IMAGE_N, float_array_1d +DIFF_IMAGE_BIN_CENTERS, float_array_1d +ENTRY_DATE, datetime +AMP1_MEAN, float +AMP1_STDDEV, float +AMP1_N, float_array_1d +AMP1_BIN_CENTERS, float_array_1d +AMP2_MEAN, float +AMP2_STDDEV, float +AMP2_N, float_array_1d +AMP2_BIN_CENTERS, float_array_1d +AMP3_MEAN, float +AMP3_STDDEV, float +AMP3_N, float_array_1d +AMP3_BIN_CENTERS, float_array_1d +AMP4_MEAN, float +AMP4_STDDEV, float +AMP4_N, float_array_1d +AMP4_BIN_CENTERS, float_array_1d \ No newline at end of file diff --git a/jwql/database/reset_database.py b/jwql/database/reset_database.py index e2a932fd7..691a7749f 100644 --- a/jwql/database/reset_database.py +++ b/jwql/database/reset_database.py @@ -24,23 +24,108 @@ ``postgresql+psycopg2://user:password@host:port/database``. 
""" -from jwql.database.database_interface import base, set_read_permissions +import argparse +from psycopg2.errors import UndefinedTable +from sqlalchemy.exc import ProgrammingError +import sys + +from jwql.database.database_interface import base +from jwql.database.database_interface import INSTRUMENT_TABLES, MONITOR_TABLES from jwql.utils.utils import get_config if __name__ == '__main__': + ins_help = "Instrument tables to reset ('all' for all), default 'all'" + mon_help = "Monitor tables to reset ('all' for all), default 'all'" + parser = argparse.ArgumentParser(description='Reset JWQL database tables') + parser.add_argument('-i', '--instrument', metavar='INSTRUMENT', type=str, + help=ins_help, default='all', dest='instrument') + parser.add_argument('-m', '--monitor', metavar='MONITOR', type=str, + help=mon_help, default='all', dest='monitor') + parser.add_argument('--explicitly_reset_production', action='store_true', + default=False, help='Needed to allow reset of Production tables', + dest='explicit_prod') + parser.add_argument('--explicitly_reset_anomalies', action='store_true', + default=False, help='Needed to allow reset of anomaly tables', + dest='explicit_anomaly') + args = parser.parse_args() + + instrument = args.instrument.lower() + monitor = args.monitor.lower() + + if instrument != 'all' and instrument not in INSTRUMENT_TABLES: + sys.stderr.write("ERROR: Unknown instrument {}".format(instrument)) + sys.exit(1) + if monitor != 'all' and monitor not in MONITOR_TABLES: + sys.stderr.write("ERROR: Unknown monitor {}".format(monitor)) + sys.exit(1) + connection_string = get_config()['connection_string'] server_type = connection_string.split('@')[-1][0] - assert server_type != 'p', 'Cannot reset production database!' + if server_type == 'p' and not args.explicit_prod: + msg = "ERROR: Can't reset production databases without explicitly setting the " + msg += "--explicitly_reset_production flag!" + sys.stderr.write(msg) + sys.exit(1) + + if monitor == 'anomaly' and not args.explicit_anomaly: + msg = "ERROR: Can't reset anomaly tables without explicitly setting the " + msg += "--explicitly_reset_anomalies flag!" + sys.stderr.write(msg) + sys.exit(1) + + msg = 'About to reset instruments: [{}] and monitors: [{}] tables for database instance {}. Do you wish to proceed? (y/N)' + response = input(msg.format(instrument, monitor, connection_string)) - prompt = ('About to reset all tables for database instance {}. Do you ' - 'wish to proceed? (y/n)\n'.format(connection_string)) - response = input(prompt) + if response.lower() != 'y': + print("Did not enter y/Y. Stopping.") + sys.exit(0) + else: + tables = [] + if instrument != 'all': + base_tables = INSTRUMENT_TABLES[instrument] + if monitor == 'all': + check_tables = base_tables + else: + check_tables = MONITOR_TABLES[monitor] + elif monitor != 'all': + base_tables = MONITOR_TABLES[monitor] + if instrument == 'all': + check_tables = base_tables + else: + check_tables = INSTRUMENT_TABLES[instrument] + else: # instrument and monitor are both 'all' + if args.explicit_anomaly: # really delete everything + base.metadata.drop_all() + base.metadata.create_all() + print('\nDatabase instance {} has been reset'.format(connection_string)) + sys.exit(0) + else: + for monitor in MONITOR_TABLES: + if monitor != 'anomaly': + for table in MONITOR_TABLES[monitor]: + try: + table.__table__.drop() + except ProgrammingError as pe: + if not isinstance(pe.orig, UndefinedTable): + raise pe + print("Table {} is undefined. 
Skipping drop table.".format(table)) + table.__table__.create() + print('\nDatabase instance {} has been reset'.format(connection_string)) + sys.exit(0) - if response.lower() == 'y': - base.metadata.drop_all() - base.metadata.create_all() - set_read_permissions() + # Choosing what to reset. We want every table in base_tables that is *also* in + # check_tables. + for table in base_tables: + if table in check_tables: + if (table not in MONITOR_TABLES['anomaly']) or (args.explicit_anomaly): + try: + table.__table__.drop() + except ProgrammingError as pe: + if not isinstance(pe.orig, UndefinedTable): + raise pe + print("Table {} is undefined. Skipping drop table.".format(table)) + table.__table__.create() print('\nDatabase instance {} has been reset'.format(connection_string)) diff --git a/jwql/edb/engineering_database.py b/jwql/edb/engineering_database.py index 65a77954e..fea43aafa 100644 --- a/jwql/edb/engineering_database.py +++ b/jwql/edb/engineering_database.py @@ -12,6 +12,7 @@ - Johannes Sahlmann - Mees Fix + - Bryan Hilbert Use --- @@ -42,22 +43,27 @@ data that include the datapoint preceding the requested start time and the datapoint that follows the requested end time. """ - +import calendar from collections import OrderedDict -import copy -from datetime import datetime +from datetime import datetime, timedelta +from numbers import Number import os -import tempfile import warnings +from astropy.io import ascii +from astropy.stats import sigma_clipped_stats from astropy.table import Table from astropy.time import Time +import astropy.units as u from astroquery.mast import Mast from bokeh.embed import components -from bokeh.plotting import figure, show +from bokeh.layouts import column +from bokeh.models import BoxAnnotation, ColumnDataSource, DatetimeTickFormatter, HoverTool, Range1d +from bokeh.plotting import figure, output_file, show, save import numpy as np from jwst.lib.engdb_tools import ENGDB_Service +from jwql.utils.constants import MIRI_POS_RATIO_VALUES from jwql.utils.credentials import get_mast_base_url, get_mast_token from jwql.utils.utils import get_config @@ -72,8 +78,111 @@ class EdbMnemonic: """Class to hold and manipulate results of DMS EngDB queries.""" + def __add__(self, mnem): + """Allow EdbMnemonic instances to be added (i.e. combine their data). + info and metadata will not be touched. Data will be updated. Duplicate + rows due to overlapping dates will be removed. The overlap is assumed to + be limited to a single section of the end of once EdbMnemonic instance and + the beginning of the other instance. Either one of the two instances to be + added can contain the earlier dates. The function will check the starting + date of each instance and treat the earlier starting date as the instance + that is first. Blocks will be updated to account for removed duplicate rows. 
+ + Parameters + ---------- + mnem : jwql.edb.engineering_database.EdbMnemonic + Instance to be added to the current instance - def __init__(self, mnemonic_identifier, start_time, end_time, data, meta, info): + Returns + ------- + new_obj : jwql.edb.engineering_database.EdbMnemonic + Summed instance + """ + # Do not combine two instances of different mnemonics + if self.mnemonic_identifier != mnem.mnemonic_identifier: + raise ValueError((f'Unable to concatenate EdbMnemonic instances for {self.info["tlmMnemonic"]} ' + 'and {mnem.info["tlmMnemonic"]}.')) + + # Case where one instance has an empty data table + if len(self.data["dates"]) == 0: + return mnem + if len(mnem.data["dates"]) == 0: + return self + + if np.min(self.data["dates"]) < np.min(mnem.data["dates"]): + early_dates = self.data["dates"].data + late_dates = mnem.data["dates"].data + early_data = self.data["euvalues"].data + late_data = mnem.data["euvalues"].data + early_blocks = self.blocks + late_blocks = mnem.blocks + else: + early_dates = mnem.data["dates"].data + late_dates = self.data["dates"].data + early_data = mnem.data["euvalues"].data + late_data = self.data["euvalues"].data + early_blocks = mnem.blocks + late_blocks = self.blocks + + # Remove any duplicates, based on the dates entries + # Keep track of the indexes of the removed rows, so that any blocks + # information can be updated + all_dates = np.append(early_dates, late_dates) + unique_dates, unq_idx = np.unique(all_dates, return_index=True) + + # Combine the data and keep only unique elements + all_data = np.append(early_data, late_data) + unique_data = all_data[unq_idx] + + # This assumes that if there is overlap between the two date arrays, that + # the overlap all occurs in a single continuous block at the beginning of + # the later set of dates. It will not do the right thing if you ask it to + # (e.g.) interleave two sets of dates. 
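# Illustrative aside (not part of jwql): a minimal, self-contained sketch of the
# duplicate-removal step described above, using invented dates and values.
# np.unique on the concatenated date array keeps each timestamp once and returns
# the indices needed to subset the concatenated values the same way.
import numpy as np
from datetime import datetime, timedelta

base = datetime(2022, 1, 1)
early_dates = np.array([base + timedelta(minutes=m) for m in range(5)])    # minutes 0-4
late_dates = np.array([base + timedelta(minutes=m) for m in range(3, 8)])  # minutes 3-7, overlapping at 3 and 4
early_vals = np.arange(5, dtype=float)
late_vals = np.arange(3, 8, dtype=float)

all_dates = np.append(early_dates, late_dates)
all_vals = np.append(early_vals, late_vals)
unique_dates, unq_idx = np.unique(all_dates, return_index=True)
unique_vals = all_vals[unq_idx]
print(len(all_dates), '->', len(unique_dates))   # 10 -> 8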
+ overlap_len = len(unique_dates) - len(all_dates) + + # Shift the block values for the later instance to account for any removed + # duplicate rows + if late_blocks[0] is not None: + new_late_blocks = late_blocks - overlap_len + if early_blocks[0] is None: + new_blocks = new_late_blocks + else: + new_blocks = np.append(early_blocks, new_late_blocks) + else: + if early_blocks[0] is not None: + new_blocks = early_blocks + else: + new_blocks = [None] + + new_data = Table([unique_dates, unique_data], names=('dates', 'euvalues')) + new_obj = EdbMnemonic(self.mnemonic_identifier, self.data_start_time, self.data_end_time, + new_data, self.meta, self.info, blocks=new_blocks) + + if self.mean_time_block is not None: + new_obj.mean_time_block = self.mean_time_block + elif mnem.mean_time_block is not None: + new_obj.mean_time_block = mnem.mean_time_block + else: + new_obj.mean_time_block = None + + # Combine any existing mean, median, min, max data, removing overlaps + # All of these are populated in concert with median_times, so we can + # use that to look for overlap values + all_median_times = np.array(list(self.median_times) + list(mnem.median_times)) + srt = np.argsort(all_median_times) + comb_median_times = all_median_times[srt] + unique_median_times, idx_median_times = np.unique(comb_median_times, return_index=True) + + new_obj.median_times = unique_median_times + new_obj.mean = np.array(list(self.mean) + list(mnem.mean))[srt][idx_median_times] + new_obj.median = np.array(list(self.median) + list(mnem.median))[srt][idx_median_times] + new_obj.max = np.array(list(self.max) + list(mnem.max))[srt][idx_median_times] + new_obj.min = np.array(list(self.min) + list(mnem.min))[srt][idx_median_times] + + return new_obj + + def __init__(self, mnemonic_identifier, start_time, end_time, data, meta, info, blocks=[None], + mean_time_block=None): """Populate attributes. Parameters @@ -91,6 +200,12 @@ def __init__(self, mnemonic_identifier, start_time, end_time, data, meta, info): info : dict Auxiliary information on the mnemonic (description, category, unit) + blocks : list + Index numbers corresponding to the beginning of separate blocks + of data. This can be used to calculate separate statistics for + each block. + mean_time_block : astropy.units.quantity.Quantity + Time period over which data are averaged """ self.mnemonic_identifier = mnemonic_identifier @@ -98,55 +213,563 @@ def __init__(self, mnemonic_identifier, start_time, end_time, data, meta, info): self.requested_end_time = end_time self.data = data + self.mean = [] + self.median = [] + self.stdev = [] + self.median_times = [] + self.min = [] + self.max = [] + self.mean_time_block = mean_time_block + + self.meta = meta + self.info = info + self.blocks = np.array(blocks) + if len(self.data) == 0: self.data_start_time = None self.data_end_time = None else: - self.data_start_time = Time(np.min(self.data['dates']), scale='utc') - self.data_end_time = Time(np.max(self.data['dates']), scale='utc') + self.data_start_time = np.min(self.data['dates']) + self.data_end_time = np.max(self.data['dates']) + if isinstance(self.data['euvalues'][0], Number) and 'TlmMnemonics' in self.meta: + self.full_stats() + + def __len__(self): + """Report the length of the data in the instance""" + return len(self.data["dates"]) + + def __mul__(self, mnem): + """Allow EdbMnemonic instances to be multiplied (i.e. combine their data). + info will be updated with new units if possible. Data will be updated. 
+ Blocks will not be updated, under the assumption that the times in self.data + will all be kept, and therefore self.blocks will remain correct after + multiplication. - self.meta = meta - self.info = info + Parameters + ---------- + mnem : jwql.edb.engineering_database.EdbMnemonic + Instance to be multiplied into the current instance + + Returns + ------- + new_obj : jwql.edb.engineering_database.EdbMnemonic + New object where the data table is the product of those in the inputs + """ + # If the data has only a single entry, we won't be able to interpolate, and therefore + # we can't multiply it. Return an empty EDBMnemonic instance + if len(mnem.data["dates"].data) < 2: + mnem.data["dates"] = [] + mnem.data["euvalues"] = [] + return mnem + + # First, interpolate the data in mnem onto the same times as self.data + mnem.interpolate(self.data["dates"].data) + + # Extrapolation will not be done, so make sure that we account for any elements + # that were removed rather than extrapolated. Find all the dates for which + # data exists in both instances. + common_dates, self_idx, mnem_idx = np.intersect1d(self.data["dates"], mnem.data["dates"], + return_indices=True) + + # Adjust self.blocks based on the new dates. For each block, find the index of common_dates + # that corresponds to its previous date, and use that index in the new blocks list. Note that + # we will do this for self.blocks. mnem.blocks is ignored and will not factor in to the + # new blocks list. We have to choose either self.blocks or mnem.blocks to keep, and it makes + # more sense to keep with self.blocks since this is a method of self.data + new_blocks = [0] + for block in self.blocks: + try: + prev_date = self.data['dates'][block] + before = np.where(common_dates == self.data['dates'][block])[0] + + if len(before) > 0: + new_blocks.append(before[0]) # + 1) + except IndexError: + # The final block value is usually equal to the length of the array, and will + # therefore cause an Index Error in the lines above. Ignore that error here. + # This way, if the final block is less than the length of the array, we can + # still process it properly. 
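# Illustrative aside (not part of jwql): a standalone sketch of the approach this
# method uses -- restrict two series to their common timestamps, then multiply
# element-wise -- plus how two unit strings might be combined with astropy. All
# values and units here are invented.
import numpy as np
import astropy.units as u

dates_a = np.arange('2022-01-01T00:00', '2022-01-01T00:05', dtype='datetime64[m]')
dates_b = np.arange('2022-01-01T00:02', '2022-01-01T00:07', dtype='datetime64[m]')
vals_a = np.array([1., 2., 3., 4., 5.])
vals_b = np.full(5, 10.)

# Keep only the timestamps present in both series, then multiply element-wise
common, idx_a, idx_b = np.intersect1d(dates_a, dates_b, return_indices=True)
product = vals_a[idx_a] * vals_b[idx_b]
print(product)                                   # [30. 40. 50.]

# Combined unit for the product, analogous to the info['unit'] handling below
print((u.Unit('V') * u.Unit('A')).compose()[0])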
+ pass + + # The last element of blocks should be the final element of the data + if new_blocks[-1] != len(common_dates): + new_blocks.append(len(common_dates)) + + # Strip away any rows from the tables that are not common to both instances + self_data = self.data[self_idx] + mnem_data = mnem.data[mnem_idx] + + # Mulitply + new_tab = Table() + new_tab["dates"] = common_dates + new_tab["euvalues"] = self_data["euvalues"] * mnem_data["euvalues"] + + new_obj = EdbMnemonic(self.mnemonic_identifier, self.requested_start_time, self.requested_end_time, + new_tab, self.meta, self.info, blocks=new_blocks) + if self.mean_time_block is not None: + new_obj.mean_time_block = self.mean_time_block + elif mnem.mean_time_block is not None: + new_obj.mean_time_block = mnem.mean_time_block + else: + new_obj.mean_time_block = None + + try: + combined_unit = (u.Unit(self.info['unit']) * u.Unit(mnem.info['unit'])).compose()[0] + new_obj.info['unit'] = f'{combined_unit}' + new_obj.info['tlmMnemonic'] = f'{self.info["tlmMnemonic"]} * {mnem.info["tlmMnemonic"]}' + new_obj.info['description'] = f'({self.info["description"]}) * ({mnem.info["description"]})' + except KeyError: + pass + return new_obj def __str__(self): """Return string describing the instance.""" return 'EdbMnemonic {} with {} records between {} and {}'.format( - self.mnemonic_identifier, len(self.data), self.data_start_time.isot, - self.data_end_time.isot) + self.mnemonic_identifier, len(self.data), self.data_start_time, + self.data_end_time) - def interpolate(self, times, **kwargs): - """Interpolate value at specified times.""" - raise NotImplementedError + def block_stats(self, sigma=3, ignore_vals=[], ignore_edges=False, every_change=False): + """Calculate stats for a mnemonic where we want a mean value for + each block of good data, where blocks are separated by times where + the data are ignored. - def bokeh_plot(self, show_plot=False): - """Make basic bokeh plot showing value as a function of time. + Parameters + ---------- + sigma : int + Number of sigma to use for sigma clipping + + ignore_vals : list + Any elements with values matching values in this list will be ignored + + ignore_edges : bool + If True, the first and last elements of each block will be ignored. This + is intended primarily for the MIRI ever_change data in IMIR_HK_xxx_POS_RATIO, + where the position ratio values are not exactly synced up with the IMIR_HK_xxx_CUR_POS + value. In that case, the first or last elements can have values from a time when + the ratio has not yet settled to its final value. + + every_change : bool + If True, the data are assumed to be every_change data. 
This is used when dealing with + blocks that exclusively contain data to be ignored + """ + means = [] + medians = [] + maxs = [] + mins = [] + stdevs = [] + medtimes = [] + remove_change_indexes = [] + if type(self.data["euvalues"].data[0]) not in [np.str_, str]: + for i, index in enumerate(self.blocks[0:-1]): + # Protect against repeated block indexes + if index < self.blocks[i + 1]: + if self.meta['TlmMnemonics'][0]['AllPoints'] != 0: + block = self.data["euvalues"].data[index:self.blocks[i + 1]] + + empty_block = False + uvals = np.unique(block) + if np.array_equal(np.array(sorted(ignore_vals)), uvals): + empty_block = True + meanval, medianval, stdevval, maxval, minval = np.nan, np.nan, np.nan, np.nan, np.nan + + # If the block is composed entirely of data to be ignored, then we don't + # add new mean, median, max, min, stdev values, and we also need to remove + # the associated entry from self.every_change_values and self.blocks + # (only in the case of every_change data) + if every_change: + remove_change_indexes.append(i) + + else: + # If there are values to be ignored, remove those from the array + # of elements. Keep track of whether the first and last are ignored. + ignore_first = False + ignore_last = False + for ignore_val in ignore_vals: + ignore_idx = np.where(block == ignore_val) + block = np.delete(block, ignore_idx) + if 0 in ignore_idx[0]: + ignore_first = True + if len(block) - 1 in ignore_idx[0]: + ignore_last = True + + # If we want to ignore the first and last elements, do that here + if ignore_edges: + if len(block) > 3: + if not ignore_last: + block = block[0:-1] + if not ignore_first: + block = block[2:] + + meanval, medianval, stdevval = sigma_clipped_stats(block, sigma=sigma) + maxval = np.max(block) + minval = np.min(block) + else: + meanval, medianval, stdevval, maxval, minval = change_only_stats(self.data["dates"].data[index:self.blocks[i + 1]], + self.data["euvalues"].data[index:self.blocks[i + 1]], + sigma=sigma) + if np.isfinite(meanval): + medtimes.append(calc_median_time(self.data["dates"].data[index:self.blocks[i + 1]])) + means.append(meanval) + medians.append(medianval) + maxs.append(maxval) + mins.append(minval) + stdevs.append(stdevval) + else: + pass + + # If there were blocks composed entirely of bad data, meaning no mean values were + # calculated, remove those every change values and block values from the EdbMnemonic + # instance. 
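# Illustrative aside (not part of jwql): per-block statistics on invented data.
# Each block runs from blocks[i] to blocks[i + 1], and sigma_clipped_stats returns
# the sigma-clipped mean, median, and standard deviation for that slice.
import numpy as np
from astropy.stats import sigma_clipped_stats

values = np.array([10.0, 10.1, 9.9, 10.2, 20.0, 20.2, 19.8, 20.1])
blocks = [0, 4, 8]

for i in range(len(blocks) - 1):
    block = values[blocks[i]:blocks[i + 1]]
    mean, median, stdev = sigma_clipped_stats(block, sigma=3)
    print(f'block {i}: mean={mean:.2f} median={median:.2f} stdev={stdev:.3f}')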
+ if every_change: + if len(remove_change_indexes) > 0: + self.every_change_values = np.delete(self.every_change_values, remove_change_indexes) + self.blocks = np.delete(self.blocks, remove_change_indexes) + + else: + # If the data are strings, then set the mean to be the data value at the block index + for i, index in enumerate(self.blocks[0:-1]): + # Protect against repeated block indexes + if index < self.blocks[i + 1]: + meanval = self.data["euvalues"].data[index] + medianval = meanval + stdevval = 0 + medtimes.append(calc_median_time(self.data["dates"].data[index:self.blocks[i + 1]])) + means.append(meanval) + medians.append(medianval) + stdevs.append(stdevval) + maxs.append(meanval) + mins.append(meanval) + #if hasattr(self, 'every_change_values'): + # updated_every_change_vals.append(self.every_change_values[i + 1]) + self.mean = means + self.median = medians + self.stdev = stdevs + self.median_times = medtimes + self.max = maxs + self.min = mins + + def block_stats_filter_positions(self, sigma=5): + """Calculate stats for a mnemonic where we want a mean value for + each block of good data, where blocks are separated by times where + the data are ignored. In this case, there are custom adjustments meant + to work on the MIRI filter position mnemonics (e.g. IMIR_HK_GW14_POS_RATIO, + IMIR_HK_FW_POS_RATIO). Parameters ---------- - show_plot : boolean - A switch to show the plot in the browser or not. + sigma : int + Number of sigma to use for sigma clipping + """ + means = [] + medians = [] + maxs = [] + mins = [] + stdevs = [] + medtimes = [] + remove_change_indexes = [] + if type(self.data["euvalues"].data[0]) not in [np.str_, str]: + for i, index in enumerate(self.blocks[0:-1]): + # Protect against repeated block indexes + if index < self.blocks[i + 1]: + if self.meta['TlmMnemonics'][0]['AllPoints'] != 0: + block = self.data["euvalues"].data[index:self.blocks[i + 1]] + filter_value = self.every_change_values[i] + pos_type = self.mnemonic_identifier.split('_')[2] + if pos_type not in MIRI_POS_RATIO_VALUES: + raise ValueError((f'Unrecognized filter position type: {pos_type} in {self.mnemonic_identifier}.' + f'Expected one of {MIRI_POS_RATIO_VALUES.keys()}')) + if filter_value not in MIRI_POS_RATIO_VALUES[pos_type]: + raise ValueError((f'Unrecognized filter value: {filter_value} in block {i} of {self.mnemonic_identifier}')) + + nominal_value, std_value = MIRI_POS_RATIO_VALUES[pos_type][filter_value] + max_value = nominal_value + sigma * std_value + min_value = nominal_value - sigma * std_value + + empty_block = False + good = np.where((block <= max_value) & (block >= min_value))[0] + if len(good) == 0: + empty_block = True + meanval, medianval, stdevval, maxval, minval = np.nan, np.nan, np.nan, np.nan, np.nan + + # If the block is composed entirely of data to be ignored, then we don't + # add new mean, median, max, min, stdev values, and we also need to remove + # the associated entry from self.every_change_values and self.blocks + # (only in the case of every_change data) + remove_change_indexes.append(i) + + else: + # If there are values to be ignored, remove those from the array + # of elements. Keep track of whether the first and last are ignored. 
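# Illustrative aside (not part of jwql): keeping only position-ratio readings that
# fall within nominal_value +/- sigma * std_value, as described above. The nominal
# value, scatter, and readings here are invented; in the monitor itself they come
# from MIRI_POS_RATIO_VALUES.
import numpy as np

nominal_value, std_value = 0.5, 0.01
sigma = 5
block = np.array([0.499, 0.501, 0.350, 0.502, 0.498])   # 0.350 mimics a not-yet-settled reading

good = np.where((block <= nominal_value + sigma * std_value)
                & (block >= nominal_value - sigma * std_value))[0]
print(block[good], block[good].mean())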
+ block = block[good] + meanval, medianval, stdevval = sigma_clipped_stats(block, sigma=sigma) + maxval = np.max(block) + minval = np.min(block) + + else: + meanval, medianval, stdevval, maxval, minval = change_only_stats(self.data["dates"].data[index:self.blocks[i + 1]], + self.data["euvalues"].data[index:self.blocks[i + 1]], + sigma=sigma) + if np.isfinite(meanval): + #this is preventing the nans above from being added. not sure what to do here. + #bokeh cannot deal with nans. but we need entries in order to have the blocks indexes + #remain correct. but maybe we dont care about the block indexes after averaging + medtimes.append(calc_median_time(self.data["dates"].data[index:self.blocks[i + 1]][good])) + means.append(meanval) + medians.append(medianval) + maxs.append(maxval) + mins.append(minval) + stdevs.append(stdevval) + + # If there were blocks composed entirely of bad data, meaning no mean values were + # calculated, remove those every change values and block values from the EdbMnemonic + # instance. + if len(remove_change_indexes) > 0: + self.every_change_values = np.delete(self.every_change_values, remove_change_indexes) + self.blocks = np.delete(self.blocks, remove_change_indexes) + + else: + # If the data are strings, then set the mean to be the data value at the block index + for i, index in enumerate(self.blocks[0:-1]): + # Protect against repeated block indexes + if index < self.blocks[i + 1]: + meanval = self.data["euvalues"].data[index] + medianval = meanval + stdevval = 0 + medtimes.append(calc_median_time(self.data["dates"].data[index:self.blocks[i + 1]])) + means.append(meanval) + medians.append(medianval) + stdevs.append(stdevval) + maxs.append(meanval) + mins.append(meanval) + + self.mean = means + self.median = medians + self.stdev = stdevs + self.median_times = medtimes + self.max = maxs + self.min = mins + + def bokeh_plot(self, show_plot=False, savefig=False, out_dir='./', nominal_value=None, yellow_limits=None, + red_limits=None, title=None, xrange=(None, None), yrange=(None, None), return_components=True, + return_fig=False, plot_data=True, plot_mean=False, plot_median=False, plot_max=False, plot_min=False): + """Make basic bokeh plot showing value as a function of time. Optionally add a line indicating + nominal (expected) value, as well as yellow and red background regions to denote values that + may be unexpected. + + Parameters + ---------- + show_plot : bool + If True, show plot on screen rather than returning div and script + + savefig : bool + If True, file is saved to html file + + out_dir : str + Directory into which the html file is saved + + nominal_value : float + Expected or nominal value for the telemetry. If provided, a horizontal dashed line + at this value will be added. + + yellow_limits : list + 2-element list giving the lower and upper limits outside of which the telemetry value + is considered non-nominal. If provided, the area of the plot between these two values + will be given a green background, and that outside of these limits will have a yellow + background. + + red_limits : list + 2-element list giving the lower and upper limits outside of which the telemetry value + is considered worse than in the yellow region. If provided, the area of the plot outside + of these two values will have a red background. + + title : str + Will be used as the plot title. If None, the mnemonic name and description (if present) + will be used as the title + + xrange : tuple + Tuple of min, max datetime values to use as the plot range in the x direction. 
+ + yrange : tuple + Tuple of min, max datetime values to use as the plot range in the y direction. + + return_components : bool + If True, return the plot as div and script components + + return_fig : bool + If True, return the plot as a bokeh Figure object + + plot_data : bool + If True, plot the data in the EdbMnemonic.data table + + plot_mean : bool + If True, also plot the line showing the self.mean values + + plot_median : bool + If True, also plot the line showing the self.median values + + plot_max : bool + If True, also plot the line showing the self.max values + + plot_min : bool + If True, also plot the line showing the self.min values Returns ------- - [div, script] : list - List containing the div and js representations of figure. + obj : list or bokeh.plotting.figure + If return_components is True, return a list containing [div, script] + If return_figre is True, return the bokeh figure itself """ + # Make sure that only one output type is specified, or bokeh will get mad + options = np.array([show_plot, savefig, return_components, return_fig]) + if np.sum(options) > 1: + trues = np.where(options)[0] + raise ValueError((f'{options[trues]} are set to True in plot_every_change_data. Bokeh ' + 'will only allow one of these to be True.')) + + # yellow and red limits must come in pairs + if yellow_limits is not None: + if len(yellow_limits) != 2: + yellow_limits = None + if red_limits is not None: + if len(red_limits) != 2: + red_limits = None + + # If there are no data in the table, then produce an empty plot in the date + # range specified by the requested start and end time + if len(self.data["dates"]) == 0: + null_dates = [self.requested_start_time, self.requested_end_time] + null_vals = [0, 0] + source = ColumnDataSource(data={'x': null_dates, 'y': null_vals}) + else: + source = ColumnDataSource(data={'x': self.data['dates'], 'y': self.data['euvalues']}) - abscissa = self.data['dates'] - ordinate = self.data['euvalues'] - - p1 = figure(tools='pan,box_zoom,reset,wheel_zoom,save', x_axis_type='datetime', - title=self.mnemonic_identifier, x_axis_label='Time', - y_axis_label='Value ({})'.format(self.info['unit'])) - p1.line(abscissa, ordinate, line_width=1, line_color='blue', line_dash='dashed') - p1.circle(abscissa, ordinate, color='blue') + if savefig: + filename = os.path.join(out_dir, f"telem_plot_{self.mnemonic_identifier.replace(' ','_')}.html") - if show_plot: - show(p1) + if self.info is None: + units = 'Unknown' else: - script, div = components(p1) + units = self.info["unit"] + + # Create a useful plot title if necessary + if title is None: + if 'description' in self.info: + if len(self.info['description']) > 0: + title = f'{self.mnemonic_identifier} - {self.info["description"]}' + else: + title = self.mnemonic_identifier + else: + title = self.mnemonic_identifier + + fig = figure(tools='pan,box_zoom,reset,wheel_zoom,save', x_axis_type='datetime', + title=title, x_axis_label='Time', y_axis_label=f'{units}') + + # For cases where the plot is empty or contains only a single point, force the + # plot range to something reasonable + if len(self.data["dates"]) < 2: + fig.x_range = Range1d(self.requested_start_time - timedelta(days=1), self.requested_end_time) + bottom, top = (-1, 1) + if yellow_limits is not None: + bottom, top = yellow_limits + if red_limits is not None: + bottom, top = red_limits + fig.y_range = Range1d(bottom, top) + + if plot_data: + data = fig.scatter(x='x', y='y', line_width=1, line_color='blue', source=source) + data_line = fig.line(x='x', y='y', 
line_width=1, line_color='blue', source=source) + hover_tool = HoverTool(tooltips=[('Value', '@y'), + ('Date', '@x{%d %b %Y %H:%M:%S}') + ], mode='mouse', renderers=[data]) + hover_tool.formatters = {'@x': 'datetime'} + fig.tools.append(hover_tool) + + # Plot the mean value over time + if len(self.median_times) > 0: + if self.median_times[0] is not None: + if plot_mean: + source_mean = ColumnDataSource(data={'mean_x': self.median_times, 'mean_y': self.mean}) + mean_data = fig.scatter(x='mean_x', y='mean_y', line_width=1, line_color='orange', alpha=0.75, source=source_mean) + mean_hover_tool = HoverTool(tooltips=[('Mean', '@mean_y'), + ('Date', '@mean_x{%d %b %Y %H:%M:%S}') + ], mode='mouse', renderers=[mean_data]) + mean_hover_tool.formatters = {'@mean_x': 'datetime'} + fig.tools.append(mean_hover_tool) + + if plot_median: + source_median = ColumnDataSource(data={'median_x': self.median_times, 'median_y': self.median}) + median_data = fig.scatter(x='median_x', y='median_y', line_width=1, line_color='orangered', alpha=0.75, source=source_median) + median_hover_tool = HoverTool(tooltips=[('Median', '@median_y'), + ('Date', '@median_x{%d %b %Y %H:%M:%S}') + ], mode='mouse', renderers=[median_data]) + median_hover_tool.formatters = {'@median_x': 'datetime'} + fig.tools.append(median_hover_tool) + + # If the max and min arrays are to be plotted, create columndata sources for them as well + if plot_max: + source_max = ColumnDataSource(data={'max_x': self.median_times, 'max_y': self.max}) + max_data = fig.scatter(x='max_x', y='max_y', line_width=1, color='black', line_color='black', source=source_max) + max_hover_tool = HoverTool(tooltips=[('Max', '@max_y'), + ('Date', '@max_x{%d %b %Y %H:%M:%S}') + ], mode='mouse', renderers=[max_data]) + max_hover_tool.formatters = {'@max_x': 'datetime'} + fig.tools.append(max_hover_tool) + + if plot_min: + source_min = ColumnDataSource(data={'min_x': self.median_times, 'min_y': self.min}) + min_data = fig.scatter(x='min_x', y='min_y', line_width=1, color='black', line_color='black', source=source_min) + minn_hover_tool = HoverTool(tooltips=[('Min', '@min_y'), + ('Date', '@min_x{%d %b %Y %H:%M:%S}') + ], mode='mouse', renderers=[min_data]) + min_hover_tool.formatters = {'@min_x': 'datetime'} + fig.tools.append(min_hover_tool) + + if len(self.data["dates"]) == 0: + data.visible = False + if nominal_value is not None: + fig.line(null_dates, np.repeat(nominal_value, len(null_dates)), color='black', + line_dash='dashed', alpha=0.5) + else: + # If there is a nominal value provided, plot a dashed line for it + if nominal_value is not None: + fig.line(self.data['dates'], np.repeat(nominal_value, len(self.data['dates'])), color='black', + line_dash='dashed', alpha=0.5) + + # If limits for warnings/errors are provided, create colored background boxes + if yellow_limits is not None or red_limits is not None: + fig = add_limit_boxes(fig, yellow=yellow_limits, red=red_limits) + + # Make the x axis tick labels look nice + fig.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], + seconds=["%d %b %H:%M:%S.%3N"], + hours=["%d %b %H:%M"], + days=["%d %b %H:%M"], + months=["%d %b %Y %H:%M"], + years=["%d %b %Y"] + ) + fig.xaxis.major_label_orientation = np.pi / 4 + + # Force the axes' range if requested + if xrange[0] is not None: + fig.x_range.start = xrange[0].timestamp() * 1000. + if xrange[1] is not None: + fig.x_range.end = xrange[1].timestamp() * 1000. 
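# Illustrative aside (not part of jwql): Bokeh datetime axes work in milliseconds
# since the Unix epoch, which is why a forced x range is built from
# datetime.timestamp() * 1000 above. A standalone sketch with invented, UTC times:
from datetime import datetime, timezone

start = datetime(2022, 7, 1, tzinfo=timezone.utc)
end = datetime(2022, 7, 2, tzinfo=timezone.utc)
x_start_ms = start.timestamp() * 1000.
x_end_ms = end.timestamp() * 1000.
print(x_start_ms, x_end_ms)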
+ if yrange[0] is not None: + fig.y_range.start = yrange[0] + if yrange[1] is not None: + fig.y_range.end = yrange[1] + + if savefig: + output_file(filename=filename, title=self.mnemonic_identifier) + save(fig) + if show_plot: + show(fig) + if return_components: + script, div = components(fig) return [div, script] + if return_fig: + return fig def bokeh_plot_text_data(self, show_plot=False): """Make basic bokeh plot showing value as a function of time. @@ -192,8 +815,120 @@ def bokeh_plot_text_data(self, show_plot=False): return [div, script] + def change_only_add_points(self): + """Tweak change-only data. Add an additional data point immediately prior to + each original data point, with a value equal to that in the previous data point. + This will help with filtering data based on conditions later, and will create a + plot that looks more realistic, with only horizontal and vertical lines. + """ + new_dates = [self.data["dates"][0]] + new_vals = [self.data["euvalues"][0]] + delta_t = timedelta(microseconds=1) + for i, row in enumerate(self.data["dates"][1:]): + new_dates.append(self.data["dates"][i + 1] - delta_t) + new_vals.append(self.data["euvalues"][i]) + new_dates.append(self.data["dates"][i + 1]) + new_vals.append(self.data["euvalues"][i + 1]) + new_table = Table() + new_table["dates"] = new_dates + new_table["euvalues"] = new_vals + self.data = new_table + + # Update the metadata to say that this is no longer change-only data + self.meta['TlmMnemonics'][0]['AllPoints'] = 1 + + def daily_stats(self, sigma=3): + """Calculate the statistics for each day in the data + contained in data["data"]. Should we add a check for a + case where the final block of time is <<1 day? + + Parameters + ---------- + sigma : int + Number of sigma to use for sigma clipping + """ + if len(self.data["euvalues"]) == 0: + self.mean = [] + self.median = [] + self.stdev = [] + self.median_times = [] + self.max = [] + self.min = [] + else: + if type(self.data["euvalues"].data[0]) not in [np.str_, str]: + min_date = np.min(self.data["dates"]) + date_range = np.max(self.data["dates"]) - min_date + num_days = date_range.days + num_seconds = date_range.seconds + range_days = num_days + 1 + + # Generate a list of times to use as boundaries for calculating means + limits = np.array([min_date + timedelta(days=x) for x in range(range_days)]) + limits = np.append(limits, np.max(self.data["dates"])) + + means, meds, devs, maxs, mins, times = [], [], [], [], [], [] + for i in range(len(limits) - 1): + good = np.where((self.data["dates"] >= limits[i]) & (self.data["dates"] < limits[i + 1])) + + if self.meta['TlmMnemonics'][0]['AllPoints'] != 0: + avg, med, dev = sigma_clipped_stats(self.data["euvalues"][good], sigma=sigma) + maxval = np.max(self.data["euvalues"][good]) + minval = np.min(self.data["euvalues"][good]) + else: + avg, med, dev, maxval, minval = change_only_stats(self.data["dates"][good], self.data["euvalues"][good], sigma=sigma) + means.append(avg) + meds.append(med) + maxs.append(maxval) + mins.append(minval) + devs.append(dev) + times.append(limits[i] + (limits[i + 1] - limits[i]) / 2.) 
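# Illustrative aside (not part of jwql): daily averaging of invented hourly samples,
# following the same pattern as above -- build day boundaries with timedelta, then
# average whatever falls inside each [limits[i], limits[i + 1]) window.
import numpy as np
from datetime import datetime, timedelta

dates = np.array([datetime(2022, 1, 1) + timedelta(hours=h) for h in range(72)])
values = np.arange(72, dtype=float)

min_date = dates.min()
num_days = (dates.max() - min_date).days + 1
limits = np.array([min_date + timedelta(days=d) for d in range(num_days)])
limits = np.append(limits, dates.max())

for i in range(len(limits) - 1):
    good = (dates >= limits[i]) & (dates < limits[i + 1])
    midpoint = limits[i] + (limits[i + 1] - limits[i]) / 2
    print(midpoint, values[good].mean())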
+ self.mean = means + self.median = meds + self.stdev = devs + self.median_times = times + self.max = maxs + self.min = mins + else: + # If the mnemonic data are strings, we don't compute statistics + self.mean = [] + self.median = [] + self.stdev = [] + self.median_times = [] + self.max = [] + self.min = [] + + def full_stats(self, sigma=3): + """Calculate the mean/median/stdev of the full compliment of data + + Parameters + ---------- + sigma : int + Number of sigma to use for sigma clipping + """ + if type(self.data["euvalues"].data[0]) not in [np.str_, str]: + if self.meta['TlmMnemonics'][0]['AllPoints'] != 0: + self.mean, self.median, self.stdev = sigma_clipped_stats(self.data["euvalues"], sigma=sigma) + self.max = np.max(self.data["euvalues"]) + self.min = np.min(self.data["euvalues"]) + else: + self.mean, self.median, self.stdev, self.max, self.min = change_only_stats(self.data["dates"], self.data["euvalues"], sigma=sigma) + self.mean = [self.mean] + self.median = [self.median] + self.stdev = [self.stdev] + self.max = [self.max] + self.min = [self.min] + self.median_times = [calc_median_time(self.data["dates"])] + else: + # If the mnemonic values are strings, don't compute statistics + self.mean = [] + self.median = [] + self.stdev = [] + self.max = [] + self.min = [] + self.median_times = [] + def get_table_data(self): - """Get data needed to make interactivate table in template.""" + """Get data needed to make interactive table in template.""" # generate tables for display and download in web app display_table = copy.deepcopy(self.data) @@ -209,9 +944,630 @@ def get_table_data(self): return html_file_content + def interpolate(self, times): + """Interpolate data euvalues at specified datetimes. + + Parameters + ---------- + times : list + List of datetime objects describing the times to interpolate to + """ + new_tab = Table() + + # Change-only data is unique and needs its own way to be interpolated + if self.meta['TlmMnemonics'][0]['AllPoints'] == 0: + new_values = [] + new_dates = [] + for time in times: + latest = np.where(self.data["dates"] <= time)[0] + if len(latest) > 0: + new_values.append(self.data["euvalues"][latest[-1]]) + new_dates.append(time) + if len(new_values) > 0: + new_tab["euvalues"] = np.array(new_values) + new_tab["dates"] = np.array(new_dates) + + # This is for non change-only data + else: + # We can only linearly interpolate if we have more than one entry + if len(self.data["dates"]) >= 2: + interp_times = np.array([create_time_offset(ele, self.data["dates"][0]) for ele in times]) + mnem_times = np.array([create_time_offset(ele, self.data["dates"][0]) for ele in self.data["dates"]]) + + # Do not extrapolate. Any requested interoplation times that are outside the range + # or the original data will be ignored. 
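# Illustrative aside (not part of jwql): np.interp needs plain numbers, so datetimes
# are converted to seconds from a reference epoch first, and requested times outside
# the original range are dropped rather than extrapolated. All values are invented.
import numpy as np
from datetime import datetime, timedelta

epoch = datetime(2022, 1, 1)
old_dates = [epoch + timedelta(seconds=s) for s in (0, 60, 120)]
old_vals = np.array([0., 10., 20.])
new_dates = [epoch + timedelta(seconds=s) for s in (30, 90, 200)]

old_t = np.array([(d - epoch).total_seconds() for d in old_dates])
new_t = np.array([(d - epoch).total_seconds() for d in new_dates])

inside = (new_t >= old_t[0]) & (new_t <= old_t[-1])   # the 200 s point lies outside and is dropped
print(np.interp(new_t[inside], old_t, old_vals))      # [ 5. 15.]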
+ good_times = ((interp_times >= mnem_times[0]) & (interp_times <= mnem_times[-1])) + interp_times = interp_times[good_times] + + new_tab["euvalues"] = np.interp(interp_times, mnem_times, self.data["euvalues"]) + new_tab["dates"] = np.array([add_time_offset(ele, self.data["dates"][0]) for ele in interp_times]) + + else: + # If there are not enough data and we are unable to interpolate, + # then set the data table to be empty + new_tab["euvalues"] = np.array([]) + new_tab["dates"] = np.array([]) + + # Adjust any block values to account for the interpolated data + new_blocks = [] + if self.blocks is not None: + for index in self.blocks[0:-1]: + good = np.where(new_tab["dates"] >= self.data["dates"][index])[0] + + if len(good) > 0: + new_blocks.append(good[0]) + + # Add en entry for the final element if it's not already there + if len(new_blocks) > 0: + if new_blocks[-1] < len(new_tab["dates"]): + new_blocks.append(len(new_tab["dates"])) + self.blocks = np.array(new_blocks) + + # Update the data in the instance. + self.data = new_tab + + def plot_data_plus_devs(self, use_median=False, show_plot=False, savefig=False, out_dir='./', nominal_value=None, yellow_limits=None, + red_limits=None, xrange=(None, None), yrange=(None, None), title=None, return_components=True, + return_fig=False, plot_max=False, plot_min=False): + """Make basic bokeh plot showing value as a function of time. Optionally add a line indicating + nominal (expected) value, as well as yellow and red background regions to denote values that + may be unexpected. Also add a plot of the mean value over time and in a second figure, a plot of + the devaition from the mean. + + Parameters + ---------- + use_median : bool + If True, plot the median rather than the mean, as well as the deviation from the + median rather than from the mean + + show_plot : bool + If True, show plot on screen rather than returning div and script + + savefig : bool + If True, file is saved to html file + + out_dir : str + Directory into which the html file is saved + + nominal_value : float + Expected or nominal value for the telemetry. If provided, a horizontal dashed line + at this value will be added. + + yellow_limits : list + 2-element list giving the lower and upper limits outside of which the telemetry value + is considered non-nominal. If provided, the area of the plot between these two values + will be given a green background, and that outside of these limits will have a yellow + background. + + red_limits : list + 2-element list giving the lower and upper limits outside of which the telemetry value + is considered worse than in the yellow region. If provided, the area of the plot outside + of these two values will have a red background. + + xrange : tuple + Tuple of min, max datetime values to use as the plot range in the x direction. + + yrange : tuple + Tuple of min, max datetime values to use as the plot range in the y direction. + + title : str + Will be used as the plot title. 
If None, the mnemonic name and description (if present) + will be used as the title + + return_components : bool + If True, return the plot as div and script components + + return_fig : bool + If True, return the plot as a bokeh Figure object + + plot_max : bool + If True, also plot the line showing the self.max values + + plot_min : bool + If True, also plot the line showing the self.min values + + Returns + ------- + obj : list or bokeh.plotting.figure + If return_components is True, return a list containing [div, script] + If return_figre is True, return the bokeh figure itself + """ + # Make sure that only one output type is specified, or bokeh will get mad + options = np.array([show_plot, savefig, return_components, return_fig]) + if np.sum(options) > 1: + trues = np.where(options)[0] + raise ValueError((f'{options[trues]} are set to True in plot_every_change_data. Bokeh ' + 'will only allow one of these to be True.')) + + # If there are no data in the table, then produce an empty plot in the date + # range specified by the requested start and end time + if len(self.data["dates"]) == 0: + null_dates = [self.requested_start_time, self.requested_end_time] + null_vals = [0, 0] + data_dates = null_dates + data_vals = null_vals + else: + data_dates = self.data['dates'] + data_vals = self.data['euvalues'] + source = ColumnDataSource(data={'x': data_dates, 'y': data_vals}) + + # yellow and red limits must come in pairs + if yellow_limits is not None: + if len(yellow_limits) != 2: + yellow_limits = None + if red_limits is not None: + if len(red_limits) != 2: + red_limits = None + + if savefig: + filename = os.path.join(out_dir, f"telem_plot_{self.mnemonic_identifier.replace(' ','_')}.html") + + if self.info is None: + units = 'Unknown' + else: + units = self.info["unit"] + + # Create a useful plot title if necessary + if title is None: + if 'description' in self.info: + if len(self.info['description']) > 0: + title = f'{self.mnemonic_identifier} - {self.info["description"]}' + else: + title = self.mnemonic_identifier + else: + title = self.mnemonic_identifier + + fig = figure(tools='pan,box_zoom,reset,wheel_zoom,save', x_axis_type=None, + title=title, x_axis_label='Time', + y_axis_label=f'{units}') + + # For cases where the plot is empty or contains only a single point, force the + # plot range to something reasonable + if len(self.data["dates"]) < 2: + fig.x_range = Range1d(self.requested_start_time - timedelta(days=1), self.requested_end_time) + bottom, top = (-1, 1) + if yellow_limits is not None: + bottom, top = yellow_limits + if red_limits is not None: + bottom, top = red_limits + fig.y_range = Range1d(bottom, top) + + data = fig.scatter(x='x', y='y', line_width=1, line_color='blue', source=source) + + # Plot the mean value over time + if len(self.median_times) > 0: + if self.median_times[0] is not None: + if use_median: + meanvals = self.median + else: + meanvals = self.mean + + mean_data = fig.line(self.median_times, meanvals, line_width=1, line_color='orange', alpha=0.75) + + # If the max and min arrays are to be plotted, create columndata sources for them as well + if plot_max: + source_max = ColumnDataSource(data={'max_x': self.median_times, 'max_y': self.max}) + fig.scatter(x='max_x', y='max_y', line_width=1, line_color='black', source=source_max) + + if plot_min: + source_min = ColumnDataSource(data={'min_x': self.median_times, 'min_y': self.min}) + fig.scatter(x='min_x', y='min_y', line_width=1, line_color='black', source=source_min) + + if len(self.data["dates"]) == 0: + 
data.visible = False + if nominal_value is not None: + fig.line(null_dates, np.repeat(nominal_value, len(null_dates)), color='black', + line_dash='dashed', alpha=0.5) + else: + # If there is a nominal value provided, plot a dashed line for it + if nominal_value is not None: + fig.line(self.data['dates'], np.repeat(nominal_value, len(self.data['dates'])), color='black', + line_dash='dashed', alpha=0.5) + + # If limits for warnings/errors are provided, create colored background boxes + if yellow_limits is not None or red_limits is not None: + fig = add_limit_boxes(fig, yellow=yellow_limits, red=red_limits) + + hover_tool = HoverTool(tooltips=[('Value', '@y'), + ('Date', '@x{%d %b %Y %H:%M:%S}') + ], mode='mouse', renderers=[data]) + hover_tool.formatters = {'@x': 'datetime'} + + fig.tools.append(hover_tool) + + # Force the axes' range if requested + if xrange[0] is not None: + fig.x_range.start = xrange[0].timestamp() * 1000. + if xrange[1] is not None: + fig.x_range.end = xrange[1].timestamp() * 1000. + if yrange[0] is not None: + fig.y_range.start = yrange[0] + if yrange[1] is not None: + fig.y_range.end = yrange[1] + + # Now create a second plot showing the devitation from the mean + fig_dev = figure(height=250, x_range=fig.x_range, tools="xpan,xwheel_zoom,xbox_zoom,reset", y_axis_location="left", + x_axis_type='datetime', x_axis_label='Time', y_axis_label=f'Data - Mean ({units})') + + # Interpolate the mean values so that we can subtract the original data + if len(self.median_times) > 1: + interp_means = interpolate_datetimes(data_dates, self.median_times, meanvals) + dev = data_vals - interp_means + elif len(self.median_times) == 1: + if self.median_times[0] is not None: + dev = data_vals - meanvals + else: + dev = [0] * len(data_vals) + else: + # No median data, so we can't calculate any deviation + dev = [0] * len(data_vals) + + # Plot + fig_dev.line(data_dates, dev, color='red') + + # Make the x axis tick labels look nice + fig_dev.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], + seconds=["%d %b %H:%M:%S.%3N"], + hours=["%d %b %H:%M"], + days=["%d %b %H:%M"], + months=["%d %b %Y %H:%M"], + years=["%d %b %Y"] + ) + fig.xaxis.major_label_orientation = np.pi / 4 + + # Place the two figures in a column object + bothfigs = column(fig, fig_dev) + + if savefig: + output_file(filename=filename, title=self.mnemonic_identifier) + save(bothfigs) + + if show_plot: + show(bothfigs) + if return_components: + script, div = components(bothfigs) + return [div, script] + if return_fig: + return bothfigs + + def save_table(self, outname): + """Save the EdbMnemonic instance + + Parameters + ---------- + outname : str + Name of text file to save information into + """ + ascii.write(self.data, outname, overwrite=True) + + def timed_stats(self, sigma=3): + """Break up the data into chunks of the given duration. Calculate the + mean value for each chunk. 
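# Illustrative aside (not part of jwql): a sketch of the chunking this method
# performs, assuming mean_time_block is an astropy Quantity as documented above.
# The Quantity is converted to a bin width in seconds and the number of bins is
# rounded up; the hourly samples here are invented.
import numpy as np
import astropy.units as u
from datetime import datetime, timedelta

mean_time_block = 6 * u.hour
duration_secs = mean_time_block.to('second').value   # 21600.0

dates = [datetime(2022, 1, 1) + timedelta(hours=h) for h in range(25)]
span_secs = (max(dates) - min(dates)).total_seconds()
num_bins = int(np.ceil(span_secs / duration_secs))
print(duration_secs, num_bins)                       # 21600.0 4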
+ + Parameters + ---------- + sigma : int + Number of sigma to use in sigma-clipping + """ + if type(self.data["euvalues"].data[0]) not in [np.str_, str]: + duration_secs = self.mean_time_block.to('second').value + date_arr = np.array(self.data["dates"]) + num_bins = (np.max(self.data["dates"]) - np.min(self.data["dates"])).total_seconds() / duration_secs + + # Round up to the next integer if there is a fractional number of bins + num_bins = np.ceil(num_bins) + + self.mean = [] + self.median = [] + self.max = [] + self.min = [] + self.stdev = [] + self.median_times = [] + for i in range(int(num_bins)): + min_date = self.data["dates"][0] + timedelta(seconds=i * duration_secs) + max_date = min_date + timedelta(seconds=duration_secs) + good = ((date_arr >= min_date) & (date_arr < max_date)) + if self.meta['TlmMnemonics'][0]['AllPoints'] != 0: + avg, med, dev = sigma_clipped_stats(self.data["euvalues"][good], sigma=sigma) + maxval = np.max(self.data["euvalues"][good]) + minval = np.min(self.data["euvalues"][good]) + else: + avg, med, dev, maxval, minval = change_only_stats(self.data["dates"][good], self.data["euvalues"][good], sigma=sigma) + if np.isfinite(avg): + self.mean.append(avg) + self.median.append(med) + self.stdev.append(dev) + self.max.append(maxval) + self.min.append(minval) + self.median_times.append(calc_median_time(self.data["dates"].data[good])) + else: + self.mean = [] + self.median = [] + self.stdev = [] + self.max = [] + self.min = [] + self.median_times = [] + + +def add_limit_boxes(fig, yellow=None, red=None): + """Add green/yellow/red background colors + + Parameters + ---------- + fig : bokeh.plotting.figure + Bokeh figure of the telemetry values + + yellow : list + 2-element list of [low, high] values. If provided, the areas of the plot less than + and greater than will be given a yellow background, to indicate an area + of concern. + + red : list + 2-element list of [low, high] values. If provided, the areas of the plot less than + and greater than will be given a red background, to indicate values that + may indicate an error. It is assumed that the low value of red is less + than the low value of yellow, and that the high value of red is + greater than the high value of yellow. 
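# Illustrative aside (not part of jwql): the same green/yellow shading idea with
# Bokeh BoxAnnotations, using invented limits. Leaving bottom or top unset makes a
# band extend to the edge of the plot, one way to avoid hard-coded offsets.
from bokeh.models import BoxAnnotation
from bokeh.plotting import figure

fig = figure(x_axis_type='datetime')
yellow = (0.4, 0.6)
fig.add_layout(BoxAnnotation(bottom=yellow[0], top=yellow[1],
                             fill_color='chartreuse', fill_alpha=0.2))
fig.add_layout(BoxAnnotation(top=yellow[0], fill_color='gold', fill_alpha=0.2))
fig.add_layout(BoxAnnotation(bottom=yellow[1], fill_color='gold', fill_alpha=0.2))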
+ + Returns + ------- + fig : bokeh.plotting.figure + Modified figure with BoxAnnotations added + """ + if yellow is not None: + green = BoxAnnotation(bottom=yellow[0], top=yellow[1], fill_color='chartreuse', fill_alpha=0.2) + fig.add_layout(green) + if red is not None: + yellow_high = BoxAnnotation(bottom=yellow[1], top=red[1], fill_color='gold', fill_alpha=0.2) + fig.add_layout(yellow_high) + yellow_low = BoxAnnotation(bottom=red[0], top=yellow[0], fill_color='gold', fill_alpha=0.2) + fig.add_layout(yellow_low) + red_high = BoxAnnotation(bottom=red[1], top=red[1] + 100, fill_color='red', fill_alpha=0.1) + fig.add_layout(red_high) + red_low = BoxAnnotation(bottom=red[0] - 100, top=red[0], fill_color='red', fill_alpha=0.1) + fig.add_layout(red_low) + + else: + yellow_high = BoxAnnotation(bottom=yellow[1], top=yellow[1] + 100, fill_color='gold', fill_alpha=0.2) + fig.add_layout(yellow_high) + yellow_low = BoxAnnotation(bottom=yellow[0] - 100, top=yellow[0], fill_color='gold', fill_alpha=0.2) + fig.add_layout(yellow_low) + + else: + if red is not None: + green = BoxAnnotation(bottom=red[0], top=red[1], fill_color='chartreuse', fill_alpha=0.2) + fig.add_layout(green) + red_high = BoxAnnotation(bottom=red[1], top=red[1] + 100, fill_color='red', fill_alpha=0.1) + fig.add_layout(red_high) + red_low = BoxAnnotation(bottom=red[0] - 100, top=red[0], fill_color='red', fill_alpha=0.1) + fig.add_layout(red_low) + + return fig + + +def add_time_offset(offset, dt_obj): + """Add an offset to an input datetime object + + Parameters + ---------- + offset : float + Number of seconds to be added + + dt_obj : datetime.datetime + Datetime object to which the seconds are added + + Returns + ------- + obj : datetime.datetime + Sum of the input datetime objects and the offset seconds. + """ + return dt_obj + timedelta(seconds=offset) + + +def calc_median_time(time_arr): + """Calcualte the median time of the input time_arr + + Parameters + ---------- + time_arr : numpy.ndarray + 1D array of datetime objects + + Returns + ------- + med_time : datetime.datetime + Median time, as a datetime object + """ + if len(time_arr) > 0: + med_time = time_arr[0] + ((time_arr[-1] - time_arr[0]) / 2.) + else: + med_time = np.nan + return med_time + + +def change_only_bounding_points(date_list, value_list, starttime, endtime): + """For data containing change-only values, where bracketing data outside + the requested time span may be present, create data points at the starting + and ending times. This can be helpful with later interpolations. 
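# Illustrative aside (not part of jwql): for change-only telemetry, the value in
# effect at the requested start time is the most recent sample taken before that
# time, which is what the bounding-point logic below relies on. Dates and values
# here are invented.
import numpy as np
from datetime import datetime, timedelta

base = datetime(2022, 1, 1)
dates = np.array([base + timedelta(minutes=m) for m in (0, 10, 40)])
values = ['CLOSED', 'OPEN', 'CLOSED']
starttime = base + timedelta(minutes=15)

before_start = np.where(dates < starttime)[0]
value_at_start = values[before_start[-1]] if len(before_start) > 0 else None
print(value_at_start)   # OPEN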
+ + Parameters + ---------- + date_list : list + List of datetime values + + value_list : list + List of corresponding mnemonic values + + starttime : datetime.datetime + Start time + + endtime : datetime.datetime + End time + + Returns + ------- + date_list : list + List of datetime values + + value_list : list + List of corresponding mnemonic values + """ + date_list_arr = np.array(date_list) + + if isinstance(starttime, Time): + starttime = starttime.datetime + + if isinstance(endtime, Time): + endtime = endtime.datetime + + valid_idx = np.where((date_list_arr <= endtime) & (date_list_arr >= starttime))[0] + before_startime = np.where(date_list_arr < starttime)[0] + before_endtime = np.where(date_list_arr < endtime)[0] + + # The value at starttime is either the value of the last point before starttime, + # or NaN if there are no points prior to starttime + if len(before_startime) == 0: + value0 = np.nan + else: + value0 = value_list[before_startime[-1]] + + # The value at endtime is NaN if there are no times before the endtime. + # Otherwise the value is equal to the value at the last point before endtime + if len(before_endtime) == 0: + value_end = np.nan + else: + value_end = value_list[before_endtime[-1]] + + # Crop the arrays down to the times between starttime and endtime + date_list = list(np.array(date_list)[valid_idx]) + value_list = list(np.array(value_list)[valid_idx]) + + # Add an entry for starttime and another for endtime, but not if + # the values are NaN + if isinstance(value0, Number): + if not np.isnan(value0): + date_list.insert(0, starttime) + value_list.insert(0, value0) + elif isinstance(value0, str): + date_list.insert(0, starttime) + value_list.insert(0, value0) + + if isinstance(value_end, Number): + if not np.isnan(value_end): + date_list.append(endtime) + value_list.append(value_end) + elif isinstance(value_end, str): + date_list.append(endtime) + value_list.append(value_end) + + return date_list, value_list + + +def change_only_stats(times, values, sigma=3): + """Calculate the mean/median/stdev as well as the median time for a + collection of change-only data. + + Parameters + ---------- + times : list + List of datetime objects + + values : list + List of values corresponding to times + + sigma : float + Number of sigma to use for sigma-clipping + + Returns + ------- + meanval : float + Mean of values + + medianval : float + Median of values + + stdevval : float + Standard deviation of values + """ + # If there is only a single datapoint, then the mean will be + # equal to it. + if len(times) == 0: + return None, None, None, None, None + if len(times) == 1: + return values, values, 0., values, values + else: + times = np.array(times) + values = np.array(values) + delta_time = times[1:] - times[0:-1] + delta_time_weight = np.array([e.total_seconds() for e in delta_time]) + + # Add weight for the final point. Set it to 1 microsecond + delta_time_weight = np.append(delta_time_weight, 1e-6) + + meanval = np.average(values, weights=delta_time_weight) + stdevval = np.sqrt(np.average((values - meanval) ** 2, weights=delta_time_weight)) + maxval = np.max(values) + minval = np.min(values) + + # In order to calculate the median, we need to adjust the weights such that + # the weight represents the number of times a given value is present. 
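# Illustrative aside (not part of jwql): time-weighted mean and standard deviation
# for change-only samples, weighting each value by how long it stayed in effect.
# Times and values are invented; the final point gets a tiny weight, as above.
import numpy as np
from datetime import datetime, timedelta

times = np.array([datetime(2022, 1, 1) + timedelta(minutes=m) for m in (0, 1, 4, 5)])
values = np.array([1., 2., 1., 3.])

weights = np.array([dt.total_seconds() for dt in (times[1:] - times[:-1])])
weights = np.append(weights, 1e-6)

mean = np.average(values, weights=weights)
stdev = np.sqrt(np.average((values - mean) ** 2, weights=weights))
print(round(mean, 3), round(stdev, 3))   # roughly 1.6 and 0.49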
Scale + # it so that the minimum weight is 1 + delta_time_weight = (delta_time_weight / np.min(delta_time_weight)).astype(int) + + # Now we find the median by sorting the values, keeping a running total of the + # total number of entries given that each value will have a number of instances + # dictat= midpt: + if odd: + medianval = val + else: + if total_idx > midpt: + medianval = val + else: + medianval = (val + values[i + 1]) / 2. + break + + return meanval, medianval, stdevval, maxval, minval + + +def create_time_offset(dt_obj, epoch): + """Subtract input epoch from a datetime object and return the + residual number of seconds + + Parameters + ---------- + dt_obj : datetime.datetime + Original datetiem object + + epoch : datetime.datetime + Datetime to be subtracted from dt_obj + + Returns + ------- + obj : float + Number of seconds between dt_obj and epoch + """ + if isinstance(dt_obj, Time): + return (dt_obj - epoch).to(u.second).value + elif isinstance(dt_obj, datetime): + return (dt_obj - epoch).total_seconds() + def get_mnemonic(mnemonic_identifier, start_time, end_time): - """Execute query and return a ``EdbMnemonic`` instance. + """Execute query and return an ``EdbMnemonic`` instance. The underlying MAST service returns data that include the datapoint preceding the requested start time and the datapoint @@ -221,9 +1577,11 @@ def get_mnemonic(mnemonic_identifier, start_time, end_time): ---------- mnemonic_identifier : str Telemetry mnemonic identifiers, e.g. ``SA_ZFGOUTFOV`` - start_time : astropy.time.Time instance + + start_time : astropy.time.Time or datetime.datetime Start time - end_time : astropy.time.Time instance + + end_time : astropy.time.Time or datetime.datetime End time Returns @@ -231,21 +1589,48 @@ def get_mnemonic(mnemonic_identifier, start_time, end_time): mnemonic : instance of EdbMnemonic EdbMnemonic object containing query results """ - base_url = get_mast_base_url() - service = ENGDB_Service(base_url) # By default, will use the public MAST service. - data = service.get_values(mnemonic_identifier, start_time, end_time, include_obstime=True) + meta = service.get_meta(mnemonic_identifier) + # If the mnemonic is stored as change-only data, then include bracketing values + # outside of the requested start and stop times. These may be needed later to + # translate change-only data into all-points data. + if meta['TlmMnemonics'][0]['AllPoints'] == 0: + bracket = True + else: + bracket = False + + data = service.get_values(mnemonic_identifier, start_time, end_time, include_obstime=True, + include_bracket_values=bracket) + dates = [datetime.strptime(row.obstime.iso, "%Y-%m-%d %H:%M:%S.%f") for row in data] values = [row.value for row in data] + if bracket: + # For change-only data, check to see how many additional data points there are before + # the requested start time and how many are after the requested end time. Note that + # the max for this should be 1, but it's also possible to have zero (e.g. if you are + # querying up through the present and there are no more recent data values.) Use these + # to produce entries at the beginning and ending of the queried time range. + if len(dates) > 0: + dates, values = change_only_bounding_points(dates, values, start_time, end_time) + data = Table({'dates': dates, 'euvalues': values}) info = get_mnemonic_info(mnemonic_identifier) - # create and return instance + # Create and return instance mnemonic = EdbMnemonic(mnemonic_identifier, start_time, end_time, data, meta, info) + + # Convert change-only data to "regular" data. 
If this is not done, checking for + # dependency conditions may not work well if there are a limited number of points. + # Also, later interpolations won't be correct with change-only points since we are + # doing linear interpolation. + if bracket: + if len(mnemonic) > 0: + mnemonic.change_only_add_points() + return mnemonic @@ -268,7 +1653,6 @@ def get_mnemonics(mnemonics, start_time, end_time): Dictionary. keys are the queried mnemonics, values are instances of EdbMnemonic """ - if not isinstance(mnemonics, (list, np.ndarray)): raise RuntimeError('Please provide a list/array of mnemonic_identifiers') @@ -293,11 +1677,43 @@ def get_mnemonic_info(mnemonic_identifier): info : dict Object that contains the returned data """ - mast_token = get_mast_token() return query_mnemonic_info(mnemonic_identifier, token=mast_token) +def interpolate_datetimes(new_times, old_times, old_data): + """interpolate a set of datetime/value pairs onto another set + of datetime objects + + Parameters + ---------- + new_times : numpy.ndarray + Array of datetime objects onto which the data will be interpolated + + old_times : numpy.ndarray + Array of datetime objects associated with the input data values + + old_data : numpy.ndarray + Array of data values associated with ``old_times``, which will be + interpolated onto ``new_times`` + + Returns + ------- + new_data : numpy.ndarray + Array of values interpolated onto ``new_times`` + """ + # We can only linearly interpolate if we have more than one entry + if len(old_data) >= 2: + interp_times = np.array([create_time_offset(ele, old_times[0]) for ele in new_times]) + mnem_times = np.array([create_time_offset(ele, old_times[0]) for ele in old_times]) + new_data = np.interp(interp_times, mnem_times, old_data) + else: + # If there are not enough data and we are unable to interpolate, + # then set the data table to be empty + new_data = np.array([]) + return new_data + + def is_valid_mnemonic(mnemonic_identifier): """Determine if the given string is a valid EDB mnemonic. @@ -311,7 +1727,6 @@ def is_valid_mnemonic(mnemonic_identifier): bool Is mnemonic_identifier a valid EDB mnemonic? """ - inventory = mnemonic_inventory()[0] if mnemonic_identifier in inventory['tlmMnemonic']: return True @@ -332,7 +1747,6 @@ def mnemonic_inventory(): meta : dict Additional information returned by the query. """ - out = Mast.service_request_async(MAST_EDB_MNEMONIC_SERVICE, {}) data, meta = process_mast_service_request_result(out) @@ -359,7 +1773,6 @@ def process_mast_service_request_result(result, data_as_table=True): meta : dict Additional information returned by the query """ - json_data = result[0].json() if json_data['status'] != 'COMPLETE': raise RuntimeError('Mnemonic query did not complete.\nquery status: {}\nmessage: {}'.format( @@ -370,7 +1783,11 @@ def process_mast_service_request_result(result, data_as_table=True): if data_as_table: data = Table(json_data['data']) else: - data = json_data['data'][0] + if len(json_data['data']) > 0: + data = json_data['data'][0] + else: + warnings.warn('Query did not return any data. Returning None') + return None, None except KeyError: warnings.warn('Query did not return any data. 
Returning None') return None, None @@ -399,7 +1816,6 @@ def query_mnemonic_info(mnemonic_identifier, token=None): info : dict Object that contains the returned data """ - parameters = {"mnemonic": "{}".format(mnemonic_identifier)} result = Mast.service_request_async(MAST_EDB_DICTIONARY_SERVICE, parameters) info = process_mast_service_request_result(result, data_as_table=False)[0] diff --git a/jwql/example_config.json b/jwql/example_config.json index 58e8e94b9..937a1d63b 100644 --- a/jwql/example_config.json +++ b/jwql/example_config.json @@ -1,8 +1,6 @@ { "admin_account" : "", "auth_mast" : "", - "client_id" : "", - "client_secret" : "", "connection_string" : "", "database" : { "engine" : "", @@ -12,6 +10,14 @@ "host" : "", "port" : "" }, + "django_database" : { + "ENGINE" : "", + "NAME" : "", + "USER" : "", + "PASSWORD" : "", + "HOST" : "", + "PORT" : "" + }, "jwql_dir" : "", "jwql_version": "", "server_type": "", @@ -26,5 +32,8 @@ "test_data" : "", "test_dir" : "", "thumbnail_filesystem" : "", - "cores" : "" + "cores" : "", + "redis_host": "", + "redis_port": "", + "transfer_dir": "" } diff --git a/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py b/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py index 8689853e3..cc4b7fb68 100755 --- a/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py +++ b/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py @@ -81,10 +81,13 @@ ``MIR_DARK`` """ +from collections import defaultdict from copy import deepcopy import datetime +from glob import glob import logging import os +from time import sleep from astropy.io import ascii, fits from astropy.time import Time @@ -92,26 +95,38 @@ from jwst_reffiles.bad_pixel_mask import bad_pixel_mask import numpy as np -from jwql.database.database_interface import session +from jwql.database.database_interface import engine, session from jwql.database.database_interface import NIRCamBadPixelQueryHistory, NIRCamBadPixelStats from jwql.database.database_interface import NIRISSBadPixelQueryHistory, NIRISSBadPixelStats from jwql.database.database_interface import MIRIBadPixelQueryHistory, MIRIBadPixelStats from jwql.database.database_interface import NIRSpecBadPixelQueryHistory, NIRSpecBadPixelStats from jwql.database.database_interface import FGSBadPixelQueryHistory, FGSBadPixelStats from jwql.instrument_monitors import pipeline_tools +from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline from jwql.utils import crds_tools, instrument_properties, monitor_utils +from jwql.utils.constants import DARKS_BAD_PIXEL_TYPES, DARK_EXP_TYPES, FLATS_BAD_PIXEL_TYPES, FLAT_EXP_TYPES from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE -from jwql.utils.constants import FLAT_EXP_TYPES, DARK_EXP_TYPES from jwql.utils.logging_functions import log_info, log_fail from jwql.utils.mast_utils import mast_query from jwql.utils.permissions import set_permissions -from jwql.utils.utils import copy_files, ensure_dir_exists, get_config, filesystem_path +from jwql.utils.utils import copy_files, create_png_from_fits, ensure_dir_exists, get_config, filesystem_path + +# Determine if the code is being run by Github Actions +ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') + +# Determine if the code is being run as part of a Readthedocs build +ON_READTHEDOCS = False +if 'READTHEDOCS' in os.environ: # pragma: no cover + ON_READTHEDOCS = os.environ['READTHEDOCS'] + +if not 
ON_GITHUB_ACTIONS and not ON_READTHEDOCS: + from jwql.website.apps.jwql.monitor_pages.monitor_bad_pixel_bokeh import BadPixelPlots THRESHOLDS_FILE = os.path.join(os.path.split(__file__)[0], 'bad_pixel_file_thresholds.txt') def bad_map_to_list(badpix_image, mnemonic): - """Given an DQ image and a bad pixel mnemonic, create a list of + """Given a DQ image and a bad pixel mnemonic, create a list of (x,y) locations of this type of bad pixel in ``badpix_image`` Parameters @@ -426,7 +441,8 @@ def add_bad_pix(self, coordinates, pixel_type, files, obs_start_time, obs_mid_ti 'obs_end_time': obs_end_time, 'baseline_file': baseline_file, 'entry_date': datetime.datetime.now()} - self.pixel_table.__table__.insert().execute(entry) + with engine.begin() as connection: + connection.execute(self.pixel_table.__table__.insert(), entry) def filter_query_results(self, results, datatype): """Filter MAST query results. For input flats, keep only those @@ -696,7 +712,7 @@ def most_recent_search(self, file_type='dark'): run_field = self.query_table.run_bpix_from_flats query = session.query(self.query_table).filter(self.query_table.aperture == self.aperture). \ - filter(run_field == True) + filter(run_field == True) # noqa: E712 (comparison to true) dates = np.zeros(0) if file_type.lower() == 'dark': @@ -739,7 +755,7 @@ def make_crds_parameter_dict(self): parameters['CHANNEL'] = 'SHORT' return parameters - def process(self, illuminated_raw_files, illuminated_slope_files, dark_raw_files, dark_slope_files): + def process(self, illuminated_raw_files, illuminated_slope_files, flat_file_count_threshold, dark_raw_files, dark_slope_files, dark_file_count_threshold): """The main method for processing darks. See module docstrings for further details. @@ -769,35 +785,108 @@ def process(self, illuminated_raw_files, illuminated_slope_files, dark_raw_files For cases where a raw file exists but no slope file, the slope file should be ``None`` """ + # Illuminated files - run entirety of calwebb_detector1 for uncal # files where corresponding rate file is 'None' - all_files = [] badpix_types = [] - badpix_types_from_flats = ['DEAD', 'LOW_QE', 'OPEN', 'ADJ_OPEN'] - badpix_types_from_darks = ['HOT', 'RC', 'OTHER_BAD_PIXEL', 'TELEGRAPH'] illuminated_obstimes = [] if illuminated_raw_files: - index = 0 - badpix_types.extend(badpix_types_from_flats) + logging.info("Found {} uncalibrated flat fields".format(len(illuminated_raw_files))) + badpix_types.extend(FLATS_BAD_PIXEL_TYPES) + out_exts = defaultdict(lambda: ['jump', '0_ramp_fit']) + in_files = [] for uncal_file, rate_file in zip(illuminated_raw_files, illuminated_slope_files): + logging.info("\tChecking illuminated raw file {} with rate file {}".format(uncal_file, rate_file)) self.get_metadata(uncal_file) if rate_file == 'None': - jump_output, rate_output, _ = pipeline_tools.calwebb_detector1_save_jump(uncal_file, self.data_dir, - ramp_fit=True, save_fitopt=False) - if self.nints > 1: - illuminated_slope_files[index] = rate_output.replace('0_ramp_fit', '1_ramp_fit') + short_name = os.path.basename(uncal_file).replace('_uncal.fits', '') + local_uncal_file = os.path.join(self.data_dir, os.path.basename(uncal_file)) + logging.info('Calling pipeline for {}'.format(uncal_file)) + logging.info("Copying raw file to {}".format(self.data_dir)) + copy_files([uncal_file], self.data_dir) + if hasattr(self, 'nints') and self.nints > 1: + out_exts[short_name] = ['jump', '1_ramp_fit'] + needs_calibration = False + for file_type in out_exts[short_name]: + if not 
os.path.isfile(local_uncal_file.replace("uncal", file_type)): + needs_calibration = True + if needs_calibration: + in_files.append(local_uncal_file) else: - illuminated_slope_files[index] = deepcopy(rate_output) - index += 1 + logging.info("\t\tCalibrated files already exist for {}".format(short_name)) + else: + logging.info("\tRate file found for {}".format(uncal_file)) + if os.path.isfile(rate_file): + copy_files([rate_file], self.data_dir) + else: + logging.warning("\tRate file {} doesn't actually exist".format(rate_file)) + short_name = os.path.basename(uncal_file).replace('_uncal.fits', '') + local_uncal_file = os.path.join(self.data_dir, os.path.basename(uncal_file)) + logging.info('Calling pipeline for {}'.format(uncal_file)) + logging.info("Copying raw file to {}".format(self.data_dir)) + copy_files([uncal_file], self.data_dir) + if hasattr(self, 'nints') and self.nints > 1: + out_exts[short_name] = ['jump', '1_ramp_fit'] + needs_calibration = False + for file_type in out_exts[short_name]: + if not os.path.isfile(local_uncal_file.replace("uncal", file_type)): + needs_calibration = True + if needs_calibration: + in_files.append(local_uncal_file) + else: + logging.info("\t\tCalibrated files already exist for {}".format(short_name)) + + outputs = {} + if len(in_files) > 0: + logging.info("Running pipeline for {} files".format(len(in_files))) + outputs = run_parallel_pipeline(in_files, "uncal", out_exts, self.instrument, jump_pipe=True) + + index = 0 + logging.info("Checking files post-calibration") + for uncal_file, rate_file in zip(illuminated_raw_files, illuminated_slope_files): + logging.info("\tChecking files {}, {}".format(os.path.basename(uncal_file), os.path.basename(rate_file))) + local_uncal_file = os.path.join(self.data_dir, os.path.basename(uncal_file)) + if local_uncal_file in outputs: + logging.info("\t\tAdding calibrated file.") + illuminated_slope_files[index] = deepcopy(outputs[local_uncal_file][1]) + else: + logging.info("\t\tCalibration was skipped for file") + self.get_metadata(illuminated_raw_files[index]) + local_ramp_file = local_uncal_file.replace("uncal", "0_ramp_fit") + local_rateints_file = local_uncal_file.replace("uncal", "rateints") + if hasattr(self, 'nints') and self.nints > 1: + local_ramp_file = local_ramp_file.replace("0_ramp_fit", "1_ramp_fit") + if os.path.isfile(local_ramp_file): + logging.info("\t\t\tFound local ramp file") + illuminated_slope_files[index] = local_ramp_file + elif os.path.isfile(local_rateints_file): + logging.info("\t\t\tFound local rateints file") + illuminated_slope_files[index] = local_rateints_file + else: + logging.info("\t\t\tNo local files found") + illuminated_slope_files[index] = None + index += 1 # Get observation time for all files illuminated_obstimes.append(instrument_properties.get_obstime(uncal_file)) + logging.info("Trimming unfound files.") + index = 0 + while index < len(illuminated_raw_files): + if illuminated_slope_files[index] is None or illuminated_slope_files[index] == 'None': + logging.info("\tRemoving {}".format(illuminated_raw_files[index])) + del illuminated_raw_files[index] + del illuminated_slope_files[index] + del illuminated_obstimes[index] + else: + index += 1 - all_files = deepcopy(illuminated_slope_files) - - min_illum_time = min(illuminated_obstimes) - max_illum_time = max(illuminated_obstimes) - mid_illum_time = instrument_properties.mean_time(illuminated_obstimes) + min_illum_time = 0. + max_illum_time = 0. + mid_illum_time = 0. 
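# Illustrative sketch only: the bookkeeping above queues an uncal file for calwebb_detector1
# when one of its expected products, derived by swapping the filename suffix, is missing on
# disk. The helper name and the default suffixes below are placeholders for that pattern,
# not part of the patch itself:
import os

def missing_products(uncal_path, suffixes=('jump', '0_ramp_fit')):
    """Return the expected calibrated products that do not yet exist on disk."""
    return [suffix for suffix in suffixes
            if not os.path.isfile(uncal_path.replace('uncal', suffix))]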
+ if len(illuminated_obstimes) > 0: + min_illum_time = min(illuminated_obstimes) + max_illum_time = max(illuminated_obstimes) + mid_illum_time = instrument_properties.mean_time(illuminated_obstimes) # Dark files - Run calwebb_detector1 on all uncal files, saving the # Jump step output. If corresponding rate file is 'None', then also @@ -806,38 +895,117 @@ def process(self, illuminated_raw_files, illuminated_slope_files, dark_raw_files dark_fitopt_files = [] dark_obstimes = [] if dark_raw_files: + logging.info("Found {} uncalibrated darks".format(len(dark_raw_files))) index = 0 - badpix_types.extend(badpix_types_from_darks) + badpix_types.extend(DARKS_BAD_PIXEL_TYPES) # In this case we need to run the pipeline on all input files, # even if the rate file is present, because we also need the jump # and fitops files, which are not saved by default + in_files = [] + out_exts = defaultdict(lambda: ['jump', 'fitopt', '0_ramp_fit']) for uncal_file, rate_file in zip(dark_raw_files, dark_slope_files): - jump_output, rate_output, fitopt_output = pipeline_tools.calwebb_detector1_save_jump(uncal_file, self.data_dir, - ramp_fit=True, save_fitopt=True) + logging.info("Checking dark file {} with rate file {}".format(uncal_file, rate_file)) self.get_metadata(uncal_file) - dark_jump_files.append(jump_output) - dark_fitopt_files.append(fitopt_output) - if self.nints > 1: - # dark_slope_files[index] = rate_output.replace('rate', 'rateints') - dark_slope_files[index] = rate_output.replace('0_ramp_fit', '1_ramp_fit') + short_name = os.path.basename(uncal_file).replace('_uncal.fits', '') + local_uncal_file = os.path.join(self.data_dir, os.path.basename(uncal_file)) + if not os.path.isfile(local_uncal_file): + logging.info("\tCopying raw file to {}".format(self.data_dir)) + copy_files([uncal_file], self.data_dir) + if hasattr(self, 'nints') and self.nints > 1: + out_exts[short_name] = ['jump', 'fitopt', '1_ramp_fit'] + local_processed_files = [local_uncal_file.replace("uncal", x) for x in out_exts[short_name]] + calibrated_data = [os.path.isfile(x) for x in local_processed_files] + if not all(calibrated_data): + logging.info('\tCalling pipeline for {} {}'.format(uncal_file, rate_file)) + in_files.append(local_uncal_file) + dark_jump_files.append(None) + dark_fitopt_files.append(None) + dark_slope_files[index] = None else: - dark_slope_files[index] = deepcopy(rate_output) + logging.info("\tProcessed files already exist.") + dark_jump_files.append(local_processed_files[0]) + dark_fitopt_files.append(local_processed_files[1]) + dark_slope_files[index] = deepcopy(local_processed_files[2]) dark_obstimes.append(instrument_properties.get_obstime(uncal_file)) index += 1 - if len(all_files) == 0: - all_files = deepcopy(dark_slope_files) - else: - all_files = all_files + dark_slope_files + outputs = {} + if len(in_files) > 0: + logging.info("Running pipeline for {} files".format(len(in_files))) + outputs = run_parallel_pipeline(in_files, "uncal", out_exts, self.instrument, jump_pipe=True) - min_dark_time = min(dark_obstimes) - max_dark_time = max(dark_obstimes) - mid_dark_time = instrument_properties.mean_time(dark_obstimes) + index = 0 + logging.info("Checking files post-calibration") + for uncal_file, rate_file in zip(dark_raw_files, dark_slope_files): + logging.info("\tChecking files {}, {}".format(uncal_file, rate_file)) + local_uncal_file = os.path.join(self.data_dir, os.path.basename(uncal_file)) + short_name = os.path.basename(uncal_file).replace('_uncal.fits', '') + if local_uncal_file in outputs: + 
logging.info("\t\tAdding calibrated files") + dark_jump_files[index] = outputs[local_uncal_file][0] + dark_fitopt_files[index] = outputs[local_uncal_file][1] + dark_slope_files[index] = deepcopy(outputs[local_uncal_file][2]) + else: + logging.info("\t\tCalibration skipped for file") + self.get_metadata(local_uncal_file) + local_ramp_file = local_uncal_file.replace("uncal", "0_ramp_fit") + if hasattr(self, 'nints') and self.nints > 1: + local_ramp_file = local_ramp_file.replace("0_ramp_fit", "1_ramp_fit") + if not os.path.isfile(local_uncal_file.replace("uncal", "jump")): + logging.info("\t\t\tJump file not found") + dark_jump_files[index] = None + else: + dark_jump_files[index] = local_uncal_file.replace("uncal", "jump") + if not os.path.isfile(local_uncal_file.replace("uncal", "fitopt")): + logging.info("\t\t\tFitopt file not found") + dark_fitopt_files[index] = None + else: + dark_fitopt_files[index] = local_uncal_file.replace("uncal", "fitopt") + if not os.path.isfile(local_ramp_file): + if os.path.isfile(local_uncal_file.replace("uncal", "rateints")): + dark_slope_files[index] = local_uncal_file.replace("uncal", "rateints") + else: + logging.info("\t\t\tRate file not found") + dark_slope_files[index] = None + else: + dark_slope_files[index] = local_ramp_file + index += 1 + + index = 0 + logging.info("Trimming unfound files.") + while index < len(dark_raw_files): + if dark_jump_files[index] is None or dark_fitopt_files[index] is None or dark_slope_files[index] is None: + logging.info("\tRemoving {}".format(dark_raw_files[index])) + del dark_raw_files[index] + del dark_jump_files[index] + del dark_fitopt_files[index] + del dark_slope_files[index] + del dark_obstimes[index] + else: + index += 1 + + if len(dark_slope_files) > 0: + min_dark_time = min(dark_obstimes) + max_dark_time = max(dark_obstimes) + mid_dark_time = instrument_properties.mean_time(dark_obstimes) + + # Check whether there are still enough files left to meet the threshold + if illuminated_slope_files is None: + flat_length = 0 + else: + flat_length = len(illuminated_slope_files) + if dark_slope_files is None: + dark_length = 0 + else: + dark_length = len(dark_slope_files) + if (flat_length < flat_file_count_threshold) and (dark_length < dark_file_count_threshold): + logging.info("After removing failed files, not enough new files remian.") + return # For the dead flux check, filter out any files that have less than # 4 groups dead_flux_files = [] - if illuminated_raw_files: + if illuminated_raw_files is not None: for illum_file in illuminated_raw_files: ngroup = fits.getheader(illum_file)['NGROUPS'] if ngroup >= 4: @@ -859,6 +1027,18 @@ def process(self, illuminated_raw_files, illuminated_slope_files, dark_raw_files query_string = 'darks_{}_flats_{}_to_{}'.format(self.dark_query_start, self.flat_query_start, self.query_end) output_file = '{}_{}_{}_bpm.fits'.format(self.instrument, self.aperture, query_string) output_file = os.path.join(self.output_dir, output_file) + +# logging.info("Calling bad_pixel_mask.bad_pixels") +# logging.info("\tflat_slope_files are: {}".format(illuminated_slope_files)) +# logging.info("\tdead__search_type={}".format(dead_search_type)) +# logging.info("\tflat_mean_normalization_method={}".format(flat_mean_normalization_method)) +# logging.info("\tdead_flux_check_files are: {}".format(dead_flux_files)) +# logging.info("\tdark_slope_files are: {}".format(dark_slope_files)) +# logging.info("\tdark_uncal_files are: {}".format(dark_raw_files)) +# logging.info("\tdark_jump_files are: 
{}".format(dark_jump_files)) +# logging.info("\tdark_fitopt_files are: {}".format(dark_fitopt_files)) +# logging.info("\toutput_file={}".format(output_file)) + bad_pixel_mask.bad_pixels(flat_slope_files=illuminated_slope_files, dead_search_type=dead_search_type, flat_mean_normalization_method=flat_mean_normalization_method, run_dead_flux_check=True, dead_flux_check_files=dead_flux_files, flux_check=35000, @@ -898,17 +1078,25 @@ def process(self, illuminated_raw_files, illuminated_slope_files, dark_raw_files # Add new hot and dead pixels to the database logging.info('\tFound {} new {} pixels'.format(len(bad_location_list[0]), bad_type)) - if bad_type in badpix_types_from_flats: + if bad_type in FLATS_BAD_PIXEL_TYPES: self.add_bad_pix(bad_location_list, bad_type, illuminated_slope_files, min_illum_time, mid_illum_time, max_illum_time, baseline_file) - elif bad_type in badpix_types_from_darks: + flat_png = create_png_from_fits(illuminated_slope_files[0], self.output_dir) + elif bad_type in DARKS_BAD_PIXEL_TYPES: self.add_bad_pix(bad_location_list, bad_type, dark_slope_files, min_dark_time, mid_dark_time, max_dark_time, baseline_file) + dark_png = create_png_from_fits(dark_slope_files[0], self.output_dir) else: raise ValueError("Unrecognized type of bad pixel: {}. Cannot update database table.".format(bad_type)) + # Remove raw files, rate files, and pipeline products in order to save disk space + files_to_remove = glob(f'{self.data_dir}/*.fits') + for filename in files_to_remove: + os.remove(filename) + @log_fail @log_info + @only_one(key="bad_pixel_monitor") def run(self): """The main method. See module docstrings for further details. @@ -932,6 +1120,7 @@ def run(self): self.query_end = Time.now().mjd # Loop over all instruments + updated_instruments = [] for instrument in JWST_INSTRUMENT_NAMES: self.instrument = instrument @@ -1074,7 +1263,8 @@ def run(self): # Run the bad pixel monitor if run_flats or run_darks: - self.process(flat_uncal_files, flat_rate_files, dark_uncal_files, dark_rate_files) + self.process(flat_uncal_files, flat_rate_files, flat_file_count_threshold, dark_uncal_files, dark_rate_files, dark_file_count_threshold) + updated_instruments.append(self.instrument) # Update the query history if dark_uncal_files is None: @@ -1099,9 +1289,16 @@ def run(self): 'run_bpix_from_flats': run_flats, 'run_monitor': run_flats or run_darks, 'entry_date': datetime.datetime.now()} - self.query_table.__table__.insert().execute(new_entry) + with engine.begin() as connection: + connection.execute(self.query_table.__table__.insert(), new_entry) logging.info('\tUpdated the query history table') + # Update the figures to be shown in the web app. 
Only update figures + # for instruments where the monitor ran + for instrument in updated_instruments: + BadPixelPlots(instrument) + + logging.info(f'Updating web pages for: {updated_instruments}') logging.info('Bad Pixel Monitor completed successfully.') diff --git a/jwql/instrument_monitors/common_monitors/bias_monitor.py b/jwql/instrument_monitors/common_monitors/bias_monitor.py index e4ee90e06..9e1f9ad32 100755 --- a/jwql/instrument_monitors/common_monitors/bias_monitor.py +++ b/jwql/instrument_monitors/common_monitors/bias_monitor.py @@ -35,6 +35,7 @@ import datetime import logging import os +from time import sleep from astropy.io import fits from astropy.stats import sigma_clip, sigma_clipped_stats @@ -42,22 +43,24 @@ from astropy.visualization import ZScaleInterval import matplotlib matplotlib.use('Agg') -import matplotlib.pyplot as plt -from mpl_toolkits.axes_grid1 import make_axes_locatable -import numpy as np -from pysiaf import Siaf -from sqlalchemy.sql.expression import and_ - -from jwql.database.database_interface import session -from jwql.database.database_interface import NIRCamBiasQueryHistory, NIRCamBiasStats, NIRISSBiasQueryHistory, NIRISSBiasStats, NIRSpecBiasQueryHistory, NIRSpecBiasStats -from jwql.instrument_monitors import pipeline_tools -from jwql.utils import instrument_properties, monitor_utils -from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE -from jwql.utils.logging_functions import log_info, log_fail -from jwql.utils.monitor_utils import update_monitor_table -from jwql.utils.permissions import set_permissions -from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config - +import matplotlib.pyplot as plt # noqa: E402 (module import not at top) +from mpl_toolkits.axes_grid1 import make_axes_locatable # noqa: E402 (module import not at top) +import numpy as np # noqa: E402 (module import not at top) +from pysiaf import Siaf # noqa: E402 (module import not at top) +from sqlalchemy.sql.expression import and_ # noqa: E402 (module import not at top) + +from jwql.database.database_interface import session, engine # noqa: E402 (module import not at top) +from jwql.database.database_interface import NIRCamBiasQueryHistory, NIRCamBiasStats, NIRISSBiasQueryHistory # noqa: E402 (module import not at top) +from jwql.database.database_interface import NIRISSBiasStats, NIRSpecBiasQueryHistory, NIRSpecBiasStats # noqa: E402 (module import not at top) +from jwql.instrument_monitors import pipeline_tools # noqa: E402 (module import not at top) +from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline # noqa: E402 (module import not at top) +from jwql.utils import instrument_properties, monitor_utils # noqa: E402 (module import not at top) +from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE # noqa: E402 (module import not at top) +from jwql.utils.logging_functions import log_info, log_fail # noqa: E402 (module import not at top) +from jwql.utils.monitor_utils import update_monitor_table # noqa: E402 (module import not at top) +from jwql.utils.permissions import set_permissions # noqa: E402 (module import not at top) +from jwql.utils.utils import copy_files, ensure_dir_exists, filesystem_path, get_config # noqa: E402 (module import not at top) +from jwql.website.apps.jwql.monitor_pages.monitor_bias_bokeh import BiasMonitorPlots # noqa: E402 (module import not at top) class Bias(): @@ -344,7 +347,7 @@ def most_recent_search(self): """ query = 
session.query(self.query_table).filter(and_(self.query_table.aperture == self.aperture, - self.query_table.run_monitor == True)).order_by(self.query_table.end_time_mjd).all() + self.query_table.run_monitor == True)).order_by(self.query_table.end_time_mjd).all() # noqa: E348 (comparison to true) if len(query) == 0: query_result = 59607.0 # a.k.a. Jan 28, 2022 == First JWST images (MIRI) @@ -365,25 +368,25 @@ def process(self, file_list): List of filenames (including full paths) to the dark current files. """ + logging.info("Creating calibration tasks") + outputs = run_parallel_pipeline(file_list, "uncal_0thgroup", "refpix", self.instrument) for filename in file_list: logging.info('\tWorking on file: {}'.format(filename)) + if filename not in outputs: + processed_file = filename.replace("uncal_0thgroup", "refpix") + if not os.path.isfile(processed_file): + logging.warning("Pipeline was unable to process {}".format(filename)) + logging.warning("File will be skipped.") + continue + else: + processed_file = outputs[filename] + # Get relevant header info for this file self.read_pattern = fits.getheader(filename, 0)['READPATT'] self.expstart = '{}T{}'.format(fits.getheader(filename, 0)['DATE-OBS'], fits.getheader(filename, 0)['TIME-OBS']) - # Run the file through the necessary pipeline steps - pipeline_steps = self.determine_pipeline_steps() - logging.info('\tRunning pipeline on {}'.format(filename)) - try: - processed_file = pipeline_tools.run_calwebb_detector1_steps(filename, pipeline_steps) - logging.info('\tPipeline complete. Output: {}'.format(processed_file)) - set_permissions(processed_file) - except: - logging.info('\tPipeline processing failed for {}'.format(filename)) - continue - # Find amplifier boundaries so per-amp statistics can be calculated _, amp_bounds = instrument_properties.amplifier_info(processed_file, omit_reference_pixels=True) logging.info('\tAmplifier boundaries: {}'.format(amp_bounds)) @@ -427,11 +430,23 @@ def process(self, file_list): bias_db_entry[key] = float(amp_medians[key]) # Add this new entry to the bias database table - self.stats_table.__table__.insert().execute(bias_db_entry) - logging.info('\tNew entry added to bias database table: {}'.format(bias_db_entry)) + with engine.begin() as connection: + connection.execute(self.stats_table.__table__.insert(), bias_db_entry) + + # Don't print long arrays of numbers to the log file + log_dict = {} + for key in bias_db_entry: + if key not in ['collapsed_rows', 'collapsed_columns', 'counts', 'bin_centers']: + log_dict[key] = bias_db_entry[key] + logging.info('\tNew entry added to bias database table: {}'.format(log_dict)) + + # Remove the raw and calibrated files to save memory space + os.remove(filename) + os.remove(processed_file) @log_fail @log_info + @only_one(key='bias_monitor') def run(self): """The main method. 
See module docstrings for further details.""" @@ -525,9 +540,13 @@ def run(self): 'files_found': len(new_files), 'run_monitor': monitor_run, 'entry_date': datetime.datetime.now()} - self.query_table.__table__.insert().execute(new_entry) + with engine.begin() as connection: + connection.execute(self.query_table.__table__.insert(), new_entry) logging.info('\tUpdated the query history table') + # Update the bias monitor plots + BiasMonitorPlots(instrument) + logging.info('Bias Monitor completed successfully.') diff --git a/jwql/instrument_monitors/common_monitors/cosmic_ray_monitor.py b/jwql/instrument_monitors/common_monitors/cosmic_ray_monitor.py new file mode 100755 index 000000000..19c905806 --- /dev/null +++ b/jwql/instrument_monitors/common_monitors/cosmic_ray_monitor.py @@ -0,0 +1,861 @@ +#! /usr/bin/env python + +"""This module runs the Cosmic Ray Monitor. + +This module contains code for the cosmic ray monitor, which currently +checks the number and magnitude of jumps in all observations performed +using a subset of apertures for each instrument. The code first checks +MAST for any new observations that have not yet been run through the monitor. It +then copies those files to a working directory, where they are run +through the pipeline, and for which the output is stored in a new +directory for each observation. Each observation is then analyzed for +jumps due to cosmic rays, of which the number and magnitude are +recorded. This information is then inserted into the stats database +table. + +Authors +------- + + - Mike Engesser + - Matt Bourque + - Bryan Hilbert + +Use +--- + + This module can be used from the command line as such: + + :: + python cosmic_ray_monitor.py +""" + +# Native Imports +from collections import defaultdict +import datetime +from glob import glob +import logging +import numpy as np +import os +import re +import shutil + +# Third-Party Imports +from astropy.io import fits +from astropy.time import Time +from jwst.datamodels import dqflags +import numpy as np +from pysiaf import Siaf +from sqlalchemy import func +from sqlalchemy.exc import StatementError, DataError, DatabaseError, InvalidRequestError, OperationalError +from sqlalchemy.sql.expression import and_ + +# Local imports +from jwql.database.database_interface import MIRICosmicRayQueryHistory +from jwql.database.database_interface import MIRICosmicRayStats +from jwql.database.database_interface import NIRCamCosmicRayQueryHistory +from jwql.database.database_interface import NIRCamCosmicRayStats +from jwql.database.database_interface import NIRISSCosmicRayQueryHistory +from jwql.database.database_interface import NIRISSCosmicRayStats +from jwql.database.database_interface import NIRSpecCosmicRayQueryHistory +from jwql.database.database_interface import NIRSpecCosmicRayStats +from jwql.database.database_interface import FGSCosmicRayQueryHistory +from jwql.database.database_interface import FGSCosmicRayStats +from jwql.database.database_interface import session, engine +from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline +from jwql.utils import mast_utils +from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE, JWST_DATAPRODUCTS +from jwql.utils.logging_functions import configure_logging +from jwql.utils.logging_functions import log_info +from jwql.utils.logging_functions import log_fail +from jwql.utils.utils import copy_files, ensure_dir_exists, get_config, filesystem_path, grouper + + +class CosmicRay: + """Class for executing the 
cosmic ray monitor. + + This class will search for new (since the previous instance of the + class) data in the file system. It will loop over + instrument/aperture combinations and find the number of new files + available. It will copy the files over to a working directory and + run the monitor. This will count the number and magnitude of all + cosmic rays in each new exposure. Results are all saved to + database tables. + + Attributes + ---------- + output_dir : str + Path into which outputs will be placed + + data_dir : str + Path into which new files will be copied to be worked on + + query_start : float + MJD start date to use for querying MAST + + query_end : float + MJD end date to use for querying MAST + + instrument : str + Name of instrument used to collect the dark current data + + aperture : str + Name of the aperture used for the dark current (e.g. + ``NRCA1_FULL``) + + query_table : sqlalchemy table + Table containing the history of cosmic ray monitor queries to MAST + for each instrument/aperture combination + + stats_table : sqlalchemy table + Table containing cosmic ray analysis results. Number and + magnitude of cosmic rays, etc. + + Raises + ------ + ValueError + If encountering a file not following the JWST file naming + convention + + ValueError + If the most recent query search returns more than one entry + """ + + def __init__(self): + """Initialize an instance of the ``Cosmic_Ray`` class.""" + + def filter_bases(self, file_list): + """Filter a list of input files. Strip off everything after the last + underscore (e.g. "i2d.fits"), and keep only once instance of the + remaining basename. + + Parameters + ---------- + file_list : list + List of fits files + + Returns + ------- + good_files : list + Filtered list of uncal file names + """ + good_files = [] + for filename in file_list: + # Search the first part of the filename for letters. (e.g. jw01059007003 + # without the jw). If there aren't any, then it's not a stage 3 product and + # we can continue. + substr = filename[2:13] + letters = re.findall("\D", substr) # noqa: W605 + if len(letters) == 0: + rev = filename[::-1] + under = rev.find('_') + base = rev[under + 1:][::-1] + uncal_file = f'{base}_uncal.fits' + if uncal_file not in good_files: + good_files.append(uncal_file) + return good_files + + def identify_tables(self): + """Determine which database tables to use for a run of the + cosmic ray monitor. + + Uses the instrument variable to get the mixed-case instrument + name, and uses that name to find the query and stats tables + for that instrument. + """ + + mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument] + self.query_table = eval('{}CosmicRayQueryHistory'.format(mixed_case_name)) + self.stats_table = eval('{}CosmicRayStats'.format(mixed_case_name)) + + def get_cr_mags(self, jump_locs, jump_locs_pre, rateints, jump_data, jump_head): + """Gets the magnitude of each cosmic ray. + + Computes a list of magnitudes using the coordinate of the + detected jump compared to the magnitude of the same pixel in + the group prior to the jump. + + Parameters: + ---------- + + jump_locs: list + List of coordinates to a pixel marked with a jump. + + jump_locs_pre: list + List of matching coordinates one group before jump_locs. + + rateints: ndarray + Array in DN/s. 
+
+        jump_data: ndarray
+            Ndarray containing image data cube
+
+        jump_head: FITS Header
+            FITS header unit containing information about the jump data
+
+        Returns:
+        -------
+
+        mags: numpy.array
+            A histogram of cosmic ray magnitudes, from -65536 to 65536, with the number of
+            cosmic rays of each magnitude.
+
+        """
+        mag_bins = np.arange(65536 * 2 + 1, dtype=int) - 65536
+        mags = np.zeros_like(mag_bins, dtype=int)
+        outliers = []
+        num_outliers = 0
+        total = 0
+
+        for coord, coord_gb in zip(jump_locs, jump_locs_pre):
+            total += 1
+            mag = self.magnitude(coord, coord_gb, rateints, jump_data, jump_head)
+            if abs(mag) > 65535:
+                num_outliers += 1
+                outliers.append(int(mag))
+            else:
+                mags[mag_bins[mag]] += 1
+
+        logging.info("{} of {} cosmic rays are beyond bin boundaries".format(num_outliers, total))
+        return [int(m) for m in mags], outliers
+
+    def file_exists_in_database(self, filename):
+        """Checks if an entry for filename exists in the cosmic ray stats
+        database.
+
+        Parameters
+        ----------
+        filename : str
+            The full path to the uncal filename.
+
+        Returns
+        -------
+        file_exists : bool
+            ``True`` if filename exists in the cosmic ray stats database.
+        """
+
+        query = session.query(self.stats_table)
+        results = query.filter(self.stats_table.source_file == filename).all()
+
+        if len(results) != 0:
+            file_exists = True
+        else:
+            file_exists = False
+
+        session.close()
+        return file_exists
+
+    def files_in_database(self):
+        """Checks all entries in the cosmic ray stats database.
+
+        Returns
+        -------
+        files : list
+            All files in the stats database
+        """
+
+        query = session.query(self.stats_table.source_file)
+        results = query.all()
+        session.close()
+        return results
+
+    def get_cr_rate(self, cr_num, header):
+        """Given a number of CR hits, as well as the header from an observation file,
+        calculate the rate of CR hits per pixel
+
+        Parameters
+        ----------
+        cr_num : int
+            Number of jump flags identified in a particular exposure
+
+        header : astropy.io.fits.header.Header
+            Header of the exposure file
+
+        Returns
+        -------
+        rate : float
+            Rate of CR flags per pixel per second
+        """
+
+        # Note that the pipeline's jump step is unable to find CR hits in
+        # the initial group. So let's subtract one group time from the effective
+        # exposure time in order to get the exposure time that was actually
+        # searched
+        efftime = header['EFFEXPTM']
+        group_time = header['TGROUP']
+        efftime -= group_time
+
+        num_pix = (header['SUBSIZE1'] * header['SUBSIZE2'])
+
+        rate = cr_num / num_pix / efftime
+        return rate
+
+    def get_jump_data(self, jump_filename):
+        """Opens and reads a given .FITS file containing cosmic rays.
+
+        Parameters:
+        ----------
+        jump_filename: str
+            Path to file.
+
+        Returns:
+        -------
+        head: FITS header
+            Header containing file information
+
+        data: NoneType
+            FITS data
+
+        dq: ndarray
+            Data Quality array containing jump flags.
+
+        """
+        try:
+            with fits.open(jump_filename) as hdu:
+                head = hdu[0].header
+                data = hdu[1].data
+                dq = hdu[3].data
+        except (IndexError, FileNotFoundError):
+            logging.warning(f'Could not open jump file: {jump_filename} Skipping')
+            head = data = dq = None
+
+        return head, data, dq
+
+    def get_jump_locs(self, dq):
+        """Uses the data quality array to find the location of all
+        jumps in the data.
+
+        Parameters:
+        ----------
+        dq: ndarray
+            Data Quality array containing jump flags.
+
+        Returns:
+        -------
+        jump_locs: list
+            List of coordinates to a pixel marked with a jump.
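            Each tuple follows the axis ordering of the jump data cube, nominally
            (integration, group, y, x) for 4-D data or (group, y, x) for a single
            integration.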
+        """
+
+        temp = np.where(dq & dqflags.pixel["JUMP_DET"] > 0)
+
+        jump_locs = []
+
+        if len(temp) == 4:
+            for i in range(len(temp[0])):
+                jump_locs.append((temp[0][i], temp[1][i], temp[2][i], temp[3][i]))
+        elif len(temp) == 3:
+            for i in range(len(temp[0])):
+                jump_locs.append((temp[0][i], temp[1][i], temp[2][i]))
+        elif len(temp) == 0:
+            # This is the (unlikely) case where the data contain no flagged CRs
+            pass
+        else:
+            logging.error(f'dq has {len(temp)} dimensions. We expect it to have 3 or 4.')
+
+        return jump_locs
+
+    def get_rate_data(self, rate_filename):
+        """Opens and reads a given .FITS file.
+
+        Parameters:
+        ----------
+        rate_filename: str
+            Path to file.
+
+        Returns:
+        -------
+        data: NoneType
+            FITS data
+        """
+        try:
+            data = fits.getdata(rate_filename)
+        except FileNotFoundError:
+            logging.warning(f'Could not open rate file: {rate_filename} Skipping')
+            data = None
+
+        return data
+
+    def group_before(self, jump_locs):
+        """Creates a list of coordinates one group before given jump
+        coordinates.
+
+        Parameters:
+        ----------
+        jump_locs: list
+            List of coordinates to a pixel marked with a jump.
+
+        Returns:
+        -------
+        jump_locs_pre: list
+            List of matching coordinates one group before jump_locs.
+        """
+
+        jump_locs_pre = []
+
+        if len(jump_locs) == 0:
+            logging.error("No entries in jump_locs!")
+            return []
+
+        if len(jump_locs[0]) == 4:
+            for coord in jump_locs:
+                jump_locs_pre.append((coord[0], coord[1] - 1, coord[2], coord[3]))
+        elif len(jump_locs[0]) == 3:
+            for coord in jump_locs:
+                jump_locs_pre.append((coord[0] - 1, coord[1], coord[2]))
+        else:
+            logging.error(f'jump_locs has {len(jump_locs[0])} dimensions. Expecting 3 or 4.')
+
+        return jump_locs_pre
+
+    def magnitude(self, coord, coord_gb, rateints, data, head):
+        """Calculates the magnitude of a list of jumps given their
+        coordinates in an array of pixels.
+
+        Parameters:
+        ----------
+        coord: tuple
+            Coordinate of jump.
+
+        coord_gb: tuple
+            Coordinate of jump pixel one group before.
+
+        head: FITS header
+            Header containing file information.
+
+        rateints: ndarray
+            Array in DN/s.
+
+        Returns:
+        -------
+        cr_mag: float
+            the magnitude of the cosmic ray
+        """
+
+        grouptime = head['TGROUP']
+
+        if self.nints == 1:
+            rate = rateints[coord[-2]][coord[-1]]
+            cr_mag = data[0][coord[0]][coord[1]][coord[2]] \
+                - data[0][coord_gb[0]][coord_gb[1]][coord_gb[2]] \
+                - rate * grouptime
+
+        else:
+            rate = rateints[coord[0]][coord[-2]][coord[-1]]
+            cr_mag = data[coord] - data[coord_gb] - rate * grouptime
+
+        return int(np.round(np.nan_to_num(cr_mag)))
+
+    def most_recent_search(self):
+        """Adapted from Dark Monitor (Bryan Hilbert)
+
+        Query the query history database and return the information
+        on the most recent query for the given ``aperture_name`` where
+        the cosmic ray monitor was executed.
+
+        Returns:
+        -------
+        query_result : float
+            Date (in MJD) of the ending range of the previous MAST
+            query where the cosmic ray monitor was run.
+        """
+
+        sub_query = session.query(self.query_table.aperture,
+                                  func.max(self.query_table.end_time_mjd).label('maxdate')
+                                  ).group_by(self.query_table.aperture).subquery('t2')
+
+        # Note that "self.query_table.run_monitor == True" below is
+        # intentional. Switching = to "is" results in an error in the query.
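# In other words, the query below keeps the entry for this aperture whose end_time_mjd
# equals the most recent recorded value and for which the monitor actually executed,
# i.e. the most recent successful run described in the docstring above.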
+        query = session.query(self.query_table).join(
+            sub_query,
+            and_(
+                self.query_table.aperture == self.aperture,
+                self.query_table.end_time_mjd == sub_query.c.maxdate,
+                self.query_table.run_monitor == True  # noqa: E712
+            )
+        ).all()
+
+        query_count = len(query)
+        if query_count == 0:
+            query_result = 57357.0  # a.k.a. Dec 1, 2015 == CV3
+            logging.info(('\tNo query history for {}. Beginning search date will be set to {}.'
+                          .format(self.aperture, query_result)))
+        else:
+            query_result = query[0].end_time_mjd
+
+        return query_result
+
+    def possible_apers(self, inst):
+        """Return possible apertures to check for cosmic rays
+
+        Parameters:
+        ----------
+        inst: str
+            The name of the instrument of interest
+
+        Returns:
+        -------
+        apers: list
+            A list of possible apertures to check for the given
+            instrument
+        """
+        if inst.lower() == 'nircam':
+            apers = ['NRCA1_FULL',
+                     'NRCA2_FULL',
+                     'NRCA3_FULL',
+                     'NRCA4_FULL',
+                     'NRCA5_FULL',
+
+                     'NRCB1_FULL',
+                     'NRCB2_FULL',
+                     'NRCB3_FULL',
+                     'NRCB4_FULL',
+                     'NRCB5_FULL']
+
+        if inst.lower() == 'miri':
+            apers = ['MIRIM_FULL',
+                     'MIRIM_ILLUM',
+                     'MIRIM_BRIGHTSKY',
+                     'MIRIM_SUB256',
+                     'MIRIM_SUB128',
+                     'MIRIM_SUB64',
+                     'MIRIM_CORON1065',
+                     'MIRIM_CORON1140',
+                     'MIRIM_CORON1550',
+                     'MIRIM_CORONLYOT',
+                     'MIRIM_SLITLESSPRISM',
+                     'MIRIFU_CHANNEL1A',
+                     'MIRIFU_CHANNEL1B',
+                     'MIRIFU_CHANNEL1C',
+                     'MIRIFU_CHANNEL2A',
+                     'MIRIFU_CHANNEL2B',
+                     'MIRIFU_CHANNEL2C',
+                     'MIRIFU_CHANNEL3A',
+                     'MIRIFU_CHANNEL3B',
+                     'MIRIFU_CHANNEL3C',
+                     'MIRIFU_CHANNEL4A',
+                     'MIRIFU_CHANNEL4B',
+                     'MIRIFU_CHANNEL4C']
+
+        if inst.lower() == 'niriss':
+            apers = ['NIS_CEN']
+
+        if inst.lower() == 'nirspec':
+            apers = ['NRS_FULL_MSA']
+
+        if inst.lower() == 'fgs':
+            apers = ['FGS1_FULL', 'FGS2_FULL']
+
+        return apers
+
+    def process(self, file_list):
+        """The main method for processing files. See module docstrings
+        for further details.
+
+        Parameters
+        ----------
+        file_list : list
+            List of filenames (including full paths) to the cosmic ray
+            files
+        """
+        for file_chunk in grouper(file_list, 100):
+
+            input_files = []
+            in_ext = "uncal"
+            out_exts = defaultdict(lambda: ['jump', '0_ramp_fit'])
+            instrument = self.instrument
+            existing_files = {}
+            no_coord_files = []
+
+            for file_name in file_chunk:
+
+                # Don't process files that already exist in the cosmic ray stats database
+                logging.info("Checking for {} in database".format(os.path.basename(file_name)))
+                file_exists = self.file_exists_in_database(os.path.basename(file_name))
+                if file_exists:
+                    logging.info('\t{} already exists in the cosmic ray stats database table.'.format(file_name))
+                    continue
+
+                file_basename = os.path.basename(file_name)
+                dir_name = file_basename[:19]  # jw###########_#####
+
+                self.obs_dir = os.path.join(self.data_dir, dir_name)
+                logging.info(f'Setting obs_dir to {self.obs_dir}')
+                ensure_dir_exists(self.obs_dir)
+
+                if 'uncal' in file_name:
+                    head = fits.getheader(file_name)
+                    self.nints = head['NINTS']
+
+                copied, failed_to_copy = copy_files([file_name], self.obs_dir)
+                # If the file cannot be copied to the working directory, skip it
+                if len(failed_to_copy) > 0:
+                    continue
+
+                # Next we run the pipeline on the files to get the proper outputs
+                uncal_file = os.path.join(self.obs_dir, os.path.basename(file_name))
+                jump_file = uncal_file.replace("uncal", "jump")
+                rate_file = uncal_file.replace("uncal", "0_ramp_fit")
+                if self.nints > 1:
+                    rate_file = rate_file.replace("0_ramp_fit", "1_ramp_fit")
+
+                if (not os.path.isfile(jump_file)) or (not os.path.isfile(rate_file)):
+                    logging.info("Adding {} to calibration tasks".format(uncal_file))
+
+                    short_name = os.path.basename(uncal_file).replace('_uncal.fits', '')
+
+                    input_files.append(uncal_file)
+                    if self.nints > 1:
+                        out_exts[short_name] = ['jump', '1_ramp_fit']
+                else:
+                    logging.info("Calibrated files for {} already exist".format(uncal_file))
+                    existing_files[uncal_file] = [jump_file, rate_file]
+
+            output_files = run_parallel_pipeline(input_files, in_ext, out_exts, instrument, jump_pipe=True)
+            for file_name in existing_files:
+                if file_name not in input_files:
+                    input_files.append(file_name)
+                    output_files[file_name] = existing_files[file_name]
+
+            for file_name in input_files:
+
+                head = fits.getheader(file_name)
+                self.nints = head['NINTS']
+
+                dir_name = '_'.join(os.path.basename(file_name).split('_')[:2])  # file_name[51:76]
+                self.obs_dir = os.path.join(self.data_dir, dir_name)
+
+                if file_name not in output_files:
+                    skip = False
+                    head = fits.getheader(file_name)
+                    nints = head['NINTS']
+                    out_exts = ["jump", "0_ramp_fit"]
+                    if nints > 1:
+                        out_exts[-1] = "1_ramp_fit"
+                    for ext in out_exts:
+                        ext_file = os.path.basename(file_name).replace("uncal", ext)
+                        if not os.path.isfile(os.path.join(self.obs_dir, ext_file)):
+                            logging.warning("\tOutput {} missing".format(ext_file))
+                            logging.warning("\tSkipping {}".format(os.path.basename(file_name)))
+                            skip = True
+                    if skip:
+                        continue
+
+                obs_files = output_files[file_name]
+
+                # Next we analyze the cosmic rays in the new data
+                for output_file in obs_files:
+                    logging.info("Checking output file {}".format(output_file))
+
+                    if 'jump' in output_file:
+                        logging.debug("Adding jump file {}".format(os.path.basename(output_file)))
+                        jump_file = os.path.join(self.obs_dir, os.path.basename(output_file))
+
+                    if self.nints == 1:
+                        logging.debug("Looking for single integration rate file")
+                        if '0_ramp_fit' in output_file:
+                            logging.debug("Adding rate file 
{}".format(os.path.basename(output_file))) + rate_file = os.path.join(self.obs_dir, os.path.basename(output_file)) + + elif self.nints > 1: + logging.debug("Looking for multi-integration rate file") + if '1_ramp_fit' in output_file: + logging.debug("Adding rate file {}".format(os.path.basename(output_file))) + rate_file = os.path.join(self.obs_dir, os.path.basename(output_file)) + + logging.info(f'\tUsing {jump_file} and {rate_file} to monitor CRs.') + + jump_head, jump_data, jump_dq = self.get_jump_data(jump_file) + rate_data = self.get_rate_data(rate_file) + if jump_head is None or rate_data is None: + continue + + jump_locs = self.get_jump_locs(jump_dq) + if len(jump_locs) == 0: + no_coord_files.append(os.path.basename(file_name)) + jump_locs_pre = self.group_before(jump_locs) + cosmic_ray_num = len(jump_locs) + + logging.info(f'\tFound {cosmic_ray_num} CR-flags.') + + # Translate CR count into a CR rate per pixel, so that all exposures + # can go on one plot regardless of exposure time and aperture size + cr_rate = self.get_cr_rate(cosmic_ray_num, jump_head) + logging.info(f'\tNormalizing by time and area, this is {cr_rate} jumps/sec/pixel.') + + # Get observation time info + obs_start_time = jump_head['EXPSTART'] + obs_end_time = jump_head['EXPEND'] + start_time = Time(obs_start_time, format='mjd', scale='utc').isot.replace('T', ' ') + end_time = Time(obs_end_time, format='mjd', scale='utc').isot.replace('T', ' ') + + cosmic_ray_mags, outlier_mags = self.get_cr_mags(jump_locs, jump_locs_pre, rate_data, jump_data, jump_head) + + # Insert new data into database + try: + logging.info("Inserting {} in database".format(os.path.basename(file_name))) + cosmic_ray_db_entry = {'entry_date': datetime.datetime.now(), + 'aperture': self.aperture, + 'source_file': os.path.basename(file_name), + 'obs_start_time': start_time, + 'obs_end_time': end_time, + 'jump_count': cosmic_ray_num, + 'jump_rate': cr_rate, + 'magnitude': cosmic_ray_mags, + 'outliers': outlier_mags + } + with engine.begin() as connection: + connection.execute(self.stats_table.__table__.insert(), cosmic_ray_db_entry) + + logging.info("Successfully inserted into database. \n") + + # Delete fits files in order to save disk space + logging.info("Removing pipeline products in order to save disk space. \n") + try: + for file in [file_name, jump_file, rate_file]: + if os.path.isfile(file): + os.remove(file) + if os.path.exists(self.obs_dir): + os.rmdir(self.obs_dir) + except OSError as e: + logging.error(f"Unable to delete {self.obs_dir}") + logging.error(e) + except (StatementError, DataError, DatabaseError, InvalidRequestError, OperationalError) as e: + logging.error("Could not insert entry into database. \n") + logging.error(e) + + if len(no_coord_files) > 0: + logging.error("{} files had no jump co-ordinates".format(len(no_coord_files))) + for file_name in no_coord_files: + logging.error("\t{} had no jump co-ordinates".format(file_name)) + + + def pull_filenames(self, file_info): + """Extract filenames from the list of file information returned from + query_mast. + + Parameters + ---------- + file_info : dict + Dictionary of file information returned by ``query_mast`` + + Returns + ------- + files : list + List of filenames (without paths) extracted from ``file_info`` + """ + files = [element['filename'] for element in file_info['data']] + return files + + @log_fail + @log_info + @only_one(key='cosmic_ray_monitor') + def run(self): + """The main method. 
See module docstrings for additional info + + Queries MAST for new MIRI data and copies it to a working + directory where it is run through the JWST pipeline. The output + of the 'jump' and 'rate' steps is used to determine the number + and magnitudes of cosmic rays which is then saved to the + database. + """ + + logging.info('Begin logging for cosmic_ray_monitor') + + self.query_end = Time.now().mjd + + for instrument in JWST_INSTRUMENT_NAMES: + self.instrument = instrument + + # Identify which tables to use + self.identify_tables() + + # Get a list of possible apertures + possible_apertures = self.possible_apers(instrument) + + for aperture in possible_apertures: + + logging.info('') + logging.info('Working on aperture {} in {}'.format(aperture, instrument)) + + self.aperture = aperture + + # We start by querying MAST for new data + self.query_start = self.most_recent_search() + + logging.info('\tMost recent query: {}'.format(self.query_start)) + logging.info(f'\tQuerying MAST from {self.query_start} to {self.query_end}') + new_entries = self.query_mast() + logging.info(f'\tNew MAST query returned dictionary with {len(new_entries["data"])} files.') + new_entries = self.pull_filenames(new_entries) + + # Filter new entries so we omit stage 3 results and keep only base names + new_entries = self.filter_bases(new_entries) + logging.info(f'\tAfter filtering to keep only uncal files, we are left with {len(new_entries)} files') + + for fname in new_entries: + logging.info(f'{fname}') + + new_filenames = [] + for file_entry in new_entries: + try: + new_filenames.append(filesystem_path(file_entry)) + except FileNotFoundError: + logging.info('\t{} not found in target directory'.format(file_entry)) + except ValueError: + logging.info( + '\tProvided file {} does not follow JWST naming conventions.'.format(file_entry)) + + # Next we copy new files to the working directory + output_dir = os.path.join(get_config()['outputs'], 'cosmic_ray_monitor') + + self.data_dir = os.path.join(output_dir, 'data') + ensure_dir_exists(self.data_dir) + + self.process(new_filenames) + + monitor_run = True + + new_entry = {'instrument': self.instrument, + 'aperture': self.aperture, + 'start_time_mjd': self.query_start, + 'end_time_mjd': self.query_end, + 'files_found': len(new_entries), + 'run_monitor': monitor_run, + 'entry_date': datetime.datetime.now()} + with engine.begin() as connection: + connection.execute(self.query_table.__table__.insert(), new_entry) + logging.info('\tUpdated the query history table') + + def query_mast(self): + """Use astroquery to search MAST for cosmic ray data + + Parameters: + ---------- + start_date : float + Starting date for the search in MJD + end_date : float + Ending date for the search in MJD + + Returns + ------- + result : list + List of dictionaries containing the query results + """ + + data_product = JWST_DATAPRODUCTS + parameters = {"date_obs_mjd": {"min": self.query_start, "max": self.query_end}, "apername": self.aperture} + + result = mast_utils.instrument_inventory(self.instrument, data_product, + add_filters=parameters, + return_data=True) + + return result + + +if __name__ == '__main__': + # Configure logging + module = os.path.basename(__file__).strip('.py') + configure_logging(module) + + # Call the main function + monitor = CosmicRay() + monitor.run() diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 515eca58e..c73dd3543 100755 --- 
a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -17,12 +17,12 @@ is entered into the ``DarkCurrent`` database table.
 The mean slope image is then normalized by an existing baseline slope
-image. New hot pixels are identified as those with normalized signal
-rates above a ``hot_threshold`` value. Similarly, pixels with
-normalized signal rates below a ``dead_threshold`` are flagged as new
-dead pixels.
+image, from the previous run of the monitor. New hot pixels are identified
+as those with normalized signal rates above a ``hot_threshold`` value.
+Similarly, pixels with normalized signal rates below a ``dead_threshold``
+are flagged as new dead pixels.
-The standard deviation slope image is normalized by a baseline
+The standard deviation slope image is also normalized by a baseline
 (historical) standard deviation image. Pixels with normalized values
 above a noise threshold are flagged as newly noisy pixels.
@@ -38,6 +38,26 @@ The histogram itself as well as the best-fit Gaussian and double
 Gaussian parameters are saved to the DarkDarkCurrent database table.
+Currently, there are three outputs from the dark monitor that are shown
+in the JWQL web app. First, the dark current histogram is plotted, along
+with a corresponding cumulative distribution function (CDF). The Gaussian
+fits are not currently shown.
+
+Secondly, a trending plot of the mean dark current versus time is shown,
+where the mean value is the sigma-clipped mean across the detector in
+the mean slope image. Error bars on the plot show the sigma-clipped
+standard deviation across the detector.
+
+Finally, the mean slope image is shown. Any new potential hot, dead, and
+noisy pixels that were identified are also shown on the mean slope image,
+in order to give an idea of where these pixels are located on the detector.
+To keep the image from becoming too busy, this is only done if the number
+of potential new bad pixels is under 1000. If more pixels than this are
+identified, that number is reported in the plot, but the pixels are not
+marked on the image.
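For reference, the sigma-clipped statistics behind the trending plot can be reproduced
with astropy; a minimal sketch, where the array below is only a synthetic stand-in for
the real mean slope image produced by the monitor:

    import numpy as np
    from astropy.stats import sigma_clipped_stats

    mean_slope_image = np.random.normal(0.001, 0.0002, size=(2048, 2048))  # stand-in data
    mean_dark, _, std_dark = sigma_clipped_stats(mean_slope_image)

Here ``mean_dark`` corresponds to the plotted mean value and ``std_dark`` to the error
bar for a single entry in the trending plot.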
+ + + Author ------ @@ -56,31 +76,36 @@ from copy import copy, deepcopy import datetime +from glob import glob import logging import os from astropy.io import ascii, fits from astropy.modeling import models +from astropy.stats import sigma_clipped_stats from astropy.time import Time +from bokeh.models import ColorBar, ColumnDataSource, HoverTool, Legend +from bokeh.models import LinearColorMapper +from bokeh.plotting import figure import numpy as np from pysiaf import Siaf from sqlalchemy import func from sqlalchemy.sql.expression import and_ -from jwql.database.database_interface import session +from jwql.database.database_interface import session, engine from jwql.database.database_interface import NIRCamDarkQueryHistory, NIRCamDarkPixelStats, NIRCamDarkDarkCurrent from jwql.database.database_interface import NIRISSDarkQueryHistory, NIRISSDarkPixelStats, NIRISSDarkDarkCurrent from jwql.database.database_interface import MIRIDarkQueryHistory, MIRIDarkPixelStats, MIRIDarkDarkCurrent from jwql.database.database_interface import NIRSpecDarkQueryHistory, NIRSpecDarkPixelStats, NIRSpecDarkDarkCurrent from jwql.database.database_interface import FGSDarkQueryHistory, FGSDarkPixelStats, FGSDarkDarkCurrent from jwql.instrument_monitors import pipeline_tools -from jwql.jwql_monitors import monitor_mast -from jwql.utils import calculations, instrument_properties, monitor_utils -from jwql.utils.constants import ASIC_TEMPLATES, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE, JWST_DATAPRODUCTS, \ - RAPID_READPATTERNS +from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline +from jwql.utils import calculations, instrument_properties, mast_utils, monitor_utils +from jwql.utils.constants import ASIC_TEMPLATES, DARK_MONITOR_MAX_BADPOINTS_TO_PLOT, JWST_INSTRUMENT_NAMES, FULL_FRAME_APERTURES +from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE, JWST_DATAPRODUCTS, RAPID_READPATTERNS from jwql.utils.logging_functions import log_info, log_fail from jwql.utils.permissions import set_permissions -from jwql.utils.utils import copy_files, ensure_dir_exists, get_config, filesystem_path +from jwql.utils.utils import copy_files, ensure_dir_exists, get_config, filesystem_path, save_png THRESHOLDS_FILE = os.path.join(os.path.split(__file__)[0], 'dark_monitor_file_thresholds.txt') @@ -188,10 +213,15 @@ def add_bad_pix(self, coordinates, pixel_type, files, mean_filename, baseline_fi logging.info('Adding {} {} pixels to database.'.format(len(coordinates[0]), pixel_type)) + # Change to int from numpy.int64 because the latter can't be put into the + # database apparently. 
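# A quick illustration of the cast below (values are made up):
#     >>> import numpy as np
#     >>> [int(v) for v in np.array([10, 20])]
#     [10, 20]
# The resulting plain Python ints can be stored by the database interface, whereas the
# raw numpy.int64 values apparently cannot.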
+ x_coord = [int(x) for x in coordinates[0]] + y_coord = [int(y) for y in coordinates[1]] + source_files = [os.path.basename(item) for item in files] entry = {'detector': self.detector, - 'x_coord': coordinates[0], - 'y_coord': coordinates[1], + 'x_coord': x_coord, + 'y_coord': y_coord, 'type': pixel_type, 'source_files': source_files, 'obs_start_time': observation_start_time, @@ -200,7 +230,120 @@ def add_bad_pix(self, coordinates, pixel_type, files, mean_filename, baseline_fi 'mean_dark_image_file': os.path.basename(mean_filename), 'baseline_file': os.path.basename(baseline_filename), 'entry_date': datetime.datetime.now()} - self.pixel_table.__table__.insert().execute(entry) + with engine.begin() as connection: + connection.execute(self.pixel_table.__table__.insert(), entry) + + def create_mean_slope_figure(self, image, num_files, hotxy=None, deadxy=None, noisyxy=None, baseline_file=None): + """Create and save a png containing the mean dark slope image, + to be displayed in the web app + + Parameters + ---------- + image : numpy.ndarray + 2D array of the dark slope image + + num_files : int + Number of individual exposures that went into creating the mean slope image + + hotxy : tup + 2-tuple of lists that give x, y coordinates of possible new hot pixels + + deadxy : tup + 2-tuple of lists that give x, y coordinates of possible new dead pixels + + noisyxy : tup + 2-tuple of lists that give x, y coordinates of possible new noisy pixels + + baseline_file : str + Name of fits file containing the mean slope image to which ``image`` was compared + when looking for new hot/dead/noisy pixels + """ + output_filename = '{}_{}_{}_to_{}_mean_slope_image.png'.format(self.instrument.lower(), + self.aperture.lower(), + self.query_start, self.query_end) + + mean_slope_dir = os.path.join(get_config()['outputs'], 'dark_monitor', 'mean_slope_images') + + ensure_dir_exists(mean_slope_dir) + output_filename = os.path.join(mean_slope_dir, output_filename) + logging.info("Name of mean slope image: {}".format(output_filename)) + + if image is not None: + # Get info on image for better display later + ny, nx = image.shape + img_mn, img_med, img_dev = sigma_clipped_stats(image[4: ny - 4, 4: nx - 4]) + + # Create figure + start_time = Time(float(self.query_start), format='mjd').tt.datetime.strftime("%m/%d/%Y") + end_time = Time(float(self.query_end), format='mjd').tt.datetime.strftime("%m/%d/%Y") + + self.plot = figure(title=f'{self.aperture}: {num_files} files.
{start_time} to {end_time}', tools='') + # tools='pan,box_zoom,reset,wheel_zoom,save') + self.plot.x_range.range_padding = self.plot.y_range.range_padding = 0 + + # Create the color mapper that will be used to scale the image + mapper = LinearColorMapper(palette='Viridis256', low=(img_med - (5 * img_dev)), high=(img_med + (5 * img_dev))) + + # Plot image and add color bar + imgplot = self.plot.image(image=[image], x=0, y=0, dw=nx, dh=ny, + color_mapper=mapper, level="image") + + color_bar = ColorBar(color_mapper=mapper, width=8, title='DN/sec') + self.plot.add_layout(color_bar, 'right') + + if (('FULL' in self.aperture) or ('_CEN' in self.aperture)): + + if hotxy is not None: + # Create lists of hot/dead/noisy pixel values if present + hot_vals = [] + for x, y in zip(hotxy[0], hotxy[1]): + if ((x < nx) & (y < ny)): + hot_vals.append(image[y, x]) + else: + hot_vals = None + + if deadxy is not None: + dead_vals = [] + for x, y in zip(deadxy[0], deadxy[1]): + if ((x < nx) & (y < ny)): + dead_vals.append(image[y, x]) + else: + dead_vals = None + + if noisyxy is not None: + noisy_vals = [] + for x, y in zip(noisyxy[0], noisyxy[1]): + if ((x < nx) & (y < ny)): + noisy_vals.append(image[y, x]) + else: + noisy_vals = None + + hot_legend = self.overplot_bad_pix("hot", hotxy, hot_vals) + dead_legend = self.overplot_bad_pix("dead", deadxy, dead_vals) + noisy_legend = self.overplot_bad_pix("noisy", noisyxy, noisy_vals) + + # Collect information about the file this image was compared against + if baseline_file is not None: + base_parts = os.path.basename(baseline_file).split('_') + + # Get the starting and ending time from the filename. + base_start = Time(float(base_parts[3]), format='mjd').tt.datetime + base_end = Time(float(base_parts[5]), format='mjd').tt.datetime + base_start_time = base_start.strftime("%m/%d/%Y") + base_end_time = base_end.strftime("%m/%d/%Y") + legend_title = f'Compared to dark from {base_start_time} to {base_end_time}' + else: + legend_title = 'Compared to previous mean dark' + legend = Legend(items=[hot_legend, dead_legend, noisy_legend], + location="center", + orientation='vertical', + title=legend_title) + + self.plot.add_layout(legend, 'below') + + # Save the plot in a png + save_png(self.plot, filename=output_filename) + set_permissions(output_filename) def get_metadata(self, filename): """Collect basic metadata from a fits file @@ -215,8 +358,8 @@ def get_metadata(self, filename): try: self.detector = header['DETECTOR'] - self.x0 = header['SUBSTRT1'] - self.y0 = header['SUBSTRT2'] + self.x0 = header['SUBSTRT1'] - 1 + self.y0 = header['SUBSTRT2'] - 1 self.xsize = header['SUBSIZE1'] self.ysize = header['SUBSIZE2'] self.sample_time = header['TSAMPLE'] @@ -250,9 +393,16 @@ def exclude_existing_badpix(self, badpix, pixel_type): List of y coordinates of new bad pixels """ + if len(badpix[0]) == 0: + logging.warning("\tNo new {} pixels to check.".format(pixel_type)) + return ([], []) + + logging.info("\tChecking {} potential new {} pixels".format(len(badpix[0]), pixel_type)) + if pixel_type not in ['hot', 'dead', 'noisy']: raise ValueError('Unrecognized bad pixel type: {}'.format(pixel_type)) + logging.info("\t\tRunning database query") db_entries = session.query(self.pixel_table) \ .filter(self.pixel_table.type == pixel_type) \ .filter(self.pixel_table.detector == self.detector) \ @@ -265,17 +415,28 @@ def exclude_existing_badpix(self, badpix, pixel_type): y_coords = _row.y_coord for x, y in zip(x_coords, y_coords): already_found.append((x, y)) + found_x = np.array([x[0] for x 
in already_found]) + found_y = np.array([x[1] for x in already_found]) + msg = "\t\tChecking pixels against list of {} existing {} pixels" + logging.info(msg.format(len(found_x), pixel_type)) # Check to see if each pixel already appears in the database for # the given bad pixel type new_pixels_x = [] new_pixels_y = [] for x, y in zip(badpix[0], badpix[1]): - pixel = (x, y) - if pixel not in already_found: + ind_x = np.where(found_x == x) + ind_y = np.where(found_y == y) + if len(np.intersect1d(ind_x[0], ind_y[0])) == 0: new_pixels_x.append(x) new_pixels_y.append(y) + logging.info("\t\tKeeping {} {} pixels".format(len(new_pixels_x), pixel_type)) +# pixel = (x, y) +# if pixel not in already_found: +# new_pixels_x.append(x) +# new_pixels_y.append(y) + session.close() return (new_pixels_x, new_pixels_y) @@ -383,7 +544,7 @@ def most_recent_search(self): """ query = session.query(self.query_table).filter(self.query_table.aperture == self.aperture, self.query_table.readpattern == self.readpatt). \ - filter(self.query_table.run_monitor == True) + filter(self.query_table.run_monitor == True) # noqa: E348 (comparison to true) dates = np.zeros(0) for instance in query: @@ -434,6 +595,72 @@ def noise_check(self, new_noise_image, baseline_noise_image, threshold=1.5): return noisy + def overplot_bad_pix(self, pix_type, coords, values): + """Add a scatter plot of potential new bad pixels to the plot + + Parameters + ---------- + pix_type : str + Type of bad pixel. "hot", "dead", or "noisy" + + coords : tup + 2-tuple of lists, containing the x and y coordinates of the bad pixels + + values : list + Values in the mean dark image at the locations of the bad pixels + + Returns + ------- + legend_item : tup + Tuple of legend text and associated plot. Will be converted into + a LegendItem and added to the plot legend + """ + if coords is None: + coords = ([], []) + values = [] + + numpix = len(coords[0]) + + colors = {"hot": "red", "dead": "blue", "noisy": "pink"} + adjective = {"hot": "hotter", "dead": "lower", "noisy": "noisier"} + sources = {} + badpixplots = {} + hover_tools = {} + + # Need to make sources a dict because we can't use the same variable name + # for multiple ColumnDataSources + sources = {} + badpixplots = {} + + # If the number of pixels to overplot is higher than the threshold, + # then empty the coords list. This way we can still create a + # legend entry for them + if numpix > DARK_MONITOR_MAX_BADPOINTS_TO_PLOT: + coords = ([], []) + values = [] + + sources[pix_type] = ColumnDataSource(data=dict(pixels_x=coords[0], + pixels_y=coords[1] + ) + ) + + # Overplot the bad pixel locations + badpixplots[pix_type] = self.plot.circle(x=f'pixels_x', y=f'pixels_y', + source=sources[pix_type], color=colors[pix_type]) + + # Add to the legend + if numpix > 0: + if numpix <= DARK_MONITOR_MAX_BADPOINTS_TO_PLOT: + text = f"{numpix} pix {adjective[pix_type]} than baseline" + else: + text = f"{numpix} pix {adjective[pix_type]} than baseline (not shown)" + else: + text = f"No new {adjective[pix_type]}" + + # Create a tuple to be added to the plot legend + legend_item = (text, [badpixplots[pix_type]]) + return legend_item + def process(self, file_list): """The main method for processing darks. See module docstrings for further details. 
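The vectorized membership test added to ``exclude_existing_badpix`` above replaces the per-pixel ``(x, y) in already_found`` lookup. A minimal sketch of the equivalence, with made-up coordinates standing in for the bad pixels already recorded in the database::

    import numpy as np

    # Made-up coordinates of previously recorded bad pixels
    found_x = np.array([10, 25, 300])
    found_y = np.array([7, 25, 12])

    def is_new(x, y):
        """Return True if (x, y) does not already appear in found_x/found_y."""
        ind_x = np.where(found_x == x)[0]
        ind_y = np.where(found_y == y)[0]
        # The pixel is already known only if a single entry matches in both x and y
        return len(np.intersect1d(ind_x, ind_y)) == 0

    assert is_new(10, 25)        # x and y each match the list, but never in the same entry
    assert not is_new(25, 25)    # (25, 25) is already recorded
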
@@ -448,55 +675,33 @@ def process(self, file_list): # Basic metadata that will be needed later self.get_metadata(file_list[0]) - # Determine which pipeline steps need to be executed - required_steps = pipeline_tools.get_pipeline_steps(self.instrument) - logging.info('\tRequired calwebb1_detector pipeline steps to have' - 'data in correct format:') - for item in required_steps: - logging.info('\t\t{}: {}'.format(item, required_steps[item])) - - # Modify the list of pipeline steps to skip those not needed for the - # preparation of dark current data - required_steps['dark_current'] = False - required_steps['persistence'] = False - - # NIRSpec IR^2 readout pattern NRSIRS2 is the only one with - # nframes not a power of 2 - if self.read_pattern not in pipeline_tools.GROUPSCALE_READOUT_PATTERNS: - required_steps['group_scale'] = False - # Run pipeline steps on files, generating slope files + pipeline_files = [] slope_files = [] for filename in file_list: - completed_steps = pipeline_tools.completed_pipeline_steps(filename) - steps_to_run = pipeline_tools.steps_to_run(required_steps, completed_steps) - logging.info('\tWorking on file: {}'.format(filename)) - logging.info('\tPipeline steps that remain to be run:') - for item in steps_to_run: - logging.info('\t\t{}: {}'.format(item, steps_to_run[item])) - # Run any remaining required pipeline steps - if any(steps_to_run.values()) is False: - slope_files.append(filename) - else: - processed_file = filename.replace('.fits', '_{}.fits'.format('rate')) + rate_file = filename.replace("dark", "rate") + rate_file_name = os.path.basename(rate_file) + local_rate_file = os.path.join(self.data_dir, rate_file_name) - # If the slope file already exists, skip the pipeline call - if not os.path.isfile(processed_file): - logging.info('\tRunning pipeline on {}'.format(filename)) - processed_file = pipeline_tools.run_calwebb_detector1_steps(os.path.abspath(filename), steps_to_run) - logging.info('\tPipeline complete. Output: {}'.format(processed_file)) + if os.path.isfile(local_rate_file): + logging.info("\t\tFile {} exists, skipping pipeline".format(local_rate_file)) + slope_files.append(local_rate_file) + else: + logging.info("\t\tAdding {} to calibration set".format(filename)) + pipeline_files.append(filename) - else: - logging.info('\tSlope file {} already exists. Skipping call to pipeline.' 
- .format(processed_file)) - pass + # Specify that we want to skip the dark current correction step + step_args = {'dark_current': {'skip': True}} + # Call the pipeline + outputs = run_parallel_pipeline(pipeline_files, "dark", ["rate"], self.instrument, step_args=step_args) + for filename in file_list: + processed_file = filename.replace("_dark", "_rate") + if processed_file not in slope_files and os.path.isfile(processed_file): slope_files.append(processed_file) - - # Delete the original dark ramp file to save disk space os.remove(filename) obs_times = [] @@ -513,77 +718,109 @@ def process(self, file_list): max_time = np.max(obs_times) mid_time = instrument_properties.mean_time(obs_times) - # Read in all slope images and place into a list - slope_image_stack, slope_exptimes = pipeline_tools.image_stack(slope_files) - - # Calculate a mean slope image from the inputs - slope_image, stdev_image = calculations.mean_image(slope_image_stack, sigma_threshold=3) - mean_slope_file = self.save_mean_slope_image(slope_image, stdev_image, slope_files) - logging.info('\tSigma-clipped mean of the slope images saved to: {}'.format(mean_slope_file)) - - # ----- Search for new hot/dead/noisy pixels ----- - # Read in baseline mean slope image and stdev image - # The baseline image is used to look for hot/dead/noisy pixels, - # but not for comparing mean dark rates. Therefore, updates to - # the baseline can be minimal. - - # Limit checks for hot/dead/noisy pixels to full frame data since - # subarray data have much shorter exposure times and therefore lower - # signal-to-noise - aperture_type = Siaf(self.instrument)[self.aperture].AperType - if aperture_type == 'FULLSCA': - baseline_file = self.get_baseline_filename() - if baseline_file is None: - logging.warning(('\tNo baseline dark current countrate image for {} {}. 
Setting the ' - 'current mean slope image to be the new baseline.'.format(self.instrument, self.aperture))) - baseline_file = mean_slope_file - baseline_mean = deepcopy(slope_image) - baseline_stdev = deepcopy(stdev_image) - else: - logging.info('\tBaseline file is {}'.format(baseline_file)) - baseline_mean, baseline_stdev = self.read_baseline_slope_image(baseline_file) - - # Check the hot/dead pixel population for changes - new_hot_pix, new_dead_pix = self.find_hot_dead_pixels(slope_image, baseline_mean) - - # Shift the coordinates to be in full frame coordinate system - new_hot_pix = self.shift_to_full_frame(new_hot_pix) - new_dead_pix = self.shift_to_full_frame(new_dead_pix) - - # Exclude hot and dead pixels found previously - new_hot_pix = self.exclude_existing_badpix(new_hot_pix, 'hot') - new_dead_pix = self.exclude_existing_badpix(new_dead_pix, 'dead') - - # Add new hot and dead pixels to the database - logging.info('\tFound {} new hot pixels'.format(len(new_hot_pix[0]))) - logging.info('\tFound {} new dead pixels'.format(len(new_dead_pix[0]))) - self.add_bad_pix(new_hot_pix, 'hot', file_list, mean_slope_file, baseline_file, min_time, mid_time, max_time) - self.add_bad_pix(new_dead_pix, 'dead', file_list, mean_slope_file, baseline_file, min_time, mid_time, max_time) - - # Check for any pixels that are significantly more noisy than - # in the baseline stdev image - new_noisy_pixels = self.noise_check(stdev_image, baseline_stdev) - - # Shift coordinates to be in full_frame coordinate system - new_noisy_pixels = self.shift_to_full_frame(new_noisy_pixels) - - # Exclude previously found noisy pixels - new_noisy_pixels = self.exclude_existing_badpix(new_noisy_pixels, 'noisy') - - # Add new noisy pixels to the database - logging.info('\tFound {} new noisy pixels'.format(len(new_noisy_pixels[0]))) - self.add_bad_pix(new_noisy_pixels, 'noisy', file_list, mean_slope_file, baseline_file, min_time, mid_time, max_time) - - # ----- Calculate image statistics ----- + try: - # Find amplifier boundaries so per-amp statistics can be calculated - number_of_amps, amp_bounds = instrument_properties.amplifier_info(slope_files[0]) - logging.info('\tAmplifier boundaries: {}'.format(amp_bounds)) + # Read in all slope images and place into a list + slope_image_stack, slope_exptimes = pipeline_tools.image_stack(slope_files) + + # Calculate a mean slope image from the inputs + slope_image, stdev_image = calculations.mean_image(slope_image_stack, sigma_threshold=3) + mean_slope_file = self.save_mean_slope_image(slope_image, stdev_image, slope_files) + + # Free up memory + del slope_image_stack + + # ----- Search for new hot/dead/noisy pixels ----- + # Read in baseline mean slope image and stdev image + # The baseline image is used to look for hot/dead/noisy pixels, + # but not for comparing mean dark rates. Therefore, updates to + # the baseline can be minimal. + + # Limit checks for hot/dead/noisy pixels to full frame data since + # subarray data have much shorter exposure times and therefore lower + # signal-to-noise + new_hot_pix = None + new_dead_pix = None + new_noisy_pixels = None + aperture_type = Siaf(self.instrument)[self.aperture].AperType + if aperture_type == 'FULLSCA': + baseline_file = self.get_baseline_filename() + if baseline_file is None: + logging.warning(('\tNo baseline dark current countrate image for {} {}. 
Setting the ' + 'current mean slope image to be the new baseline.'.format(self.instrument, self.aperture))) + baseline_file = mean_slope_file + baseline_mean = deepcopy(slope_image) + baseline_stdev = deepcopy(stdev_image) + else: + logging.info('\tBaseline file is {}'.format(baseline_file)) + baseline_mean, baseline_stdev = self.read_baseline_slope_image(baseline_file) + + # Check the hot/dead pixel population for changes + logging.info("\tFinding new hot/dead pixels") + new_hot_pix, new_dead_pix = self.find_hot_dead_pixels(slope_image, baseline_mean) + + # Shift the coordinates to be in full frame coordinate system + logging.info("\tShifting hot pixels to full frame") + new_hot_pix = self.shift_to_full_frame(new_hot_pix) + + # Exclude hot and dead pixels found previously + logging.info("\tExcluding previously-known hot pixels") + new_hot_pix = self.exclude_existing_badpix(new_hot_pix, 'hot') + + # Add new hot and dead pixels to the database + logging.info('\tFound {} new hot pixels'.format(len(new_hot_pix[0]))) + self.add_bad_pix(new_hot_pix, 'hot', file_list, mean_slope_file, baseline_file, min_time, mid_time, max_time) + + # Same thing for dead pixels + logging.info("\tShifting dead pixels to full frame") + new_dead_pix = self.shift_to_full_frame(new_dead_pix) + logging.info("\tExcluding previously-known dead pixels") + new_dead_pix = self.exclude_existing_badpix(new_dead_pix, 'dead') + logging.info('\tFound {} new dead pixels'.format(len(new_dead_pix[0]))) + self.add_bad_pix(new_dead_pix, 'dead', file_list, mean_slope_file, baseline_file, min_time, mid_time, max_time) + + # Check for any pixels that are significantly more noisy than + # in the baseline stdev image + logging.info("\tChecking for noisy pixels") + new_noisy_pixels = self.noise_check(stdev_image, baseline_stdev) + + # Shift coordinates to be in full_frame coordinate system + logging.info("\tShifting noisy pixels to full frame") + new_noisy_pixels = self.shift_to_full_frame(new_noisy_pixels) + + # Exclude previously found noisy pixels + logging.info("\tExcluding existing bad pixels from noisy pixels") + new_noisy_pixels = self.exclude_existing_badpix(new_noisy_pixels, 'noisy') + + # Add new noisy pixels to the database + logging.info('\tFound {} new noisy pixels'.format(len(new_noisy_pixels[0]))) + self.add_bad_pix(new_noisy_pixels, 'noisy', file_list, mean_slope_file, baseline_file, min_time, mid_time, max_time) + + logging.info("Creating Mean Slope Image") + # Create png file of mean slope image. 
Add bad pixels only for full frame apertures + self.create_mean_slope_figure(slope_image, len(slope_files), hotxy=new_hot_pix, deadxy=new_dead_pix, + noisyxy=new_noisy_pixels, baseline_file=baseline_file) + logging.info('\tSigma-clipped mean of the slope images saved to: {}'.format(mean_slope_file)) + + # ----- Calculate image statistics ----- + + # Find amplifier boundaries so per-amp statistics can be calculated + number_of_amps, amp_bounds = instrument_properties.amplifier_info(slope_files[0]) + logging.info('\tAmplifier boundaries: {}'.format(amp_bounds)) + + # Calculate mean and stdev values, and fit a Gaussian to the + # histogram of the pixels in each amp + (amp_mean, amp_stdev, gauss_param, gauss_chisquared, double_gauss_params, double_gauss_chisquared, + histogram, bins) = self.stats_by_amp(slope_image, amp_bounds) + + # Remove the input files in order to save disk space + files_to_remove = glob(f'{self.data_dir}/*fits') + for filename in files_to_remove: + os.remove(filename) - # Calculate mean and stdev values, and fit a Gaussian to the - # histogram of the pixels in each amp - (amp_mean, amp_stdev, gauss_param, gauss_chisquared, double_gauss_params, double_gauss_chisquared, - histogram, bins) = self.stats_by_amp(slope_image, amp_bounds) + except Exception as e: + logging.critical("ERROR: {}".format(e)) + raise e # Construct new entry for dark database table source_files = [os.path.basename(item) for item in file_list] @@ -606,11 +843,12 @@ def process(self, file_list): 'double_gauss_width2': double_gauss_params[key][5], 'double_gauss_chisq': double_gauss_chisquared[key], 'mean_dark_image_file': os.path.basename(mean_slope_file), - 'hist_dark_values': bins, - 'hist_amplitudes': histogram, + 'hist_dark_values': bins[key], + 'hist_amplitudes': histogram[key], 'entry_date': datetime.datetime.now() } - self.stats_table.__table__.insert().execute(dark_db_entry) + with engine.begin() as connection: + connection.execute(self.stats_table.__table__.insert(), dark_db_entry) def read_baseline_slope_image(self, filename): """Read in a baseline mean slope image and associated standard @@ -640,6 +878,7 @@ def read_baseline_slope_image(self, filename): @log_fail @log_info + @only_one(key='dark_monitor') def run(self): """The main method. See module docstrings for further details. @@ -755,8 +994,8 @@ def run(self): # then the monitor will not be run if len(new_filenames) < file_count_threshold: logging.info(("\tFilesystem search for the files identified by MAST has returned {} files. " - "This is less than the required minimum number of files ({}) necessary to run " - "the monitor. Quitting.").format(len(new_filenames), file_count_threshold)) + "This is less than the required minimum number of files ({}) necessary to run " + "the monitor. Quitting.").format(len(new_filenames), file_count_threshold)) monitor_run = False else: logging.info(("\tFilesystem search for the files identified by MAST has returned {} files.") @@ -767,13 +1006,26 @@ def run(self): # Set up directories for the copied data ensure_dir_exists(os.path.join(self.output_dir, 'data')) self.data_dir = os.path.join(self.output_dir, - 'data/{}_{}'.format(self.instrument.lower(), - self.aperture.lower())) + 'data/{}_{}'.format(self.instrument.lower(), + self.aperture.lower())) ensure_dir_exists(self.data_dir) # Copy files from filesystem dark_files, not_copied = copy_files(new_filenames, self.data_dir) + # Check that there were no problems with the file copying. 
If any of the copied + # files have different sizes between the MAST filesystem and the JWQL filesystem, + # then throw them out. + for dark_file in dark_files: + copied_size = os.stat(dark_file).st_size + orig_size = os.stat(filesystem_path(os.path.basename(dark_file))).st_size + if orig_size != copied_size: + logging.info(f"\tProblem copying {os.path.basename(dark_file)} from the filesystem.") + logging.info(f"Size in filesystem: {orig_size}, size of copy: {copied_size}. Skipping file.") + not_copied.append(dark_file) + dark_files.remove(dark_file) + os.remove(dark_file) + logging.info('\tNew_filenames: {}'.format(new_filenames)) logging.info('\tData dir: {}'.format(self.data_dir)) logging.info('\tCopied to working dir: {}'.format(dark_files)) @@ -784,20 +1036,22 @@ def run(self): else: logging.info(('\tDark monitor skipped. MAST query has returned {} new dark files for ' - '{}, {}, {}. {} new files are required to run dark current monitor.') - .format(len(new_entries), instrument, aperture, self.readpatt, file_count_threshold)) + '{}, {}, {}. {} new files are required to run dark current monitor.') + .format(len(new_entries), instrument, aperture, self.readpatt, file_count_threshold)) monitor_run = False # Update the query history new_entry = {'instrument': instrument, - 'aperture': aperture, - 'readpattern': self.readpatt, - 'start_time_mjd': self.query_start, - 'end_time_mjd': self.query_end, - 'files_found': len(new_entries), - 'run_monitor': monitor_run, - 'entry_date': datetime.datetime.now()} - self.query_table.__table__.insert().execute(new_entry) + 'aperture': aperture, + 'readpattern': self.readpatt, + 'start_time_mjd': self.query_start, + 'end_time_mjd': self.query_end, + 'files_found': len(new_entries), + 'run_monitor': monitor_run, + 'entry_date': datetime.datetime.now()} + with engine.begin() as connection: + connection.execute( + self.query_table.__table__.insert(), new_entry) logging.info('\tUpdated the query history table') logging.info('Dark Monitor completed successfully.') @@ -921,11 +1175,11 @@ def stats_by_amp(self, image, amps): Reduced chi-squared for the best-fit parameters. Keys are amp numbers as strings - hist : numpy.ndarray - 1D array of histogram values + hist : dict + Dictionary of 1D arrays of histogram values - bin_centers : numpy.ndarray - 1D array of bin centers that match the ``hist`` values. + bins : dict + Dictionary of 1D arrays of bin centers that match the ``hist`` values. """ amp_means = {} @@ -934,10 +1188,12 @@ def stats_by_amp(self, image, amps): gaussian_chi_squared = {} double_gaussian_params = {} double_gaussian_chi_squared = {} + hists = {} + bins = {} # Add full image coords to the list of amp_boundaries, so that full # frame stats are also calculated. - if 'FULL' in self.aperture: + if self.aperture in FULL_FRAME_APERTURES[self.instrument.upper()]: maxx = 0 maxy = 0 for amp in amps: @@ -968,14 +1224,16 @@ def stats_by_amp(self, image, amps): hist, bin_edges = np.histogram(image[indexes[0], indexes[1]], bins='auto', range=(lower_bound, upper_bound)) - # If the number of bins is smaller than the number of paramters + # If the number of bins is smaller than the number of parameters # to be fit, then we need to increase the number of bins if len(bin_edges) < 7: logging.info('\tToo few histogram bins in initial fit. Forcing 10 bins.') hist, bin_edges = np.histogram(image[indexes[0], indexes[1]], bins=10, - range=(lower_bound, upper_bound)) + range=(lower_bound, upper_bound)) bin_centers = (bin_edges[1:] + bin_edges[0: -1]) / 2. 
+ hists[key] = hist.astype(float) + bins[key] = bin_centers initial_params = [np.max(hist), amp_mean, amp_stdev] # Fit a Gaussian to the histogram. Save best-fit params and @@ -1022,7 +1280,7 @@ def stats_by_amp(self, image, amps): .format(double_gaussian_chi_squared)) return (amp_means, amp_stdevs, gaussian_params, gaussian_chi_squared, double_gaussian_params, - double_gaussian_chi_squared, hist.astype(float), bin_centers) + double_gaussian_chi_squared, hists, bins) if __name__ == '__main__': diff --git a/jwql/instrument_monitors/common_monitors/edb_monitor_data/fgs_mnemonics_to_monitor.json b/jwql/instrument_monitors/common_monitors/edb_monitor_data/fgs_mnemonics_to_monitor.json new file mode 100644 index 000000000..11da7716e --- /dev/null +++ b/jwql/instrument_monitors/common_monitors/edb_monitor_data/fgs_mnemonics_to_monitor.json @@ -0,0 +1,20 @@ +{ + "all": [ + { + "name": "IFGS_ACQ_INUM", + "database_id": "IFGS_ACQ_INUM", + "description": "FGS ACQ Image Data Processing Telemetry Integration Number", + "dependency": [], + "plot_data": "nominal", + "plot_category": "Groups and Ints" + }, + { + "name": "IFGS_EVENTG2_GROUPCNT", + "database_id": "IFGS_EVENTG2_GROUPCNT", + "description": "SCP_EVENT_TLM event group count", + "dependency": [], + "plot_data": "nominal", + "plot_category": "Groups and Ints" + } + ] +} diff --git a/jwql/instrument_monitors/common_monitors/edb_monitor_data/miri_mnemonics_to_monitor.json b/jwql/instrument_monitors/common_monitors/edb_monitor_data/miri_mnemonics_to_monitor.json new file mode 100644 index 000000000..e3fcbaa13 --- /dev/null +++ b/jwql/instrument_monitors/common_monitors/edb_monitor_data/miri_mnemonics_to_monitor.json @@ -0,0 +1,1972 @@ +{ + "daily_means": [ + { + "name": "SE_ZIMIRICEA", + "database_id": "SE_ZIMIRICEA_NO_OPS", + "description": "ICE drive current (no ops)", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "*SE_ZBUSVLT", + "nominal_value": 7.11, + "yellow_limits": [6.54, 7.68], + "plot_category": "Power" + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT4", + "database_id": "IMIR_HK_ICE_SEC_VOLT4_NO_OPS", + "description": "ICE Secondary Voltage 4 (no ops)", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "nominal_value": 4.533, + "yellow_limits": [4.523, 4.543], + "plot_category": "ICE_voltage" + }, + { + "name": "IGDP_MIR_ICE_INTER_TEMP", + "description": "ICE internal temperature", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + 
"threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.0, 30.0], + "plot_category": "ICE_temperature" + }, + { + "name": "ST_ZTC1MIRIA", + "description": "HTCL14 Thermistor #1 - MIRI/DITCE Panel Temperature A", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.0, 30.0], + "plot_category": "ICE_temperature" + }, + + { + "name": "IGDP_MIR_ICE_T1P_CRYO", + "description": "Deck Nominal Temperature (T1)", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "Deck_temperature" + }, + { + "name": "IGDP_MIR_ICE_T2R_CRYO", + "description": "Deck Redundant Temperature (T2)", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "Deck_temperature" + }, + { + "name": "IGDP_MIR_ICE_T3LW_CRYO", + "description": "LW FPM I/F Temperature (T3)", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "FPM_temperature" + }, + { + "name": "IGDP_MIR_ICE_T4SW_CRYO", + "description": "SW FPM I/F Temperature (T4)", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "FPM_temperature" + }, + { + "name": "IGDP_MIR_ICE_T5IMG_CRYO", + "description": "IM FPM I/F Temperature (T5)", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": 
"IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "FPM_temperature" + }, + { + "name": "IGDP_MIR_ICE_T6DECKCRYO", + "description": "Deck Opp. Nom. Temperature (T6)", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "Deck_temperature" + }, + { + "name": "IGDP_MIR_ICE_T7IOC_CRYO", + "description": "Deck Opp. Red. Temperature (T7)", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "Deck_temperature" + }, + { + "name": "IGDP_MIR_ICE_FW_CRYO", + "description": "FWA Temperature", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "Mech_temp" + }, + { + "name": "IGDP_MIR_ICE_CCC_CRYO", + "description": "CCC Temperature", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "Mech_temp" + }, + { + "name": "IGDP_MIR_ICE_GW14_CRYO", + "description": "DGA-A (GW14) Temperature", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "Mech_temp" + }, + { + "name": "IGDP_MIR_ICE_GW23_CRYO", + "description": "DGA-B (GW23) Temperature", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + 
"relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "Mech_temp" + }, + { + "name": "IGDP_MIR_ICE_POMP_CRYO", + "description": "POMH Nominal Temperature", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "POM_temperature" + }, + { + "name": "IGDP_MIR_ICE_POMR_CRYO", + "description": "POMH Redundant Temperature", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "POM_temperature" + }, + { + "name": "IGDP_MIR_ICE_IFU_CRYO", + "description": "MRS (CF) Cal. Source Temperature", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "Cal_src_temperature" + }, + { + "name": "IGDP_MIR_ICE_IMG_CRYO", + "description": "Imager (CI) Cal. 
Source Temperature", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "nominal", + "yellow_limits": [5.7, 7.5], + "plot_category": "Cal_src_temperature" + }, + { + "name": "SE_ZIMIRFPEA", + "description": "FPE drive current (READY)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "*SE_ZBUSVLT", + "nominal_value": 28.5, + "yellow_limits": [26.13, 30.87], + "plot_category": "Power" + }, + { + "name": "IMIR_PDU_V_DIG_5V", + "description": "FPE 5V Digital Voltage (V)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "nominal_value": 4.978, + "yellow_limits": [4.928, 5.028], + "plot_category": "FPE_voltage" + }, + { + "name": "IMIR_PDU_I_DIG_5V", + "description": "FPE 5V Digital Current (mA)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "FPE_current" + }, + { + "name": "IMIR_PDU_V_ANA_5V", + "description": "FPE +5V Analog Voltage (V)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "nominal_value": 4.991, + "yellow_limits": [4.941, 5.041], + "plot_category": "FPE_voltage" + }, + { + "name": "IMIR_PDU_I_ANA_5V", + "description": "FPE +5V Analog Current (mA)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "FPE_current" + }, + { + "name": "IMIR_PDU_V_ANA_N5V", + "description": "FPE -5V Analog V (V)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": 
"IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "nominal_value": -5.023, + "yellow_limits": [-5.073, -4.973], + "plot_category": "FPE_voltage" + }, + { + "name": "IMIR_PDU_I_ANA_N5V", + "description": "FPE -5V Analog Current (mA)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "FPE_current" + }, + { + "name": "IMIR_PDU_V_ANA_7V", + "description": "FPE +7V Analog Voltage (V)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "nominal_value": 6.953, + "yellow_limits": [6.903, 7.053], + "plot_category": "FPE_voltage" + }, + { + "name": "IMIR_PDU_I_ANA_7V", + "description": "FPE +7V Analog Current (mA)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "FPE_current" + }, + { + "name": "IMIR_PDU_V_ANA_N7V", + "description": "FPE -7V Analog V (V)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "nominal_value": -6.943, + "yellow_limits": [-6.993, -6.893], + "plot_category": "FPE_voltage" + }, + { + "name": "IMIR_PDU_I_ANA_N7V", + "description": "FPE -7V Analog Current (mA)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "FPE_current" + }, + { + "name": "IMIR_SPW_V_DIG_2R5V", + "description": "FPE 2.5V Digital Voltage (V)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": 
"IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "nominal_value": 2.5043, + "yellow_limits": [2.4543, 2.5543], + "plot_category": "FPE_voltage" + }, + { + "name": "IMIR_PDU_V_REF_2R5V", + "description": "FPE 2.5V PDU Ref voltage (V)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "FPE_voltage" + }, + { + "name": "IGDP_MIR_IC_V_VDETCOM", + "description": "Detector Bias VDETCOM (IC)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IGDP_MIR_SW_V_VDETCOM", + "description": "Detector Bias VDETCOM (SW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IGDP_MIR_LW_V_VDETCOM", + "description": "Detector Bias VDETCOM (LW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IGDP_MIR_IC_V_VSSOUT", + "description": "Detector Bias VSSOUT (IC)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IGDP_MIR_SW_V_VSSOUT", + "description": "Detector Bias VSSOUT (SW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IGDP_MIR_LW_V_VSSOUT", + "description": "Detector Bias VSSOUT (LW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + 
"threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IGDP_MIR_IC_V_VRSTOFF", + "description": "Detector Bias VRSTOFF (IC)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IGDP_MIR_SW_V_VRSTOFF", + "description": "Detector Bias VRSTOFF (SW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IGDP_MIR_LW_V_VRSTOFF", + "description": "Detector Bias VRSTOFF (LW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IGDP_MIR_IC_V_VP", + "description": "Detector Bias (IC)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IGDP_MIR_SW_V_VP", + "description": "Detector Bias (SW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IGDP_MIR_LW_V_VP", + "description": "Detector Bias (LW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": 
"IGDP_MIR_IC_V_VDDUC", + "description": "Detector Bias VDDUC (IC)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IGDP_MIR_SW_V_VDDUC", + "description": "Detector Bias VDDUC (SW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IGDP_MIR_LW_V_VDDUC", + "description": "Detector Bias VDDUC (LW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_bias" + }, + { + "name": "IMIR_PDU_TEMP", + "description": "FPE PDU Temperature", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "yellow_limits": [238.15, 366.15], + "plot_category": "FPE_temperature" + }, + { + "name": "ST_ZTC2MIRIA", + "description": "FPE A IEC panel temperature", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "FPE_temperature" + }, + { + "name": "IMIR_IC_SCE_ANA_TEMP1", + "description": "FPE SCE Analogue board Temperature (IC)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "yellow_limits": [238.2, 350.0], + "plot_category": "FPE_temperature" + }, + { + "name": "IMIR_SW_SCE_ANA_TEMP1", + "description": "FPE SCE Analogue board Temperature (SW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": 
"IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "yellow_limits": [238.2, 350.0], + "plot_category": "FPE_temperature" + }, + { + "name": "IMIR_LW_SCE_ANA_TEMP1", + "description": "FPE SCE Analogue board Temperature (LW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "yellow_limits": [238.2, 350.0], + "plot_category": "FPE_temperature" + }, + { + "name": "IMIR_IC_SCE_DIG_TEMP", + "description": "FPE SCE Digital board Temperature (IC)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "yellow_limits": [238.2, 350.0], + "plot_category": "FPE_temperature" + }, + { + "name": "IMIR_SW_SCE_DIG_TEMP", + "description": "FPE SCE Digital board Temperature (SW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "yellow_limits": [238.2, 350.0], + "plot_category": "FPE_temperature" + }, + { + "name": "IMIR_LW_SCE_DIG_TEMP", + "description": "FPE SCE Digital board Temperature (LW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "yellow_limits": [238.2, 350.0], + "plot_category": "FPE_temperature" + }, + { + "name": "IGDP_MIR_IC_DET_TEMP", + "description": "Detector Temperature (IC)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_temperature" + }, + { + "name": "IGDP_MIR_SW_DET_TEMP", + "description": "Detector Temperature (SW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + 
"relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_temperature" + }, + { + "name": "IGDP_MIR_LW_DET_TEMP", + "description": "Detector Temperature (LW)", + "dependency": [ + { + "name": "SE_ZIMIRFPEA", + "relation": ">", + "threshold": 0.5 + }, + { + "name": "IGDP_IT_MIR_IC_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_LW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + }, + { + "name": "IGDP_IT_MIR_SW_STATUS", + "relation": "=", + "threshold": "DETECTOR_READY" + } + ], + "plot_data": "nominal", + "plot_category": "Detector_temperature" + } + ], + "block_means":[ + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "database_id": "IMIR_HK_ICE_SEC_VOLT1_OPS", + "description": "ICE Secondary Voltage (HV) 1", + "dependency": [ + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": ">", + "threshold": 25 + } + ], + "plot_data": "nominal", + "nominal_value": 39.24, + "yellow_limits": [39.14, 39.34], + "plot_category": "ICE_voltage" + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT2", + "database_id": "IMIR_HK_ICE_SEC_VOLT2_OPS", + "description": "ICE Secondary Voltage (HV) 2", + "dependency": [ + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": ">", + "threshold": 25 + } + ], + "plot_data": "nominal", + "nominal_value": 79.256, + "yellow_limits": [79.006, 79.506], + "plot_category": "ICE_voltage" + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT3", + "database_id": "IMIR_HK_ICE_SEC_VOLT3_OPS", + "description": "ICE Secondary Voltage (HV) 3", + "dependency": [ + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": ">", + "threshold": 25 + } + ], + "plot_data": "nominal", + "nominal_value": 39.868, + "yellow_limits": [39.818, 39.918], + "plot_category": "ICE_voltage" + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT4", + "database_id": "IMIR_HK_ICE_SEC_VOLT4_OPS", + "description": "ICE Secondary Voltage (HV) 4", + "dependency": [ + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": ">", + "threshold": 25 + } + ], + "plot_data": "nominal", + "nominal_value": 4.881, + "yellow_limits": [4.861, 4.911], + "plot_category": "ICE_voltage" + }, + { + "name": "SE_ZIMIRICEA", + "database_id": "SE_ZIMIRICEA_OPS", + "description": "ICE drive current", + "dependency": [ + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": ">", + "threshold": 25 + } + ], + "plot_data": "*SE_ZBUSVLT,max", + "nominal_value": 11.4, + "yellow_limits": [10.23, 12.02], + "plot_category": "Power" + }, + { + "name": "IMIR_HK_FW_POS_VOLT", + "database_id": "IMIR_HK_FW_POS_VOLT", + "description": "FW position sensor supply voltage", + "dependency": [ + { + "name": "IMIR_HK_FW_POS_VOLT", + "relation": ">", + "threshold": 0.25 + } + ], + "plot_data": "nominal", + "nominal_value": 296.79, + "yellow_limits": [294.79, 298.79], + "plot_category": "Pos_Sen_Volts" + }, + { + "name": "IMIR_HK_GW14_POS_VOLT", + "database_id": "IMIR_HK_GW14_POS_VOLT", + "description": "GW14 position sensor supply voltage", + "dependency": [ + { + "name": "IMIR_HK_GW14_POS_VOLT", + "relation": ">", + "threshold": 0.25 + } + ], + "plot_data": "nominal", + "nominal_value": 291.62, + "yellow_limits": [289.62, 293.62], + "plot_category": "Pos_Sen_Volts" + }, + { + "name": "IMIR_HK_GW23_POS_VOLT", + "database_id": "IMIR_HK_GW23_POS_VOLT", + "description": "GW23 position sensor supply voltage", + "dependency": [ + { + "name": "IMIR_HK_GW23_POS_VOLT", + "relation": ">", + "threshold": 0.25 + } + ], + "plot_data": "nominal", + "nominal_value": 293.503, + "yellow_limits": [291.503, 295.503], + 
"plot_category": "Pos_Sen_Volts" + }, + { + "name": "IMIR_HK_CCC_POS_VOLT", + "database_id": "IMIR_HK_CCC_POS_VOLT", + "description": "CCC position sensor supply voltage", + "dependency": [ + { + "name": "IMIR_HK_CCC_POS_VOLT", + "relation": ">", + "threshold": 0.25 + } + ], + "plot_data": "nominal", + "nominal_value": 296.057, + "yellow_limits": [294.057, 298.057], + "plot_category": "Pos_Sen_Volts" + } + ], + "every_change": [ + { + "name": "IMIR_HK_FW_POS_RATIO", + "description": "FW normalized position sensor voltage ratio", + "dependency": [ + { + "name": "IMIR_HK_FW_CUR_POS", + "relation": "none", + "threshold": 0 + } + ], + "plot_data": "nominal", + "yellow_limits": [-1.6, 1.6], + "plot_category": "Position_sensors" + }, + { + "name": "IMIR_HK_GW14_POS_RATIO", + "description": "GW14 normalized position sensor voltage ratio", + "dependency": [ + { + "name": "IMIR_HK_GW14_CUR_POS", + "relation": "none", + "threshold": 0 + } + ], + "plot_data": "nominal", + "yellow_limits": [-1.6, 1.6], + "plot_category": "Position_sensors" + }, + { + "name": "IMIR_HK_GW23_POS_RATIO", + "description": "GW23 normalized position sensor voltage ratio", + "dependency": [ + { + "name": "IMIR_HK_GW23_CUR_POS", + "relation": "none", + "threshold": 0 + } + ], + "plot_data": "nominal", + "yellow_limits": [-1.6, 1.6], + "plot_category": "Position_sensors" + }, + { + "name": "IMIR_HK_CCC_POS_RATIO", + "description": "CCC normalized position sensor voltage ratio", + "dependency": [ + { + "name": "IMIR_HK_CCC_CUR_POS", + "relation": "none", + "threshold": 0 + } + ], + "plot_data": "nominal", + "yellow_limits": [-6.0, 6.0], + "plot_category": "Position_sensors" + } + ] +} \ No newline at end of file diff --git a/jwql/instrument_monitors/common_monitors/edb_monitor_data/nircam_mnemonics_to_monitor.json b/jwql/instrument_monitors/common_monitors/edb_monitor_data/nircam_mnemonics_to_monitor.json new file mode 100644 index 000000000..ec04c5ffe --- /dev/null +++ b/jwql/instrument_monitors/common_monitors/edb_monitor_data/nircam_mnemonics_to_monitor.json @@ -0,0 +1,2211 @@ +{ + "daily_means": [ + { + "name": "INRC_ICE_DC_VOL_P5_DIG", + "description": "ICE HK +5V voltage for digital electronics", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [4.99, 5.04], + "red_limits": [4.5, 5.5], + "plot_category": "ICE_voltage" + }, + { + "name": "INRC_ICE_DC_VOL_P15_ALL", + "description": "ICE HK +15V voltage for all functions", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [13.9, 14.7], + "red_limits": [11.5, 16.5], + "plot_category": "ICE_voltage" + }, + { + "name": "INRC_ICE_DC_VOL_N15_ALL", + "description": "ICE HK -15V voltage for all functions", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-14.7, -13.9], + "red_limits": [-16.5, -11.5], + "plot_category": "ICE_voltage" + }, + { + "name": "INRC_ICE_DC_VOL_P15_MOT", + "description": "ICE HK +15V voltage for FWA & PIL motor drive", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [13.9, 14.7], + "red_limits": [11.5, 16.5], + "plot_category": "ICE_voltage" + }, + { + "name": "INRC_ICE_DC_VOL_N15_MOT", + "description": "ICE HK -15V voltage for FWA & PIL motor drive", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-14.7, -13.9], + "red_limits": [-16.5, -11.5], + "plot_category": "ICE_voltage" + }, + { + "name": "INRC_ICE_DC_VOL_P30_MOT", + "description": "ICE HK +15V voltage for FAM motor drive", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [13.6, 14.5], + "red_limits": 
[12, 33], + "plot_category": "ICE_voltage" + }, + { + "name": "INRC_ICE_DC_VOL_N30_MOT", + "description": "ICE HK -15V voltage for FAM motor drive", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-14.6, -13.7], + "red_limits": [-33, -12], + "plot_category": "ICE_voltage" + }, + { + "name": "INRC_FA_PSC_VMNSCR", + "description": "ModA SCR voltage out monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [3.19, 3.24], + "red_limits": [-1.0, 3.45], + "plot_category": "ModA_PSC_voltages+currents" + }, + { + "name": "INRC_FA_PSC_VD3P3", + "description": "ModA PSC +3.3 V digital voltage", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [3.29, 3.31], + "red_limits": [-1.0, 3.5], + "plot_category": "ModA_PSC_voltages+currents" + }, + { + "name": "INRC_FA_PSC_VPADC", + "description": "ModA PSC ADC +2.5V monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [2.5, 2.503], + "red_limits": [-1.0, 2.7], + "plot_category": "ModA_PSC_voltages+currents" + }, + { + "name": "INRC_FA_PSC_VD2P5", + "description": "ModA PSC +2.5V digital voltage", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [2.478, 2.486], + "red_limits": [-1.0, 2.6], + "plot_category": "ModA_PSC_voltages+currents" + }, + { + "name": "INRC_FA_PSC_VDCD_5P", + "description": "ModA PSC Digital voltage monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [5.05, 5.2], + "red_limits": [-1.0, 5.5], + "plot_category": "ModA_PSC_voltages+currents" + }, + { + "name": "INRC_FA_PSC_VDCA_8P", + "description": "ModA PSC +6.25VDC power-in monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [6.228, 6.248], + "red_limits": [-1.0, 6.75], + "plot_category": "ModA_PSC_voltages+currents" + }, + { + "name": "INRC_FA_PSC_VDCA_8N", + "description": "ModA PSC -6.25VDC power-in monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-6.3, -6.15], + "red_limits": [-6.75, 1], + "plot_category": "ModA_PSC_voltages+currents" + }, + { + "name": "INRC_FA_PSC_VDCA_5P", + "description": "ModA PSC +5VDC regulator monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [5.255, 5.28], + "red_limits": [-1.0, 5.4], + "plot_category": "ModA_PSC_voltages+currents" + }, + { + "name": "INRC_FA_PSC_VDCA_5N", + "description": "ModA PSC -5VDC regulator monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-5.285, -5.26], + "red_limits": [-5.4, 1.0], + "plot_category": "ModA_PSC_voltages+currents" + }, + { + "name": "INRC_FA_PSC_VDCR_7P", + "description": "ModA PSC reference +6.25V analog voltage", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [6.2, 6.4], + "red_limits": [-1.0, 7.5], + "plot_category": "ModA_PSC_voltages+currents" + }, + { + "name": "INRC_FA_PSC_VDCR_7N", + "description": "ModA PSC reference -6.25V analog voltage", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-6.3, -6.1], + "red_limits": [-7.5, 1.0], + "plot_category": "ModA_PSC_voltages+currents" + }, + { + "name": "INRC_FA_PSC_VDCR_5P", + "description": "ModA PSC reference +5V analog voltage", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [5.05, 5.25], + "red_limits": [-1.0, 5.4], + "plot_category": "ModA_PSC_voltages+currents" + }, + { + "name": "INRC_FA_PSC_VDCR_5N", + "description": "ModA PSC reference -5V analog voltage", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-5.15, -4.95], + "red_limits": [-5.4, 1.0], + "plot_category": 
"ModA_PSC_voltages+currents" + }, + { + "name": "INRC_FB_PSC_VMNSCR", + "description": "ModB SCR voltage out monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [3.19, 3.24], + "red_limits": [-1.0, 3.45], + "plot_category": "ModB_PSC_voltages+currents" + }, + { + "name": "INRC_FB_PSC_VD3P3", + "description": "ModB PSC +3.3V digital voltage", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [3.29, 3.34], + "red_limits": [-1.0, 3.5], + "plot_category": "ModB_PSC_voltages+currents" + }, + { + "name": "INRC_FB_PSC_VPADC", + "description": "ModB PSC ADC +2.5V monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [2.5, 2.503], + "red_limits": [-1.0, 2.7], + "plot_category": "ModB_PSC_voltages+currents" + }, + { + "name": "INRC_FB_PSC_VD2P5", + "description": "ModB PSC +2.5V digital voltage", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [2.488, 2.501], + "red_limits": [-1.0, 2.6], + "plot_category": "ModB_PSC_voltages+currents" + }, + { + "name": "INRC_FB_PSC_VDCD_5P", + "description": "ModB PSC Digital voltage monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [5.05, 5.23], + "red_limits": [-1.0, 5.5], + "plot_category": "ModB_PSC_voltages+currents" + }, + { + "name": "INRC_FB_PSC_VDCA_8P", + "description": "ModB PSC +6.25VDC power-in monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [6.24, 6.26], + "red_limits": [-1.0, 6.75], + "plot_category": "ModB_PSC_voltages+currents" + }, + { + "name": "INRC_FB_PSC_VDCA_8N", + "description": "ModB PSC -6.25VDC power-in monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-6.3, -6.2], + "red_limits": [-6.75, 1], + "plot_category": "ModB_PSC_voltages+currents" + }, + { + "name": "INRC_FB_PSC_VDCA_5P", + "description": "ModB PSC +5VDC regulator monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [5.27, 5.28], + "red_limits": [-1.0, 5.4], + "plot_category": "ModB_PSC_voltages+currents" + }, + { + "name": "INRC_FB_PSC_VDCA_5N", + "description": "ModB PSC -5VDC regulator monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-5.285, -5.27], + "red_limits": [-5.4, 1.0], + "plot_category": "ModB_PSC_voltages+currents" + }, + { + "name": "INRC_FB_PSC_VDCR_7P", + "description": "ModB PSC reference +6.25V analog voltage", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [6.2, 6.4], + "red_limits": [-1.0, 7.5], + "plot_category": "ModB_PSC_voltages+currents" + }, + { + "name": "INRC_FB_PSC_VDCR_7N", + "description": "ModB PSC reference -6.25V analog voltage", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-6.3, -6.0], + "red_limits": [-7.5, 1.0], + "plot_category": "ModB_PSC_voltages+currents" + }, + { + "name": "INRC_FB_PSC_VDCR_5P", + "description": "ModB PSC reference +5V analog voltage", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [5.05, 5.25], + "red_limits": [-1.0, 5.4], + "plot_category": "ModB_PSC_voltages+currents" + }, + { + "name": "INRC_FB_PSC_VDCR_5N", + "description": "ModB PSC reference -5V analog voltage", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-5.15, -4.95], + "red_limits": [-5.4, 1.0], + "plot_category": "ModB_PSC_voltages+currents" + } + ], + "all": [ + { + "name": "INRC_A_M_PIL_AS_POS", + "description": "ModA PIL SensorA position", + "dependency": [ + { + "name": "IGDP_NRC_A_PIL_STATE", + "relation": "=", + "threshold": "DEPLOYED" + } + ], + "plot_data": "nominal", + 
"yellow_limits": [-23490, 20982], + "red_limits": [-24000, 24000], + "plot_category": "PIL_pos", + "mean_time_block": "1_minutes" + }, + { + "name": "INRC_A_M_PIL_BS_POS", + "description": "ModA PIL SensorB position", + "dependency": [ + { + "name": "IGDP_NRC_A_PIL_STATE", + "relation": "=", + "threshold": "DEPLOYED" + } + ], + "plot_data": "nominal", + "yellow_limits": [-22382, 17863], + "red_limits": [-24000, 24000], + "plot_category": "PIL_pos", + "mean_time_block": "1_day" + }, + { + "name": "INRC_B_M_PIL_AS_POS", + "description": "ModB PIL SensorA position", + "dependency": [ + { + "name": "IGDP_NRC_B_PIL_STATE", + "relation": "=", + "threshold": "DEPLOYED" + } + ], + "plot_data": "nominal", + "yellow_limits": [-16670, 19735], + "red_limits": [-24000, 24000], + "plot_category": "PIL_pos", + "mean_time_block": "1_day" + }, + { + "name": "INRC_B_M_PIL_BS_POS", + "description": "ModB PIL SensorB position", + "dependency": [ + { + "name": "IGDP_NRC_B_PIL_STATE", + "relation": "=", + "threshold": "DEPLOYED" + } + ], + "plot_data": "nominal", + "yellow_limits": [-24132, 20274], + "red_limits": [-24000, 24000], + "plot_category": "PIL_pos", + "mean_time_block": "1_day" + } + ], + "block_means": [ + ], + "every_change": [ + ], + "time_interval": [ + { + "name": "SE_ZBUSVLT", + "description": "Bus voltage", + "dependency": [], + "plot_data": "nominal", + "red_limits": [30.7, 31.7], + "plot_category": "Box_current", + "mean_time_block": "15_minutes" + }, + { + "name": "INRC_ICE_DC_CUR_P5_DIG", + "description": "ICE HK +5V current for Digital electronics", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.4, 0.9], + "red_limits": [-0.01, 1.408], + "plot_category": "ICE_current", + "mean_time_block": "15_minutes" + }, + { + "name": "INRC_ICE_DC_CUR_P15_ALL", + "description": "ICE HK +15V current for all functions", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.15, 0.35], + "red_limits": [-0.01, 0.8426], + "plot_category": "ICE_current", + "mean_time_block": "15_minutes" + }, + { + "name": "INRC_ICE_DC_CUR_N15_ALL", + "description": "ICE HK -15V current for all functions", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.21, 0.35], + "red_limits": [-0.01, 0.8426], + "plot_category": "ICE_current", + "mean_time_block": "15_minutes" + }, + { + "name": "INRC_ICE_DC_CUR_P15_MOT", + "description": "ICE HK +15V current for FWA & PIL motor drive", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.01, 0.34], + "red_limits": [-0.01, 0.8426], + "plot_category": "ICE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_ICE_DC_CUR_N15_MOT", + "description": "ICE HK -15V current for FWA & PIL motor drive", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.0, 0.34], + "red_limits": [-0.01, 0.8426], + "plot_category": "ICE_current", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_A_T_ICE_POS_EL", + "description": "ICE position sensor board internal Cernox sensor, SN X45683", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 286.0, + "red_limits": [248.15, 333.15], + "plot_category": "ICE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_A_T_ICE_DIO_EL", + "description": "ICE DIO board internal Cernox sensor, SN X45422", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 284.0, + "red_limits": [248.15, 333.15], + "plot_category": "ICE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_A_T_ICE_POW_EL", + "description": "ICE 
Power supply board internal Cernox sensor, SN X45569", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 287.0, + "red_limits": [248.15, 333.15], + "plot_category": "ICE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_A_T_ICE_MOT_EL", + "description": "ICE motor controller board internal Cernox sensor, SN X45432", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 288.0, + "red_limits": [248.15, 333.15], + "plot_category": "ICE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_B_T_ICE_POS_EL", + "description": "ICE position sensor board internal Cernox sensor, SN X45684", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 285.0, + "red_limits": [248.15, 333.15], + "plot_category": "ICE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_B_T_ICE_DIO_EL", + "description": "ICE DIO board internal Cernox sensor, SN X45423", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 289.0, + "red_limits": [248.15, 333.15], + "plot_category": "ICE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_B_T_ICE_POW_EL", + "description": "ICE power supply board internal Cernox sensor, SN X45588", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 297.0, + "red_limits": [248.15, 333.15], + "plot_category": "ICE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_B_T_ICE_MOT_EL", + "description": "ICE motor controller board internal Cernox sensor, SN X45436", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 290.0, + "red_limits": [248.15, 333.15], + "plot_category": "ICE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNSCR", + "description": "ModA SCR current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [309, 335], + "red_limits": [-2.5, 1400], + "plot_category": "ModA_PSC_voltages+currents", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNSCR", + "description": "ModB SCR current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [315, 345], + "red_limits": [-2.5, 1400], + "plot_category": "ModB_PSC_voltages+currents", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE1_BOARD_TEMP", + "description": "ModA ACE1 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 14.0, + "red_limits": [-25, 60.0], + "plot_category": "ACE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE2_BOARD_TEMP", + "description": "ModA ACE2 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 16.0, + "red_limits": [-25, 60.0], + "plot_category": "ACE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE3_BOARD_TEMP", + "description": "ModA ACE3 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 17.0, + "red_limits": [-25, 60.0], + "plot_category": "ACE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE4_BOARD_TEMP", + "description": "ModA ACE4 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 15.0, + "red_limits": [-25, 60.0], + "plot_category": "ACE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE5_BOARD_TEMP", + "description": "ModA ACE5 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 14.5, + "red_limits": [-25, 60.0], + "plot_category": "ACE_board_temp", + "mean_time_block": 
"1_hour" + }, + { + "name": "INRC_FB_ACE1_BOARD_TEMP", + "description": "ModB ACE1 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 15.5, + "red_limits": [-25, 60.0], + "plot_category": "ACE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE2_BOARD_TEMP", + "description": "ModB ACE2 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 15.0, + "red_limits": [-25, 60.0], + "plot_category": "ACE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE3_BOARD_TEMP", + "description": "ModB ACE3 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 14.5, + "red_limits": [-25, 60.0], + "plot_category": "ACE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE4_BOARD_TEMP", + "description": "ModB ACE4 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 15.5, + "red_limits": [-25, 60.0], + "plot_category": "ACE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE5_BOARD_TEMP", + "description": "ModB ACE5 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 15.0, + "red_limits": [-25, 60.0], + "plot_category": "ACE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_TMC1_BOARD_TEMP", + "description": "ModA TMC1 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 16.5, + "red_limits": [-25, 60.0], + "plot_category": "FPE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_TMC2_BOARD_TEMP", + "description": "ModA TMC2 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 16.0, + "red_limits": [-25, 60.0], + "plot_category": "FPE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_TMC1_BOARD_TEMP", + "description": "ModB TMC1 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 16.0, + "red_limits": [-25, 60.0], + "plot_category": "FPE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_TMC2_BOARD_TEMP", + "description": "ModB TMC2 board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 13.0, + "red_limits": [-25, 60.0], + "plot_category": "FPE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_LVCBRD_TEMP", + "description": "ModA LVC board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 23.0, + "red_limits": [-25, 60.0], + "plot_category": "FPE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_BOARD_TEMP", + "description": "ModA PSC board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 16.5, + "red_limits": [-25, 60.0], + "plot_category": "FPE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_SCRBRD_TEMP", + "description": "ModA SCR board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 17.5, + "red_limits": [-25, 60.0], + "plot_category": "FPE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_LVCBRD_TEMP", + "description": "ModB LVC board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 23.5, + "red_limits": [-25, 60.0], + "plot_category": "FPE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_BOARD_TEMP", + "description": "ModB PSC board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 
15.5, + "red_limits": [-25, 60.0], + "plot_category": "FPE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_SCRBRD_TEMP", + "description": "ModB SCR board temperature", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 17.5, + "red_limits": [-25, 60.0], + "plot_category": "FPE_board_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE1_P", + "description": "ModA ACE1 +5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [20.0, 23.0], + "red_limits": [-2.5, 50.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE1_N", + "description": "ModA ACE1 -5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-17.5, -15.0], + "red_limits": [-30.0, 1.5], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE1_D", + "description": "ModA ACE1 digital current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [123, 143], + "red_limits": [-2.5, 160], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE1R_N", + "description": "ModA ACE1 -5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-3.6, -0.1], + "red_limits": [-6.0, 1.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE1R_P", + "description": "ModA ACE1 +5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.1, 4.2], + "red_limits": [-1.0, 6.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE2_P", + "description": "ModA ACE2 +5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [20.5, 21.4], + "red_limits": [-2.5, 50.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE2_N", + "description": "ModA ACE2 -5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-15.3, -15.2], + "red_limits": [-30.0, 1.5], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE2_D", + "description": "ModA ACE2 digital current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [120, 140], + "red_limits": [-2.5, 160], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE2R_N", + "description": "ModA ACE2 -5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-3.8, -0.1], + "red_limits": [-6.0, 1.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE2R_P", + "description": "ModA ACE2 +5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.4, 4.4], + "red_limits": [-1.0, 6.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE3_P", + "description": "ModA ACE3 +5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [20.4, 21.4], + "red_limits": [-2.5, 50.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE3_N", + "description": "ModA ACE3 -5V analog current monitor", + "dependency": [], + 
"plot_data": "nominal", + "yellow_limits": [-15.7, -15.5], + "red_limits": [-30.0, 1.5], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE3_D", + "description": "ModA ACE3 digital current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [120, 140], + "red_limits": [-2.5, 160], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE3_N", + "description": "ModA ACE3 -5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-3.6, 0.0], + "red_limits": [-6.0, 1.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE3R_P", + "description": "ModA ACE3 +5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.1, 4.2], + "red_limits": [-1.0, 6.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE4_P", + "description": "ModA ACE4 +5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [20.3, 22.0], + "red_limits": [-2.5, 50.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE4_N", + "description": "ModA ACE4 -5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-15.2, -15.0], + "red_limits": [-30.0, 1.5], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE4_D", + "description": "ModA ACE4 digital current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [122, 140], + "red_limits": [-2.5, 160], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE4R_N", + "description": "ModA ACE4 -5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-4.1, 0.0], + "red_limits": [-6.0, 1.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE4R_P", + "description": "ModA ACE4 +5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.1, 4.7], + "red_limits": [-1.0, 6.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE5_P", + "description": "ModA ACE5 +5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [20.4, 23.0], + "red_limits": [-2.5, 50.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE5_N", + "description": "ModA ACE5 -5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-17.3, -14.9], + "red_limits": [-30.0, 1.5], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE5_D", + "description": "ModA ACE5 digital current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [119, 138], + "red_limits": [-2.5, 160], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE5R_N", + "description": "ModA ACE5 -5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-4.1, -0.1], + "red_limits": [-6.0, 1.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNACE5R_P", + 
"description": "ModA ACE5 +5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.1, 4.6], + "red_limits": [-1.0, 6.0], + "plot_category": "ModA_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE1_P", + "description": "ModB ACE1 +5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [20.0, 22.7], + "red_limits": [-2.5, 50.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE1_N", + "description": "ModB ACE1 -5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-17.1, -14.8], + "red_limits": [-30.0, 1.5], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE1_D", + "description": "ModB ACE1 digital current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [120, 139], + "red_limits": [-2.5, 160], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE1R_N", + "description": "ModB ACE1 -5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-4.2, -0.1], + "red_limits": [-6.0, 1.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE1R_P", + "description": "ModB ACE1 +5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.0, 4.7], + "red_limits": [-1.0, 6.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE2_P", + "description": "ModB ACE2 +5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [20.0, 22.5], + "red_limits": [-2.5, 50.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE2_N", + "description": "ModB ACE2 -5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-17.1, -14.8], + "red_limits": [-30.0, 1.5], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE2_D", + "description": "ModB ACE2 digital current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [122, 141], + "red_limits": [-2.5, 160], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE2R_N", + "description": "ModB ACE2 -5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-3.8, -0.1], + "red_limits": [-6.0, 1.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE2R_P", + "description": "ModB ACE2 +5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.1, 4.6], + "red_limits": [-1.0, 6.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE3_P", + "description": "ModB ACE3 +5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [19.5, 22.7], + "red_limits": [-2.5, 50.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE3_N", + "description": "ModB ACE3 -5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-17.2, -14.9], + "red_limits": [-30.0, 1.5], + "plot_category": "ModB_ACE_current", + 
"mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE3_D", + "description": "ModB ACE3 digital current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [119, 137], + "red_limits": [-2.5, 160], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE23_N", + "description": "ModB ACE3 -5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-3.6, -0.1], + "red_limits": [-6.0, 1.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE3R_P", + "description": "ModB ACE3 +5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.1, 4.3], + "red_limits": [-1.0, 6.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE4_P", + "description": "ModB ACE4 +5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [20.0, 22.7], + "red_limits": [-2.5, 50.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE4_N", + "description": "ModB ACE4 -5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-17.3, -14.9], + "red_limits": [-30.0, 1.5], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE4_D", + "description": "ModB ACE4 digital current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [124, 142], + "red_limits": [-2.5, 160], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE4R_N", + "description": "ModB ACE4 -5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-4.0, -0.1], + "red_limits": [-6.0, 1.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE4R_P", + "description": "ModB ACE4 +5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.1, 4.6], + "red_limits": [-1.0, 6.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE5_P", + "description": "ModB ACE5 +5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [20.0, 22.8], + "red_limits": [-2.5, 50.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE5_N", + "description": "ModB ACE5 -5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-17.4, -15.0], + "red_limits": [-30.0, 1.5], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE5_D", + "description": "ModB ACE5 digital current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [118, 135], + "red_limits": [-2.5, 160], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE5R_N", + "description": "ModB ACE5 -5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-3.9, -0.1], + "red_limits": [-6.0, 1.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNACE5R_P", + "description": "ModB ACE5 +5V reference current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.1, 4.6], 
+ "red_limits": [-1.0, 6.0], + "plot_category": "ModB_ACE_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNTMC2_P", + "description": "ModA TMC2 +5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [33.0, 65.0], + "red_limits": [-2.5, 80.0], + "plot_category": "TMC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNTMC2_D", + "description": "ModA TMC2 digital current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [21.0, 28.0], + "red_limits": [-2.5, 45.0], + "plot_category": "TMC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_PSC_IMNTMC2_N", + "description": "ModA TMC2 -5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-45.0, -28.0], + "red_limits": [-70.0, 2.5], + "plot_category": "TMC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNTMC1_P", + "description": "ModB TMC1 +5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [34.0, 66.0], + "red_limits": [-2.5, 80.0], + "plot_category": "TMC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNTMC1_D", + "description": "ModB TMC1 digital current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [20.8, 30.0], + "red_limits": [-2.5, 45.0], + "plot_category": "TMC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_PSC_IMNTMC1_N", + "description": "ModB TMC1 -5V analog current monitor", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [-46.0, -29.0], + "red_limits": [-70.0, 2.5], + "plot_category": "TMC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE1_SC_A3P3_I", + "description": "ModA ASIC1 3.3V analog current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [3.0, 7.0], + "red_limits": [-1.5, 17.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE1_SC_R3P3_I", + "description": "ModA ASIC1 3.3V reference current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.08, 0.21], + "red_limits": [-1.0, 1.0], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE1_SC_D3P3_I", + "description": "ModA ASIC1 3.3V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.15, 0.27], + "red_limits": [-1.0, 1.0], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE1_SC_D2P5_I", + "description": "ModA ASIC1 2.5V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.3, 5.0], + "red_limits": [-1.0, 6.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE1_SC_VIO_I", + "description": "ModA ASIC1 VIO LVDS current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.2, 2.3], + "red_limits": [-1.0, 5.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE2_SC_A3P3_I", + "description": "ModA ASIC2 3.3V analog current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [3.5, 6.5], + "red_limits": [-1.5, 17.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE2_SC_R3P3_I", + "description": "ModA ASIC2 3.3V reference current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.09, 
0.2], + "red_limits": [-1.0, 1.0], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE2_SC_D3P3_I", + "description": "ModA ASIC2 3.3V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.12, 0.26], + "red_limits": [-1.0, 1.0], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE2_SC_D2P5_I", + "description": "ModA ASIC2 2.5V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.5, 5.5], + "red_limits": [-1.0, 6.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE2_SC_VIO_I", + "description": "ModA ASIC2 VIO LVDS current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.4, 2.4], + "red_limits": [-1.0, 5.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE3_SC_A3P3_I", + "description": "ModA ASIC3 3.3V analog current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [3.3, 6.5], + "red_limits": [-1.5, 17.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE3_SC_R3P3_I", + "description": "ModA ASIC3 3.3V reference current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.08, 0.21], + "red_limits": [-1.0, 1.0], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE3_SC_D3P3_I", + "description": "ModA ASIC3 3.3V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.13, 0.26], + "red_limits": [-1.0, 1.0], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE3_SC_D2P5_I", + "description": "ModA ASIC3 2.5V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.5, 5.0], + "red_limits": [-1.0, 6.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE3_SC_VIO_I", + "description": "ModA ASIC3 VIO LVDS current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.4, 2.7], + "red_limits": [-1.0, 5.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE4_SC_A3P3_I", + "description": "ModA ASIC4 3.3V analog current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [3.2, 5.4], + "red_limits": [-1.5, 17.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE4_SC_R3P3_I", + "description": "ModA ASIC4 3.3V reference current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.09, 0.21], + "red_limits": [-1.0, 1.0], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE4_SC_D3P3_I", + "description": "ModA ASIC4 3.3V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.12, 0.25], + "red_limits": [-1.0, 1.0], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE4_SC_D2P5_I", + "description": "ModA ASIC4 2.5V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.7, 5.0], + "red_limits": [-1.0, 6.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE4_SC_VIO_I", + "description": "ModA ASIC4 VIO LVDS current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.4, 2.3], 
+ "red_limits": [-1.0, 5.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE5_SC_A3P3_I", + "description": "ModA ASIC5 3.3V analog current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [3.7, 5.6], + "red_limits": [-1.5, 17.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE5_SC_R3P3_I", + "description": "ModA ASIC5 3.3V reference current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.09, 0.22], + "red_limits": [-1.0, 1.0], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE5_SC_D3P3_I", + "description": "ModA ASIC5 3.3V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.11, 0.25], + "red_limits": [-1.0, 1.0], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE5_SC_D2P5_I", + "description": "ModA ASIC5 2.5V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.7, 5.3], + "red_limits": [-1.0, 6.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FA_ACE5_SC_VIO_I", + "description": "ModA ASIC5 VIO LVDS current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.5, 2.4], + "red_limits": [-1.0, 5.5], + "plot_category": "ModA_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE1_SC_A3P3_I", + "description": "ModB ASIC1 3.3V analog current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [3.2, 6.9], + "red_limits": [-1.5, 17.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE1_SC_R3P3_I", + "description": "ModB ASIC1 3.3V reference current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.08, 0.18], + "red_limits": [-1.0, 1.0], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE1_SC_D3P3_I", + "description": "ModB ASIC1 3.3V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.12, 0.24], + "red_limits": [-1.0, 1.0], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE1_SC_D2P5_I", + "description": "ModB ASIC1 2.5V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.6, 5.8], + "red_limits": [-1.0, 6.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE1_SC_VIO_I", + "description": "ModB ASIC1 VIO LVDS current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.2, 2.2], + "red_limits": [-1.0, 5.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE2_SC_A3P3_I", + "description": "ModB ASIC2 3.3V analog current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [3.6, 6.5], + "red_limits": [-1.5, 17.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE2_SC_R3P3_I", + "description": "ModB ASIC2 3.3V reference current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.11, 0.26], + "red_limits": [-1.0, 1.0], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE2_SC_D3P3_I", + "description": "ModB ASIC2 3.3V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.12, 
0.27], + "red_limits": [-1.0, 1.0], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE2_SC_D2P5_I", + "description": "ModB ASIC2 2.5V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.8, 5.2], + "red_limits": [-1.0, 6.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE2_SC_VIO_I", + "description": "ModB ASIC2 VIO LVDS current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.4, 2.4], + "red_limits": [-1.0, 5.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE3_SC_A3P3_I", + "description": "ModB ASIC3 3.3V analog current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [2.9, 6.3], + "red_limits": [-1.5, 17.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE3_SC_R3P3_I", + "description": "ModB ASIC3 3.3V reference current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.05, 0.16], + "red_limits": [-1.0, 1.0], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE3_SC_D3P3_I", + "description": "ModB ASIC3 3.3V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.07, 0.24], + "red_limits": [-1.0, 1.0], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE3_SC_D2P5_I", + "description": "ModB ASIC3 2.5V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.4, 5.1], + "red_limits": [-1.0, 6.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE3_SC_VIO_I", + "description": "ModB ASIC3 VIO LVDS current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.3, 2.2], + "red_limits": [-1.0, 5.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE4_SC_A3P3_I", + "description": "ModB ASIC4 3.3V analog current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [3.6, 5.3], + "red_limits": [-1.5, 17.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE4_SC_R3P3_I", + "description": "ModB ASIC4 3.3V reference current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.08, 0.22], + "red_limits": [-1.0, 1.0], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE4_SC_D3P3_I", + "description": "ModB ASIC4 3.3V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.11, 0.27], + "red_limits": [-1.0, 1.0], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE4_SC_D2P5_I", + "description": "ModB ASIC4 2.5V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.7, 5.4], + "red_limits": [-1.0, 6.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE4_SC_VIO_I", + "description": "ModB ASIC4 VIO LVDS current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.5, 2.4], + "red_limits": [-1.0, 5.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE5_SC_A3P3_I", + "description": "ModB ASIC5 3.3V analog current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [3.6, 6.9], + 
"red_limits": [-1.5, 17.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE5_SC_R3P3_I", + "description": "ModB ASIC5 3.3V reference current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.08, 0.21], + "red_limits": [-1.0, 1.0], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE5_SC_D3P3_I", + "description": "ModB ASIC5 3.3V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.12, 0.24], + "red_limits": [-1.0, 1.0], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE5_SC_D2P5_I", + "description": "ModB ASIC5 2.5V digital current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.7, 5.1], + "red_limits": [-1.0, 6.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_FB_ACE5_SC_VIO_I", + "description": "ModB ASIC5 VIO LVDS current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.5, 2.5], + "red_limits": [-1.0, 5.5], + "plot_category": "ModB_ASIC_current", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_FA_TMC2_LWMV_T", + "description": "ModA LW FPA Cernox Secondary T-sensor reading", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.8, + "red_limits": [35.0, 314], + "plot_category": "FPA_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_FA_TMC2_SWMV_T", + "description": "ModA SW FPA Cernox Secondary T-sensor reading", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.5, + "red_limits": [35.0, 314], + "plot_category": "FPA_temp", + "mean_time_block": "1_hour" + }, + + { + "name": "IGDP_NRC_FB_TMC1_LWMV_T", + "description": "ModB LW FPA Cernox Primary T-sensor reading", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 37.9, + "red_limits": [35.0, 314], + "plot_category": "FPA_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_FB_TMC1_SWMV_T", + "description": "ModB SW FPA Cernox Primary T-sensor reading", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 39.0, + "red_limits": [35.0, 314], + "plot_category": "FPA_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_FA_ACE1_SCTEMP", + "description": "ModA ASIC1 Temperature read by ACE card", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 39.5, + "red_limits": [35.0, 314], + "plot_category": "ASIC_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_FA_ACE2_SCTEMP", + "description": "ModA ASIC2 Temperature read by ACE card", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 39.3, + "red_limits": [35.0, 314], + "plot_category": "ASIC_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_FA_ACE3_SCTEMP", + "description": "ModA ASIC3 Temperature read by ACE card", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 39.4, + "red_limits": [35.0, 314], + "plot_category": "ASIC_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_FA_ACE4_SCTEMP", + "description": "ModA ASIC4 Temperature read by ACE card", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 39.3, + "red_limits": [35.0, 314], + "plot_category": "ASIC_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_FA_ACE5_SCTEMP", + "description": "ModA ASIC5 Temperature read by ACE card", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.4, + "red_limits": [35.0, 
314], + "plot_category": "ASIC_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_FB_ACE1_SCTEMP", + "description": "ModB ASIC1 Temperature read by ACE card", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 40.3, + "red_limits": [35.0, 314], + "plot_category": "ASIC_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_FB_ACE2_SCTEMP", + "description": "ModB ASIC2 Temperature read by ACE card", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 40.35, + "red_limits": [35.0, 314], + "plot_category": "ASIC_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_FB_ACE3_SCTEMP", + "description": "ModB ASIC3 Temperature read by ACE card", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 40.3, + "red_limits": [35.0, 314], + "plot_category": "ASIC_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_FB_ACE4_SCTEMP", + "description": "ModB ASIC4 Temperature read by ACE card", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 40.3, + "red_limits": [35.0, 314], + "plot_category": "ASIC_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_FB_ACE5_SCTEMP", + "description": "ModB ASIC5 Temperature read by ACE card", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.5, + "red_limits": [35.0, 314], + "plot_category": "ASIC_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_A_T_LWFPAH1", + "description": "ModA LW FPA Housing Primary Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.45, + "red_limits": [35.0, 308], + "plot_category": "FPAH_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_A_T_LWFPAH2", + "description": "ModA LW FPA Housing Secondary Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.75, + "red_limits": [35.0, 308], + "plot_category": "FPAH_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_A_T_SWFPAH1", + "description": "ModA SW FPA Housing Primary Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.75, + "red_limits": [35.0, 308], + "plot_category": "FPAH_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_A_T_SWFPAH2", + "description": "ModA SW FPA Housing Secondary Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.9, + "red_limits": [35.0, 308], + "plot_category": "FPAH_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_B_T_LWFPAH1", + "description": "ModB LW FPA Housing Primary Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.5, + "red_limits": [35.0, 308], + "plot_category": "FPAH_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_B_T_LWFPAH2", + "description": "ModB LW FPA Housing Secondary Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.6, + "red_limits": [35.0, 308], + "plot_category": "FPAH_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_B_T_SWFPAH1", + "description": "ModB SW FPA Housing Primary Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 39.05, + "red_limits": [35.0, 308], + "plot_category": "FPAH_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_B_T_SWFPAH2", + "description": "ModB SW FPA Housing Secondary Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 39.1, + "red_limits": [35.0, 308], + "plot_category": "FPAH_temp", + 
"mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_A_T_OBA_TS1", + "description": "ModA Optical Bench Primary Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.15, + "red_limits": [35.0, 314], + "plot_category": "OBA_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_A_T_OBA_TS2", + "description": "ModA Optical Bench Secondary Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.35, + "red_limits": [35.0, 314], + "plot_category": "OBA_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_B_T_OBA_TS1", + "description": "ModB Optical Bench Primary Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.25, + "red_limits": [35.0, 314], + "plot_category": "OBA_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_B_T_OBA_TS2", + "description": "ModB Optical Bench Secondary Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.4, + "red_limits": [35.0, 314], + "plot_category": "OBA_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_A_T_PIL", + "description": "ModA PIL Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 39.8, + "red_limits": [35.0, 314], + "plot_category": "PIL_temp", + "mean_time_block": "1_hour" + }, + { + "name": "IGDP_NRC_B_T_PIL", + "description": "ModB PIL Cernox T-sensor", + "dependency": [], + "plot_data": "nominal", + "nominal_value": 38.3, + "red_limits": [35.0, 314], + "plot_category": "PIL_temp", + "mean_time_block": "1_hour" + }, + { + "name": "INRC_SW_TMR_PIL", + "description": "NFSW PIL timeout timer value", + "database_id": "INRC_SW_TMR_PIL_A", + "dependency": [ + { + "name": "IGDP_NRC_A_PIL_STATE", + "relation": "=", + "threshold": "DEPLOYED" + } + ], + "plot_data": "nominal", + "plot_category": "PIL_pos", + "mean_time_block": "1_day" + }, + { + "name": "INRC_SW_TMR_PIL", + "description": "NFSW PIL timeout timer value", + "database_id": "INRC_SW_TMR_PIL_B", + "dependency": [ + { + "name": "IGDP_NRC_B_PIL_STATE", + "relation": "=", + "threshold": "DEPLOYED" + } + ], + "plot_data": "nominal", + "plot_category": "PIL_pos", + "mean_time_block": "1_day" + }, + { + "name": "INRC_FA_TMC2_LW_SEN_SET", + "description": "ModA TMC2 LW FPA heater setpoint", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [1890, 1940], + "plot_category": "FPA_heater_setpt", + "mean_time_block": "1_day" + }, + { + "name": "INRC_FA_TMC2_SW_SEN_SET", + "description": "ModA TMC2 SW FPA heater setpoint", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [1055, 1105], + "plot_category": "FPA_heater_setpt", + "mean_time_block": "1_day" + }, + { + "name": "INRC_FB_TMC1_LW_SEN_SET", + "description": "ModB TMC1 LW FPA heater setpoint", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [800, 1900], + "plot_category": "FPA_heater_setpt", + "mean_time_block": "1_day" + }, + { + "name": "INRC_FB_TMC1_SW_SEN_SET", + "description": "ModB TMC1 SW FPA heater setpoint", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [700, 2200], + "plot_category": "FPA_heater_setpt", + "mean_time_block": "1_day" + }, + { + "name": "IGDP_NRC_FA_TMC2_LW_TS1", + "description": "T-offset for ModA LW FPA relative to zero-power, based on comparing TMC2 and OBA TS1 sensors", + "dependency": [], + "plot_data": "nominal", + "red_limits": [0, 1], + "plot_category": "FPA_heater_setpt", + "mean_time_block": "1_day" + }, + { + "name": 
"IGDP_NRC_FA_TMC2_LW_TS2", + "description": "T-offset for ModA LW FPA relative to zero-power, based on comparing TMC2 and OBA TS2 sensors", + "dependency": [], + "plot_data": "nominal", + "red_limits": [0, 1], + "plot_category": "FPA_heater_setpt", + "mean_time_block": "1_day" + }, + { + "name": "IGDP_NRC_FA_TMC2_SW_TS1", + "description": "T-offset for ModA SW FPA relative to zero-power, based on comparing TMC2 and OBA TS1 sensors", + "dependency": [], + "plot_data": "nominal", + "red_limits": [0, 1], + "plot_category": "FPA_heater_setpt", + "mean_time_block": "1_day" + }, + { + "name": "IGDP_NRC_FA_TMC2_SW_TS2", + "description": "T-offset for ModA SW FPA relative to zero-power, based on comparing TMC2 and OBA TS2 sensors", + "dependency": [], + "plot_data": "nominal", + "red_limits": [0, 1], + "plot_category": "FPA_heater_setpt", + "mean_time_block": "1_day" + }, + { + "name": "IGDP_NRC_FB_TMC1_LW_TS1", + "description": "T-offset for ModB LW FPA relative to zero-power, based on comparing TMC1 and OBA TS1 sensors", + "dependency": [], + "plot_data": "nominal", + "red_limits": [0, 1], + "plot_category": "FPA_heater_setpt", + "mean_time_block": "1_day" + }, + { + "name": "IGDP_NRC_FB_TMC1_LW_TS2", + "description": "T-offset for ModB LW FPA relative to zero-power, based on comparing TMC1 and OBA TS2 sensors", + "dependency": [], + "plot_data": "nominal", + "red_limits": [0, 1], + "plot_category": "FPA_heater_setpt", + "mean_time_block": "1_day" + }, + { + "name": "IGDP_NRC_FB_TMC1_SW_TS1", + "description": "T-offset for ModB SW FPA relative to zero-power, based on comparing TMC1 and OBA TS1 sensors", + "dependency": [], + "plot_data": "nominal", + "red_limits": [0, 1], + "plot_category": "FPA_heater_setpt", + "mean_time_block": "1_day" + }, + { + "name": "IGDP_NRC_FB_TMC1_SW_TS2", + "description": "T-offset for ModB SW FPA relative to zero-power, based on comparing TMC1 and OBA TS2 sensors", + "dependency": [], + "plot_data": "nominal", + "red_limits": [0, 1], + "plot_category": "FPA_heater_setpt", + "mean_time_block": "1_day" + } + ] +} \ No newline at end of file diff --git a/jwql/instrument_monitors/common_monitors/edb_monitor_data/niriss_mnemonics_to_monitor.json b/jwql/instrument_monitors/common_monitors/edb_monitor_data/niriss_mnemonics_to_monitor.json new file mode 100644 index 000000000..323586fd4 --- /dev/null +++ b/jwql/instrument_monitors/common_monitors/edb_monitor_data/niriss_mnemonics_to_monitor.json @@ -0,0 +1,174 @@ +{ + "all": [ + { + "name": "SA_ZHGAUPST", + "description": "High Gain Antenna update status", + "dependency": [], + "plot_data": "nominal", + "plot_category": "observatory" + }, + { + "name": "INIS_PWC_MOV", + "description": "Pupil wheel movement", + "dependency": [], + "plot_data": "nominal", + "plot_category": "FW/PW" + }, + { + "name": "INIS_CMD_ACC_CNT", + "description": "ISIM NIS Command Accepted Counter", + "dependency": [], + "plot_data": "nominal", + "plot_category": "Commanding" + }, + { + "name": "INIS_FPATEMP", + "description": "NIS HW Engineering Telemetry Focus Plate Electronics FPA Temperature", + "dependency": [ + { + "name": "INIS_SCE_ASICTEMP2", + "relation": "<", + "threshold": 22000 + } + ], + "plot_data": "nominal", + "plot_category": "Temperatures" + }, + { + "name": "INIS_SCE_FPATEMP1_2", + "description": "NIS SIDECAR Housekeeping FPA Temperature 1 word 2", + "dependency": [ + { + "name": "INIS_SCE_ASICTEMP2", + "relation": "<", + "threshold": 22000 + } + ], + "plot_data": "nominal", + "plot_category": "Temperatures" + }, + { + "name": 
"INIS_SCE_FPATEMP2_2", + "description": "NIS SIDECAR Housekeeping FPA Temperature 2 word 2", + "dependency": [ + { + "name": "INIS_SCE_ASICTEMP2", + "relation": "<", + "threshold": 22000 + } + ], + "plot_data": "nominal", + "plot_category": "Temperatures" + }, + { + "name": "INIS_SCE_ASICTEMP1", + "description": "NIS SIDECAR Housekeeping ASIC Temperature 1", + "dependency": [], + "plot_data": "nominal", + "plot_category": "Temperatures", + "mean_time_block": "15_minutes" + }, + { + "name": "INIS_SCE_FPATEMP1_1", + "description": "NIS SIDECAR Housekeeping FPA Temperature 1 word 1", + "dependency": [], + "plot_data": "nominal", + "plot_category": "Temperatures" + }, + { + "name": "INIS_SCE_FPATEMP2_1", + "description": "NIS SIDECAR Housekeeping FPA Temperature 2 word 1", + "dependency": [], + "plot_data": "nominal", + "plot_category": "Temperatures" + } + ], + "all+time_interval": [ + ], + "time_interval": [ + { + "name": "INIS_ASICTEMP", + "description": "NIS HW Engineering Telemetry Focus Plate Electronics ASIC Temperature", + "dependency": [], + "plot_data": "nominal", + "plot_category": "Temperatures", + "mean_time_block": "15_minutes" + }, + { + "name": "INIS_FPATEMP", + "description": "NIS HW Engineering Telemetry Focus Plate Electronics FPA Temperature", + "dependency": [], + "plot_data": "nominal", + "plot_category": "Temperatures", + "mean_time_block": "15_minutes" + }, + { + "name": "INIS_SCE_ASICTEMP2", + "description": "NIS SIDECAR Housekeeping ASIC Temperature 2", + "dependency": [], + "plot_data": "nominal", + "plot_category": "Temperatures", + "mean_time_block": "15_minutes" + }, + { + "name": "INIS_SCE_FPATEMP1_2", + "description": "NIS SIDECAR Housekeeping FPA Temperature 1 word 2", + "dependency": [], + "plot_data": "nominal", + "plot_category": "Temperatures", + "mean_time_block": "15_minutes" + }, + { + "name": "INIS_SCE_FPATEMP2_2", + "description": "NIS SIDECAR Housekeeping FPA Temperature 2 word 2", + "dependency": [], + "plot_data": "nominal", + "plot_category": "Temperatures", + "mean_time_block": "15_minutes" + }, + { + "name": "INIS_TMP_CFM", + "description": "CFM Temperature", + "dependency": [], + "plot_data": "nominal", + "plot_category": "Temperatures", + "mean_time_block": "15_minutes" + }, + { + "name": "SA_ZATTEST1", + "description": "First component quaternion", + "dependency": [], + "plot_data": "nominal", + "plot_category": "quaternions", + "mean_time_block": "30_minutes" + }, + { + "name": "SA_ZATTEST2", + "description": "Second component quaternion", + "dependency": [], + "plot_data": "nominal", + "plot_category": "quaternions", + "mean_time_block": "30_minutes" + }, + { + "name": "SA_ZATTEST3", + "description": "Third component quaternion", + "dependency": [], + "plot_data": "nominal", + "plot_category": "quaternions", + "mean_time_block": "30_minutes" + }, + { + "name": "SA_ZATTEST4", + "description": "Fourth component quaternion", + "dependency": [], + "plot_data": "nominal", + "plot_category": "quaternions", + "mean_time_block": "30_minutes" + } + ] +} + + + + diff --git a/jwql/instrument_monitors/common_monitors/edb_monitor_data/nirspec_mnemonics_to_monitor.json b/jwql/instrument_monitors/common_monitors/edb_monitor_data/nirspec_mnemonics_to_monitor.json new file mode 100644 index 000000000..07c1edeca --- /dev/null +++ b/jwql/instrument_monitors/common_monitors/edb_monitor_data/nirspec_mnemonics_to_monitor.json @@ -0,0 +1,36 @@ +{ + "all": [ + { + "name": "IGDPM_MSA_Q1_365IDD", + "database_id": "IGDPM_MSA_Q1_365IDD", + "description": "NIRSpec Mce 
Quad 1 Vdd 365 current", + "dependency": [ + { + "name": "IGDPM_MSA_Q1_365IDD", + "relation": ">", + "threshold": 0.001 + } + ], + "plot_data": "nominal", + "plot_category": "Currents" + }, + { + "name": "INRSD_A1_IPREAMPBIAS_V", + "database_id": "INRSD_A1_IPREAMPBIAS_V", + "description": "NIRSpec ASIC1 IPreAmpBias voltage measurement (Preamp bias v)", + "dependency": [ + { + "name": "INRSD_A1_IPREAMPBIAS_V", + "relation": ">", + "threshold": 1.5 + } + ], + "plot_data": "nominal", + "plot_category": "Voltages" + } + ] +} + + + + diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py new file mode 100755 index 000000000..e87016faf --- /dev/null +++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py @@ -0,0 +1,2198 @@ +#! /usr/bin/env python + +""" +###################################################################### +Engineering Database Mnemonics Trending Monitor (EDB Trending Monitor) +###################################################################### +
+ +This module contains code for the Engineering Database Telemetry monitor. For a given mnemonic, +this monitor retrieves telemetry data from the EDB, filters the data based on optional conditions, +calculates some basic statistics on the data, and plots the data.
+ +There is a list of mnemonics to monitor for each instrument, in the form of a json file. For each +mnemonic, the file specifies the conditions for filtering the data. This may include filtering based +on the values of the mnemonic's own data (e.g. keep only entries where the voltage is < 0.25V), and/or +filtering based on the values of dependency mnemonics. For example, keep data for mnemonic A only +when mnemonic B is > 0.25V and mnemonic C is less than 38K.
+ +Statistics +----------
+ +After filtering the data, the monitor calculates statistics. The monitor supports several different types of +averaging. These include:
+ +1. **daily\_means** - This is designed for mnemonics whose values do not change much over the course of a +day. In this case, mnemonic data is retrieved over a small amount of time each day (e.g. 12:00 - 12:15). +From these data, a daily mean is calculated. For all other types of telemetry, the EDB queries span +the full day.
+ +2. **block\_means** - These are mnemonics where the user wishes to see mean values associated with each +block of entries in the retrieved and filtered data. For example, you want to examine a voltage at times +when some other current is less than 0.25A. The script will read in all telemetry data, and filter out +data points for times where the current did not meet the criteria. It will then calculate the mean of +each remaining block of continuous good data. So if the data were good from 2:00 to 2:30, then bad until +3:00, and good again from 3:00-4:00, then the monitor will calculate a mean value for the 2:00-2:30 +period, and a mean from the 3:00-4:00 period. (A short sketch of this idea follows this list.)
+ +3. **time\_interval** - Mnemonics in this category have their data retrieved and filtered, and then averaged +over the requested time interval. For example, if the user sets a time interval of 5 minutes, then the +monitor calculates the mean value within each 5-minute block of the total time range of the data, and plots +the average values.
+ +4. **every\_change** - This is the most complex case. Mnemonics in this category have their data filtered +and organized based on the value of a secondary mnemonic. For example, the IMIR\_HK\_GW14\_POS\_RATIO returns +a measure of the position of MIRI's grating wheel. We can plot this position as a function of the commanded +location of the grating wheel, which is provided by IMIR\_HK\_GW14\_CUR\_POS. In this case, the monitor will +loop over the commanded positions and for each, gather the measured position information. The measured +positions associated with each commanded position will then be plotted separately. Note that this use +of "every change" is separate from the idea of every-change telemetry, in which telemetry points are +only generated at times when the telemetry value changes. Some of the mnemonics in the EDB do contain +change-only telemetry data, but this should be largely invisible to the EDB Telemetry Monitor user.
+ +5. **all** - In this case, no averaging is done (although filtering is still done). All filtered data +are kept as they are retrieved from the EDB, and plotted without any modification.
+ +6. **all+daily\_means** - This is a combination of the "all" and "daily\_means" cases above. All data points +are retrieved from the EDB and optionally filtered by dependencies. Then daily means are calculated. +Both the full set of data and the daily means are plotted, along with deviations from the mean.
+ +7. **all+block\_means** - This is a combination of the "all" and "block\_means" cases above. All data points +are retrieved from the EDB and optionally filtered by dependencies. Then means for each block of good data +are calculated. Both the full set of data and the means are plotted, along with deviations from the mean.
+ +8. **all+time\_interval** - This is a combination of the "all" and "time\_interval" cases above. All data points +are retrieved from the EDB and optionally filtered by dependencies. Then means are calculated for each block +of time lasting the duration of the time interval. Both the full set of data and the means are plotted, along +with deviations from the mean.
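+
+As a rough illustration of the "block\_means" idea above (a simplified sketch, not the monitor's
+actual implementation), the mean of each contiguous block of good data can be computed from the
+values that survived filtering plus the index at which each block starts:
+
+.. code-block:: python
+
+    import numpy as np
+
+    # Hypothetical filtered values and the starting index of each contiguous block of good data
+    values = np.array([1.0, 1.1, 0.9, 5.2, 5.3, 5.1, 5.2])
+    block_starts = [0, 3]  # block 1 covers indexes 0-2, block 2 covers indexes 3-6
+
+    edges = block_starts + [len(values)]
+    block_means = [values[edges[i]:edges[i + 1]].mean() for i in range(len(block_starts))]
+    # block_means -> [1.0, 5.2]
+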
+JSON file format +----------------
+ +The type of data averaging is at the top level of the JSON file. Values must match the 8 types described above. +The entry for each mnemonic has several pieces of information, described below.
+ +- **name**: Name of the mnemonic as it appears in the EDB. +- **database\_id**: Optional name to use in the plot title for this mnemonic. Any averaged data saved to the JWQL database will be saved under this name if it is present. +- **description**: Summary describing the data contained in the mnemonic. Placed in the plot title. +- **dependency**: This is a list of mnemonics and conditions that will be used to filter the data. +- **plot\_data**: Description of how the data are to be plotted. There are two options: "nominal", in which case + the mnemonic data are plotted as-is, and "*<mnemonic_name>", where <mnemonic_name> is the name of another mnemonic. In this case, the + data for this second mnemonic are retrieved using the same dependencies as the primary mnemonic. The primary mnemonic + and this second mnemonic are then multiplied together and plotted. This option was designed around plotting power as + the product of current and voltage.
+ +A further option for the **"plot\_data"** field is the addition of a comma-separated list of statistics to be overplotted. +Options are: "mean", "median", "max", and "min". Note that this is a little confusing, because in many cases the mnemonic's +data will already contain the median value of the data (and the original data as returned from the EDB will not be +available).
The monitor realized this though, so if you specify "mean" for a mnemonic in the "daily\_mean" list, it will simply +plot the same data twice, on top of itself. + +As an example, in order to plot the daily mean and maximum values of the product of SE\_ZIMIRICEA and SE\_ZBUSVLT, the plot\_data +entry would be: "*SE\_ZBUSVLT,max". If you also wanted to plot the minimum daily value, the entry would be: "*SE\_ZBUSVLT,max,min". +And similarly, to plot SE\_ZIMIRICEA on its own (not as a product), the plot\_data entries shown above would become: "nominal,max" +and "nominal,max,min". + +* **nominal_value**: Optional. The "expected" value for this mnemonic. If provided, a horizontal dashed line will be added at this value. +* **yellow_limits**: Optional. This is a list of two values that describe the lower and upper limits of the expected range of the mnemonic's value. If these are present, a green background is added to the plot at values between these limits. Outside of these limits, the background is yellow. +* **red_limits**: Optional. Similar to yellow_limits above. In this case the lower and upper limits represent the thresholds outside of which there may be a problem. In this case, the background of the plot outside of these values is red. +* **plot_category**: This is the name of the tab on the website into which this plot should be placed. +* **mean_time_block**: Optional. This is only used for ``time_interval`` mnemonics. It describes the length of time over which to bin and average the data. The value consists of a number and a unit of time: e.g. "15_minutes" + +Below we present details on how to construct json entries for these specific cases. + +"daily_means" entries +===================== +Here is an example of two **daily_mean** telemetry entries in the json file. In both, SE_ZIMIRICEA values are retrieved. +For the first plot, data are only kept for the times where the following dependencies are true: + +1. SE_ZIMIRICEA is > 0.2 A +2. IMIR_HK_ICE_SEC_VOLT1 is < 1 V +3. IMIR_HK_IMG_CAL_LOOP is OFF +4. IMIR_HK_IFU_CAL_LOOP is OFF +5. IMIR_HK_POM_LOOP is OFF + +For the second plot, data are kept for the times where: + +1. IMIR_HK_ICE_SEC_VOLT1 is > 25 V + +Note that the "database_id" entry is used to differentiate the plot labels of these two cases, as well as their entries in the JWQLDB. +In both cases, the data are plotted as the product of SE_ZIMIRICEA and SE_ZBUSVLT. In the second case, the maximum daily value is also +plotted. Both plots are placed in the ``power`` tab on the webpage. + +.. 
code-block:: json + + { + "daily_means": [ + { + "name": "SE_ZIMIRICEA", + "database_id": "SE_ZIMIRICEA_NO_OPS", + "description": "ICE drive current (no ops)", + "dependency": [ + { + "name": "SE_ZIMIRICEA", + "relation": ">", + "threshold": 0.2 + }, + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": "<", + "threshold": 1 + }, + { + "name": "IMIR_HK_IMG_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_IFU_CAL_LOOP", + "relation": "=", + "threshold": "OFF" + }, + { + "name": "IMIR_HK_POM_LOOP", + "relation": "=", + "threshold": "OFF" + } + ], + "plot_data": "*SE_ZBUSVLT", + "nominal_value": 7.57, + "yellow_limits": [7.0, 8.13], + "plot_category": "power" + }, + { + "name": "SE_ZIMIRICEA", + "database_id": "SE_ZIMIRICEA_OPS", + "description": "ICE drive current (ops)", + "dependency": [ + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": ">", + "threshold": 25 + } + ], + "plot_data": "*SE_ZBUSVLT,max", + "nominal_value": 11.13, + "yellow_limits": [10.23, 12.02], + "plot_category": "power" + } + ] + }
+ + +For a case with no dependencies, the "dependencies" keyword can be left empty:
+ +.. code-block:: json + + { + "name": "INRC_ICE_DC_VOL_P5_DIG", + "description": "ICE HK +5V voltage for digital electronics", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [4.99, 5.04], + "red_limits": [4.5, 5.5], + "plot_category": "ice_voltage" + }
+ + +"block_means" entries +===================== +In the example shown below, we want to plot IMIR_HK_ICE_SEC_VOLT1 at times when it has values higher +than 25V. In this case, the EDB monitor will find times when the voltage is under the 25V limit. These +times will separate the blocks of time when the voltage does meet the threshold value. It then calculates +and plots the median voltage within each of these blocks.
+ +.. code-block:: json + + "block_means":[ + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "database_id": "IMIR_HK_ICE_SEC_VOLT1_OPS", + "description": "ICE Secondary Voltage (HV) 1", + "dependency": [ + { + "name": "IMIR_HK_ICE_SEC_VOLT1", + "relation": ">", + "threshold": 25 + } + ], + "plot_data": "nominal", + "nominal_value": 39.24, + "yellow_limits": [39.14, 39.34], + "plot_category": "ICE_voltage" + }
+ +"time_interval" entries +=======================
+ +For mnemonics to be averaged over some time period, use the "mean_time_block" entry. +The value of mean_time_block should be a number followed by an underscore and a unit +of time. Currently, the unit must contain one of "min", "sec", "hour", or "day". The +monitor looks for one of these strings within the mean_time_block entry, meaning that +"second", "seconds", "minutes", "minute", "hours", "days", etc are all valid entries.
+ +In the example below, the EDB monitor will bin the SE_ZINRCICE1 data into 5 minute blocks, +and calculate and plot the mean of each block.
+ +.. code-block:: json + + "time_interval": [ + { + "name": "SE_ZINRCICE1", + "description": "ICE1 current", + "mean_time_block": "5_min", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.36, 0.8], + "red_limits": [0, 1.367], + "plot_category": "box_current" + } + ]
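+
+As a minimal sketch of the parsing rule described above (an illustration only; the monitor's own
+parsing is done by utils.get_averaging_time_duration in edb_telemetry_monitor_utils), a
+"mean_time_block" string could be converted into a time quantity like this:
+
+.. code-block:: python
+
+    import astropy.units as u
+
+    def parse_mean_time_block(entry):
+        # Convert e.g. "15_minutes" into an astropy quantity (illustrative sketch only)
+        number, unit_string = entry.split('_', 1)
+        for substring, unit in (('sec', u.second), ('min', u.minute), ('hour', u.hour), ('day', u.day)):
+            if substring in unit_string.lower():
+                return float(number) * unit
+        raise ValueError(f"Unrecognized time unit in '{entry}'")
+
+    parse_mean_time_block("5_min")       # 5.0 min
+    parse_mean_time_block("15_minutes")  # 15.0 min
+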
+"every_change" entries +======================
+ +This is a complex case and at the moment is customized for the MIRI filter wheel position +mnemonics such as IMIR_HK_FW_POS_RATIO. In this case, the EDB monitor will retrieve data for +the filter wheel position, which is a float at each time. It will also retrieve the commanded +position of the filter wheel, which is a string at each time (e.g. OPEN, CLOSED). It then divides +the filter wheel position data into groups based on the value of the commanded position (i.e. group +together all of the position data when the commanded position is OPEN). It then computes the median +value of the filter position within each continuous block of time where the commanded position is +constant. This median value is then normalized by the expected location value (retrieved from +constants.py). One line is plotted for each commanded position.
+ + .. code-block:: json + + "every_change": [ + { + "name": "IMIR_HK_FW_POS_RATIO", + "description": "FW normalized position sensor voltage ratio", + "dependency": [ + { + "name": "IMIR_HK_FW_CUR_POS", + "relation": "none", + "threshold": 0 + } + ], + "plot_data": "nominal", + "yellow_limits": [-1.6, 1.6], + "plot_category": "Position_sensors" + } + ]
+ +"all" entries +============= +In this case, no grouping or averaging of data from the EDB are done. Data are retrieved from the EDB, +filtered by any dependencies, and plotted.
+ +.. code-block:: json + + "all": [ + { + "name": "SE_ZINRCICE1", + "description": "ICE1 current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.36, 0.8], + "red_limits": [0, 1.367], + "plot_category": "box_current" + }, + { + "name": "SE_ZINRCICE2", + "description": "ICE2 current", + "dependency": [], + "plot_data": "nominal", + "yellow_limits": [0.38, 0.86], + "red_limits": [0, 1.372], + "plot_category": "box_current" + } + ]
+ +"all+daily_means" entries +========================= +This is a combination of the "daily_means" and "all" cases above.
+ +"all+block_means" entries +========================= +This is a combination of the "all" and "block_means" cases above.
+ +"all+time_interval" entries +=========================== +This is a combination of the "all" and "time_interval" cases above.
+ + +Summary of the EDB monitor operation +------------------------------------
+ +The monitor is set up to find the total span of time over which the plots are requested +(with the default being controlled by the value in jwql.utils.constants). It loops over +each mnemonic and breaks the total time to be queried up into 24-hour long blocks. It then +queries the EDB once for each day-long block, filters the data based on any dependencies given +and then calculates statistics. Breaking the queries up into day-long blocks is done in order +to avoid having the EDB return a very large table, which could cause memory problems, or slow +the monitor down. This is a possibility because in some cases, mnemonic values are present at +cadences of > 1 Hz.
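+
+As a simplified illustration of this daily chunking (not the exact logic of the monitor's
+generate_query_start_times method, which also handles partial days and the query duration), the
+individual EDB query windows could be built up as follows:
+
+.. code-block:: python
+
+    import datetime
+
+    def daily_query_windows(start, end):
+        # Split the full [start, end] range into consecutive 24-hour query windows (sketch only)
+        starts, ends = [], []
+        current = start
+        while current + datetime.timedelta(days=1) <= end:
+            starts.append(current)
+            ends.append(current + datetime.timedelta(days=1))
+            current += datetime.timedelta(days=1)
+        return starts, ends
+
+    # Three day-long windows: Oct 1-2, Oct 2-3, and Oct 3-4
+    starts, ends = daily_query_windows(datetime.datetime(2022, 10, 1), datetime.datetime(2022, 10, 4))
+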
+After data are filtered and averaged (and combined with the data of a second mnemonic if they +are being plotted as a product), any new data are saved to the JWQL database. This will prevent +having to repeat the calculations during future queries. For mnemonics where no averaging is +done, we do not save anything in the JWQL databases, in order to save memory.
+ +Each time a query is initiated, the JWQL database is checked first and any relevant data are +retrieved. In this way, we only query the EDB for new data.
+ +The monitor creates one plot for each specified mnemonic. These plots are organized into +"plot_categories", as seen in the json examples above. All plots for a given category are placed +together in a bokeh gridplot. Each gridplot (i.e. plot_category) is then placed in a separate +bokeh tab, in order to try and keep related plots together while not overwhelming the user +with too many plots at once. The tabbed plots are written out to a json file. When the user +clicks on the EDB Telemetry Monitor link on the web app, this json file is read in and embedded +into the html file that is displayed. With this method, EDB queries and data calculations are all +done asynchronously, which means that the EDB Telemetry Monitor web page should be fast to load.
+ +Author +------ + - Bryan Hilbert
+ +Use +--- + This module can be called from the command line like this: + + :: + + python edb_telemetry_monitor.py + +"""
+import argparse +from collections import defaultdict +from copy import deepcopy +import datetime +import json +import logging +import numpy as np +import os +from requests.exceptions import HTTPError +import urllib + +from astropy.stats import sigma_clipped_stats +from astropy.table import Table +from astropy.time import Time, TimeDelta +import astropy.units as u +from bokeh.embed import components, json_item +from bokeh.layouts import gridplot +from bokeh.models import BoxAnnotation, ColumnDataSource, DatetimeTickFormatter, HoverTool, Range1d +from bokeh.models.widgets import Tabs, Panel +from bokeh.plotting import figure, output_file, save, show +from bokeh.palettes import Turbo256 +from jwql.database import database_interface +from jwql.database.database_interface import NIRCamEDBDailyStats, NIRCamEDBBlockStats, \ + NIRCamEDBTimeIntervalStats, NIRCamEDBEveryChangeStats, NIRISSEDBDailyStats, NIRISSEDBBlockStats, \ + NIRISSEDBTimeIntervalStats, NIRISSEDBEveryChangeStats, MIRIEDBDailyStats, MIRIEDBBlockStats, \ + MIRIEDBTimeIntervalStats, MIRIEDBEveryChangeStats, FGSEDBDailyStats, FGSEDBBlockStats, \ + FGSEDBTimeIntervalStats, FGSEDBEveryChangeStats, NIRSpecEDBDailyStats, NIRSpecEDBBlockStats, \ + NIRSpecEDBTimeIntervalStats, NIRSpecEDBEveryChangeStats, session, engine +from jwql.edb import engineering_database as ed +from jwql.instrument_monitors.common_monitors.edb_telemetry_monitor_utils import condition +from jwql.instrument_monitors.common_monitors.edb_telemetry_monitor_utils import utils +from jwql.shared_tasks.shared_tasks import only_one +from jwql.utils import monitor_utils +from jwql.utils.logging_functions import log_info, log_fail +from jwql.utils.constants import EDB_DEFAULT_PLOT_RANGE, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE, MIRI_POS_RATIO_VALUES +from jwql.utils.permissions import set_permissions +from jwql.utils.utils import ensure_dir_exists, get_config + + +ALLOWED_COMBINATION_TYPES = ['all+daily_means', 'all+block_means', 'all+every_change', 'all+time_interval']
+ + +class EdbMnemonicMonitor(): + """Class for executing the EDB Telemetry Monitor
+ + This class will search for and retrieve new telemetry data associated with the given + mnemonics from the engineering database. These data will be filtered based on + dependency mnemonic details, and optionally averaged over some time period. These + data will then be combined with data from previous runs of the EDB Telemetry + Monitor, which have been stored in the JWQL database. The resulting data is + then plotted.
+ + Attributes + ---------- + query_results : dict + Dictionary containing EDB query results for all mnemonics for the current session. + + figures : dict + Dictionary of Bokeh figures. Keys are the plot_category, and values are lists of + Bokeh figure objects.
+ + history_table : sqlalchemy table + Table containing a history of the queries made for the mnemonic type + + _usename : str + Key to use when specifying the mnemonic's identity + + query_cadence : datetime.datetime + How often the EDB should be queried. Currently set to 1 day. + + plot_output_dir : str + Directory into which the json file containing the gridded plots should be saved + + instrument : str + Name of the instrument whose mnemonics are being investigated + + _plot_start : datetime.datetime + Fallback date for the plot start. Only used if the plot contains no data. + + _plot_end : datetime.datetime + Fallback date for the plot end. Only used if the plot contains no data. + + requested_start_time : datetime.datetime + Earliest start time for the current run of the EDB Monitor + + requested_end_time : datetime.datetime + Latest end time for the current run of the EDB Monitor + + + Raises + ------ + NotImplementedError + If multiple dependencies are provided for an "every_change" mnemonic, or if the + dependency values are not strings + + ValueError + If the user requests to plot every_change data as the product of two mnemonics + + ValueError + If the user gives a list of mnemonics to query (rather than getting them from a + json file), but starting and ending dates for the plot are not specified + + NotImplementedError + If the user specifies a plot type other than "nominal" or the product of two + mnemonics + + ValueError + If the user calls plot_every_change_data and requests multiple output types + for the resulting figure + + """ + def __init__(self): + self.query_results = {} + + def add_figure(self, fig, key): + """Add Bokeh figure to the dictionary of figures + + Parameters + ---------- + fig : bokeh.plotting.figure + Plot of a single mnemonic + + key : str + Key under which to store the plot + """ + if key in self.figures: + self.figures[key].append(fig) + else: + self.figures[key] = [fig] + + def add_new_block_db_entry(self, mnem, query_time): + """Add a new entry to the database table for any kind + of telemetry type other than "all" (which does not save + data in the database) and "every_change" (which needs a + custom table.) + + Parameters + ---------- + mnem : jwql.edb.engineering_database.EdbMnemonic + Mnemonic information + + query_time : datetime.datetime + Start time of the query + """ + logging.info(f"Adding new entry for {mnem.mnemonic_identifier} to history table.") + times = mnem.data["dates"].data + data = mnem.data["euvalues"].data + stdevs = mnem.stdev + times = ensure_list(times) + data = ensure_list(data) + stdevs = ensure_list(stdevs) + medians = ensure_list(mnem.median) + maxs = ensure_list(mnem.max) + mins = ensure_list(mnem.min) + db_entry = {'mnemonic': mnem.mnemonic_identifier, + 'latest_query': query_time, + 'times': times, + 'data': data, + 'stdev': stdevs, + 'median': medians, + 'max': maxs, + 'min': mins, + 'entry_date': datetime.datetime.now() + } + with engine.begin() as connection: + connection.execute(self.history_table.__table__.insert(), db_entry) + + def add_new_every_change_db_entry(self, mnem, mnem_dict, dependency_name, query_time): + """Add new entries to the database table for "every change" + mnemonics. Add a separate entry for each dependency value. 
+ + Parameters + ---------- + mnem : str + Name of the mnemonic whose data is being saved + + mnem_dict : dict + Dictionary containing every_change data as output by organize_every_change() + + dependency_name : str + Name of mnemonic whose values the changes in mnemonic are based on + + query_time : datetime.datetime + Start time of the query + """ + # We create a separate database entry for each unique value of the + # dependency mnemonic. + logging.info(f"Adding new entries for {mnem} to history table.") + for key, value in mnem_dict.items(): + (times, values, medians, stdevs) = value + times = ensure_list(times) + values = ensure_list(values) + + db_entry = {'mnemonic': mnem, + 'dependency_mnemonic': dependency_name, + 'dependency_value': key, + 'mnemonic_value': values, + 'time': times, + 'median': medians, + 'stdev': stdevs, + 'latest_query': query_time, + 'entry_date': datetime.datetime.now() + } + with engine.begin() as connection: + connection.execute( + self.history_table.__table__.insert(), db_entry) + + def calc_timed_stats(self, mnem_data, bintime, sigma=3): + """Not currently used. + Calculate stats for telemetry using time-based averaging. + This works on data that have potentially been filtered. How do + we treated any breaks in the data due to the filtering? Enforce + a new bin at each filtered block of data? Blindly average by + time and ignore any missing data due to filtering? The former + makes more sense to me + + Parameters + ---------- + mnem_data : jwql.edb.engineering_database.EdbMnemonic + Mnemonic data to be averaged + + bintime : astropy.time.Quantity + Time to use for binning and averaging data + + Returns + ------- + all_means : list + List of mean values + + all_meds : list + List of median values + + all_stdevs : list + List of stadnard deviations + + all_times : list + List of times associated with the means, medians, and standard deviations + """ + all_means = [] + all_meds = [] + all_stdevs = [] + all_times = [] + + minimal_delta = 1 * u.sec # modify based on units of time + for i in range(len(mnem_data.blocks) - 1): + block_min_time = mnem_data.data["dates"][mnem_data.blocks[i]] + block_max_time = mnem_data.data["dates"][mnem_data.blocks[i + 1]] + bin_times = np.arange(block_min_time, block_max_time + minimal_delta, bintime) + all_times.extend((bin_times[1:] - bin_times[0:-1]) / 2.) # for plotting later + + for b_idx in range(len(bin_times) - 1): + good_points = np.where((mnem_data.data["dates"] >= bin_times[b_idx]) & (mnem_data.data["dates"] < bin_times[b_idx + 1])) + bin_mean, bin_med, bin_stdev = sigma_clipped_stats(mnem_data.data["data"][good_points], sigma=sigma) + all_means.append(bin_mean) + all_meds.append(bin_med) + all_stdevs.append(bin_stdev) + return all_means, all_meds, all_stdevs, all_times + + @log_fail + @log_info + @only_one(key='edb_monitor') + def execute(self, mnem_to_query=None, plot_start=None, plot_end=None): + """Top-level wrapper to run the monitor. Take a requested list of mnemonics to + process, or assume that mnemonics will be processed. + + Parameters + ---------- + mnem_to_query : dict + Mnemonic names to query. This should be a dictionary with the instrument + names as keys and a list of mnemonic names as the value. This option is + intended for use when someone requests, from the website, an expanded timeframe + compared to the default. The monitor will then look up the details + of each mnemonic (i.e. dependencies, averaging) from the standard + json file, and will run the query using query_start and query_end. 
+ + plot_start : datetime.datetime + Start time to use for the query when requested from the website. Note + that the query will be broken up into multiple queries, each spanning + the default amount of time, in order to prevent querying for too much + data at one time. + + plot_end : datetime.datetime + End time to use for the query when requested from the website. + """ + # This is a dictionary that will hold the query results for multiple mnemonics, + # in an effort to minimize the number of EDB queries and save time. + self.query_results = {} + + # The cadence with which the EDB is queried. This is different than the query + # duration. This is the cadence of the query starts, while the duration is the + # block of time to query over. For example, a cadence of 1 day and a duration + # of 15 minutes means that the EDB will be queried over 12:00am - 12:15am each + # day. + self.query_cadence = datetime.timedelta(days=1) + + # Set up directory structure to hold the saved plots + config = get_config() + base_dir = os.path.join(config["outputs"], "edb_telemetry_monitor") + ensure_dir_exists(base_dir) + + # Case where the user is requesting the monitor run for some subset of + # mnemonics for some non-standard time span + if mnem_to_query is not None: + if plot_start is None or plot_end is None: + raise ValueError(("If mnem_to_query is provided, plot_start and plot_end " + "must also be provided.")) + + for instrument_name in JWST_INSTRUMENT_NAMES: + if instrument_name in mnem_to_query: + # Read in a list of mnemonics that the instrument teams want to monitor + # From either a text file, or a edb_mnemonics_montior database table + monitor_dir = os.path.dirname(os.path.abspath(__file__)) + + # Define the output directory in which the html files will be saved + self.plot_output_dir = os.path.join(base_dir, instrument_name) + ensure_dir_exists(self.plot_output_dir) + + # File of mnemonics to monitor + mnemonic_file = os.path.join(monitor_dir, 'edb_monitor_data', f'{instrument_name}_mnemonics_to_monitor.json') + + # Read in file with nominal list of mnemonics + with open(mnemonic_file) as json_file: + mnem_dict = json.load(json_file) + + # Filter to keep only the requested mnemonics + filtered_mnemonic_dict = {} + for telem_type in mnem_dict: + for mnemonic in mnem_dict[telem_type]: + if mnemonic["name"] in mnem_to_query: + if telem_type not in filtered_mnemonic_dict: + filtered_mnemonic_dict[telem_type] = [] + filtered_mnemonic_dict[telem_type].append(mnemonic) + + self.run(instrument_name, filtered_mnemonic_dict, plot_start=plot_start, plot_end=plot_end) + logging.info(f'Monitor complete for {instrument_name}') + else: + # Here, no input was provided on specific mnemonics to run, so we run the entire suite + # as defined by the json files. This is the default operation. 
+ + # Loop over instruments + for instrument_name in JWST_INSTRUMENT_NAMES: + monitor_dir = os.path.dirname(os.path.abspath(__file__)) + + # File of mnemonics to monitor + mnemonic_file = os.path.join(monitor_dir, 'edb_monitor_data', f'{instrument_name}_mnemonics_to_monitor.json') + + # Define the output directory in which the html files will be saved + self.plot_output_dir = os.path.join(base_dir, instrument_name) + ensure_dir_exists(self.plot_output_dir) + + # Read in file with nominal list of mnemonics + with open(mnemonic_file) as json_file: + mnem_dict = json.load(json_file) + + # Run with the entire dictionary + self.run(instrument_name, mnem_dict, plot_start=plot_start, plot_end=plot_end) + logging.info(f'Monitor complete for {instrument_name}') + + logging.info(f'EDB Telemetry Monitor completed successfully.') + + def filter_telemetry(self, mnem, data, dep_list): + """ + Filter telemetry data for a single mnemonic based on a list of + conditions/dependencies, as well as a time. + + Parameters + ---------- + mnem : str + Name of the mnemonic whose dependencies will be queried + + data : jwql.edb.engineering_database.EdbMnemonic + Information and query results for a single mnemonic + + dep_list : list + List of dependencies for a given mnemonic. Each element of the list + is a dictionary containing the keys: name, relation, and threshold. + In nominal operations, these are read out of the json file listing the + mnemonics to be monitored. + + Returns + ------- + filtered : jwql.edb.engineering_database.EdbMnemonic + Filtered information and query results for a single mnemonic + """ + if len(dep_list) == 0: + return data + + all_conditions = [] + for dependency in dep_list: + + if dependency["name"] != mnem: + # If the dependency to retrieve is different than the mnemonic being filtered, + # get the dependency's times and values from the EDB. + dep_mnemonic = self.get_dependency_data(dependency, data.requested_start_time, data.requested_end_time) + + else: + # If we are just filtering the mnemonic based on it's own values, then there is + # no need to query the EDB + dep_mnemonic = {} + dep_mnemonic["dates"] = data.data["dates"] + dep_mnemonic["euvalues"] = data.data["euvalues"] + + if len(dep_mnemonic["dates"]) > 0: + # For each dependency, get a list of times where the data are considered good + # (e.g. the voltage is greater than 0.25) + time_boundaries = condition.relation_test(dep_mnemonic, dependency["relation"], dependency["threshold"]) + + # Add these times to the list of times associated with all dependencies. + all_conditions.append(time_boundaries) + else: + # In this case, the query for dependency data returned an empty array. With no information + # on the dependency, it seems like we have to throw out the data for the mnemonic of + # interest, because there is no way to know if the proper conditions have been met. 
+ logging.info((f'No data for dependency {dependency["name"]} between {data.requested_start_time} and {data.requested_end_time}, ' + f'so ignoring {mnem} data for the same time period.')) + + filtered = empty_edb_instance(data.mnemonic_identifier, data.requested_start_time, + data.requested_end_time, meta=data.meta, info=data.info) + return filtered + + # Now find the mnemonic's data that during times when all conditions were met + full_condition = condition.condition(all_conditions) + + # For change only data, like SE_ZIMIRICEA, interpolate values to match the start and stop + # times of all conditions in order to prevent a very stable value (meaning few datapoints) + # from being interpreted as having no data during the time period of the conditions + if data.meta['TlmMnemonics'][0]['AllPoints'] == 0: + boundary_times = [] + for cond in all_conditions: + for time_pair in cond.time_pairs: + boundary_times.extend([time_pair[0], time_pair[1]]) + # If we have a valid list of start/stop times (i.e. no None entries), then + # add data for those points to the exsiting collection of date/value points. + # To do this we use interpolate, but afterwards the data will still be change- + # only. + if None not in boundary_times: + existing_dates = np.array(data.data["dates"]) + unique_boundary_dates = np.unique(np.array(boundary_times)) + interp_dates = sorted(np.append(existing_dates, unique_boundary_dates)) + data.interpolate(interp_dates) + + full_condition.extract_data(data.data) + + # Put the results into an instance of EdbMnemonic + filtered = ed.EdbMnemonic(data.mnemonic_identifier, data.requested_start_time, data.requested_end_time, + full_condition.extracted_data, data.meta, data.info, blocks=full_condition.block_indexes) + return filtered + + def find_all_changes(self, mnem_data, dep_list): + """Identify indexes of data to create separate blocks for each value of the + condition. This is for the "every_change" mnemonics, where we want to create a + mean value for all telemetry data acquired for each value of some dependency + mnemonic. + + For now, this function assumes that we only have one dependency. I'm not sure + how it would work with multiple dependencies. + + Parameters + ---------- + mnem_data : jwql.edb.engineering_database.EdbMnemonic + EDBMnemonic instance to be searched + + dep_list : list + List of dependency mnemonic names. Currently should be a 1-element list + + Returns + ------- + mnem_data : jwql.edb.engineering_database.EdbMnemonic + EDBMnemonic instance that was input, with ```blocks``` and ```every_change_values``` + attributes added. + """ + if len(dep_list) > 1: + raise NotImplementedError("Not sure how to work with every_change data with multiple dependencies.") + + # If the mnemonic instance is empty, then populate the blocks and every_change_values + # attributes with defaults, and exit + if len(mnem_data) == 0: + mnem_data.blocks = [0, 1] + mnem_data.every_change_values = [np.nan] + return mnem_data + + # Retrieve the data for the dependency to use + dependency = self.get_dependency_data(dep_list[0], mnem_data.requested_start_time, + mnem_data.requested_end_time) + + # If the dependency data are empty, then we can't define blocks. Set the entire range + # of data to a single block. Since filter_data is called before find_all_changes, we + # *should* never end up in here, as missing dependency data should zero out the main + # mnemonic in there. 
+ if len(dependency) == 0: + mnem_data.blocks = [0, len(mnem)] + mnem_data.every_change_values = [np.nan] + + # Make sure the data values for the dependency are strings. + if type(dependency["euvalues"][0]) != np.str_: + raise NotImplementedError("find_all_changes() is not set up to handle non-strings in the dependency data") + else: + # Find indexes within the data table where the value of the dependency changes. + change_indexes = np.where(dependency["euvalues"][:-1] != dependency["euvalues"][1:])[0] + + # Increase values by 1 to get the correct index for the full data length + if len(change_indexes) > 0: + change_indexes += 1 + + # Add 0 as the first element + change_indexes = np.insert(change_indexes, 0, 0) + + # Add the largest index as the final element + change_indexes = np.insert(change_indexes, len(change_indexes), len(dependency["euvalues"])) + + # If dates differ between the mnemonic of interest and the dependency, then interpolate to match + # If the mnemonic of interest is change-only data, then we need to interpolate onto a list of dates + # that include those originally in the mnemonic plus those in the dependency) + if (len(dependency["dates"]) != len(mnem_data.data["dates"].value)) or not np.all(dependency["dates"] == mnem_data.data["dates"].value): + if mnem_data.meta['TlmMnemonics'][0]['AllPoints'] != 0: + mnem_data.interpolate(dependency["dates"]) + else: + # In practice, we should never end up in this block, because change-only data are transformed + # into every-point data after being returned by the query. It might be useful to keep this + # here for now, in case that situation changes later. + all_dates = sorted(np.append(np.array(dependency["dates"]), np.array(mnem_data.data["dates"].data))) + mnem_data.interpolate(all_dates) + + # We also need to interpolate the dependency onto the same dates here, so that we know + # the new indexes where the values change + temp_dep = ed.EdbMnemonic(dep_list[0]['name'], dependency["dates"][0], dependency["dates"][-1], + dependency, meta={'TlmMnemonics': [{'AllPoints': 1}]}, info={}, blocks=change_indexes) + temp_dep.interpolate(all_dates) + change_indexes = temp_dep.blocks + + # Get the dependency values for each change. + vals = dependency["euvalues"][change_indexes[0:-1]].data + + # Place the dependency values in the every_change_values attribute, and the corresponding + # indexes where the changes happen into the blocks attribute. + mnem_data.blocks = change_indexes + mnem_data.every_change_values = vals + + return mnem_data + + def generate_query_start_times(self, starting_time): + """Generate a list of starting and ending query times such that the entire time range + is covered, but we are only querying the EDB for one day's worth of data at a time. + Start times are once per day between the previous query time and the present. End + times are the start times plus the query duration. 
+ + Parameters + ---------- + starting_time : datetime.datetime + Datetime specifying the earliest time to query the EDB + + Returns + ------- + query_starting_times : list + List of datetime objects giving start times for EDB queries on a daily basis + + query_ending_times : list + List of datetime objects giving ending times for EDB queries on a daily basis + """ + if starting_time is None: + query_starting_times = None + query_ending_times = None + logging.info(f'Query start times: None') + else: + query_starting_times = [] + query_ending_times = [] + dtime = self._plot_end - starting_time + if dtime > self.query_duration: + full_days = dtime.days + partial_day = dtime.seconds + # If the time span is not a full day, but long enough to cover the query_duration, + # then we can fit in a final query on the last day + if partial_day > self.query_duration.total_seconds(): + full_days += 1 + else: + # If the time between the query start and the plot end time + # is less than the query duration, then we do not query the EDB. + # The next run of the monitor will be used to cover that time. + return None, None + + for delta in range(full_days): + tmp_start = starting_time + datetime.timedelta(days=delta) + query_starting_times.append(tmp_start) + query_ending_times.append(tmp_start + self.query_duration) + + # Make sure the end time of the final query is before the current time. + # If it is after the present time, remove start,end pairs until the + # latest ending time is before the present. It's better to throw out + # the entire start,end entry rather than shorten the final start,end + # pair because that can potentially cause a block of time to be skipped + # over on the next run of the monitor. + if query_ending_times[-1] > self._today: + query_ending_times = np.array(query_ending_times) + query_starting_times = np.array(query_starting_times) + valid_ending_times = query_ending_times <= self._today + query_starting_times = query_starting_times[valid_ending_times] + query_ending_times = query_ending_times[valid_ending_times] + return query_starting_times, query_ending_times + + def get_dependency_data(self, dependency, starttime, endtime): + """Find EDB data for the mnemonic listed as a dependency. Keep a dcitionary up to + date with query results for all dependencies, in order to minimize the number of + queries we have to make. Return the requested dependency's time and data values + in a dictionary. + + Parameters + ---------- + dependency : dict + The name of the mnemonic to seach for should be the value associated with the + "name" key. + + starttime : datetime.datetime + Staritng time for the query + + endtime : datetime.datetime + Ending time for the query + + Returns + ------- + dep_mnemonic : dict + Data for the dependency mnemonic. Keys are "dates" and "euvalues". This is + essentially the data in the ```data``` attribute of an EDBMnemonic instance + """ + # If we have already queried the EDB for the dependency's data in the time + # range of interest, then use that data rather than re-querying. 
+ if dependency["name"] in self.query_results: + + # We need the full time to be covered + if ((self.query_results[dependency["name"]].requested_start_time <= starttime) + and (self.query_results[dependency["name"]].requested_end_time >= endtime)): + + logging.info(f'Dependency {dependency["name"]} is already present in self.query_results.') + + # Extract data for the requested time range + matching_times = np.where((self.query_results[dependency["name"]].data["dates"] >= starttime) + & (self.query_results[dependency["name"]].data["dates"] <= endtime)) + dep_mnemonic = {"dates": self.query_results[dependency["name"]].data["dates"][matching_times], + "euvalues": self.query_results[dependency["name"]].data["euvalues"][matching_times]} + + logging.info(f'Length of returned data: {len(dep_mnemonic["dates"])}') + else: + # If what we have from previous queries doesn't cover the time range we need, then query the EDB. + logging.info(f'Dependency {dependency["name"]} is present in self.query results, but does not cover the needed time. Querying EDB for the dependency.') + mnemonic_data = ed.get_mnemonic(dependency["name"], starttime, endtime) + logging.info(f'Length of returned data: {len(mnemonic_data)}, {starttime}, {endtime}') + + # Place the data in a dictionary + dep_mnemonic = {"dates": mnemonic_data.data["dates"], "euvalues": mnemonic_data.data["euvalues"]} + + # This is to save the data so that we may avoid an EDB query next time + # Add the new data to the saved query results. This should also filter out + # any duplicate rows. + self.query_results[dependency["name"]] = self.query_results[dependency["name"]] + mnemonic_data + else: + # In this case, the dependency is not present at all in the dictionary of past query results. + # So here we again have to query the EDB. + logging.info(f'Dependency {dependency["name"]} is not in self.query_results. Querying the EDB.') + self.query_results[dependency["name"]] = ed.get_mnemonic(dependency["name"], starttime, endtime) + logging.info(f'Length of data: {len(self.query_results[dependency["name"]])}, {starttime}, {endtime}') + + dep_mnemonic = {"dates": self.query_results[dependency["name"]].data["dates"], + "euvalues": self.query_results[dependency["name"]].data["euvalues"]} + + return dep_mnemonic + + def get_history(self, mnemonic, start_date, end_date, info={}, meta={}): + """Retrieve data for a single mnemonic over the given time range from the JWQL + database (not the EDB). + + Parameters + ---------- + mnemonic : str + Name of mnemonic whose data is to be retrieved + + start_date : datetime.datetime + Beginning date of data retrieval + + end_date : datetime.datetime + Ending date of data retrieval + + info : dict + Info dictionary for an EDBMnemonic instance. + + meta : dict + Meta dictionary for an EDBMnemonic instance. + + Returns + ------- + hist : jwql.edb.engineering_database.EdbMnemonic + Retrieved data + """ + data = session.query(self.history_table) \ + .filter(self.history_table.mnemonic == mnemonic, + self.history_table.latest_query > start_date, + self.history_table.latest_query < end_date) + + all_dates = [] + all_values = [] + all_medians = [] + all_means = [] + all_maxs = [] + all_mins = [] + # Each row contains a list of dates and data that could have elements + # outside of the plot range. 
Return only the points inside the desired + # plot range + for row in data: + good = np.where((np.array(row.times) > self._plot_start) & (np.array(row.times) < self._plot_end))[0] + times = list(np.array(row.times)[good]) + data = list(np.array(row.data)[good]) + medians = list(np.array(row.median)[good]) + maxs = list(np.array(row.max)[good]) + mins = list(np.array(row.min)[good]) + all_dates.extend(times) + all_values.extend(data) + all_means.extend(data) + all_medians.extend(medians) + all_maxs.extend(maxs) + all_mins.extend(mins) + + tab = Table([all_dates, all_values], names=('dates', 'euvalues')) + hist = ed.EdbMnemonic(mnemonic, start_date, end_date, tab, meta, info) + hist.median = all_medians + hist.median_times = all_dates + hist.max = all_maxs + hist.min = all_mins + hist.mean = all_means + return hist + + def get_history_every_change(self, mnemonic, start_date, end_date): + """Retrieve data from the JWQL database for a single mnemonic over the given time range + for every_change data (e.g. IMIR_HK_FW_POS_RATIO, where we need to calculate and store + an average value for each block of time where IMIR_HK_FW_CUR_POS has a different value. + This has nothing to do with 'change-only' data as stored in the EDB. + + Parameters + ---------- + mnemonic : str + Name of mnemonic whose data is to be retrieved + + start_date : datetime.datetime + Beginning date of data retrieval + + end_date : datetime.datetime + Ending date of data retrieval + + Returns + ------- + hist : dict + Retrieved data. Keys are the value of the dependency mnemonic, + and each value is a 3-tuple. The tuple contains the times, values, + and mean value of the primary mnemonic corresponding to the times + that they dependency mnemonic has the value of the key. + """ + data = session.query(self.history_table) \ + .filter(self.history_table.mnemonic == mnemonic, + self.history_table.latest_query > start_date, + self.history_table.latest_query < end_date) + + # Set up the dictionary to contain the data + hist = {} + + # Place the data from the database into the appropriate key + for row in data: + if row.dependency_value in hist: + if len(hist[row.dependency_value]) > 0: + times, values, medians, devs = hist[row.dependency_value] + medians = [medians] + devs = [devs] + else: + times = [] + values = [] + medians = [] + devs = [] + + # Keep only data that fall at least partially within the plot range + if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) \ + | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))): + times.extend(row.time) + values.extend(row.mnemonic_value) + medians.append(row.median) + devs.append(row.stdev) + hist[row.dependency_value] = (times, values, medians, devs) + else: + if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) \ + | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))): + hist[row.dependency_value] = (row.time, row.mnemonic_value, row.median, row.stdev) + + return hist + + def get_mnemonic_info(self, mnemonic, starting_time, ending_time, telemetry_type): + """Wrapper around the code to query the EDB, filter the result, and calculate + appropriate statistics for a single mnemonic + + Parameters + ---------- + mnemonic : dict + Dictionary of information about the mnemonic to be processed. Dictionary + as read in from the json file of mnemonics to be monitored. 
+
+        starting_time : datetime.datetime
+            Beginning time for query
+
+        ending_time : datetime.datetime
+            Ending time for query
+
+        telemetry_type : str
+            How the telemetry will be processed. This is the top-level hierarchy from
+            the json file containing the mnemonics. e.g. "daily_means", "every_change"
+
+        Returns
+        -------
+        good_mnemonic_data : jwql.edb.engineering_database.EdbMnemonic
+            EdbMnemonic instance containing filtered data for the given mnemonic
+        """
+        logging.info(f'Querying EDB for: {mnemonic["name"]} from {starting_time} to {ending_time}')
+
+        try:
+            mnemonic_data = ed.get_mnemonic(mnemonic["name"], starting_time, ending_time)
+
+            if len(mnemonic_data) == 0:
+                logging.info(f"No data returned from EDB for {mnemonic['name']} between {starting_time} and {ending_time}")
+                return None
+            else:
+                logging.info(f'Retrieved from EDB, {mnemonic["name"]} between {starting_time} and {ending_time} contains {len(mnemonic_data)} data points.')
+
+            # If the mnemonic has an alternative name (due to e.g. repeated calls for that mnemonic but with
+            # different averaging schemes), then update the mnemonic_identifier in the returned EdbMnemonic
+            # instance. This will allow different versions to be saved in the database. For example, monitoring
+            # a current value when a corresponding voltage value is low (i.e. turned off) and when it is high
+            # (turned on).
+            if "database_id" in mnemonic:
+                mnemonic_data.mnemonic_identifier = mnemonic["database_id"]
+            else:
+                mnemonic_data.mnemonic_identifier = mnemonic["name"]
+
+        except (urllib.error.HTTPError, HTTPError):
+            # Sanity check that the mnemonic is available in the EDB.
+            logging.info(f'{mnemonic["name"]} not accessible with current search.')
+            return None
+
+        # Filter the data to keep only those values/times where the dependency conditions are met.
+        if ((len(mnemonic["dependency"]) > 0) and (telemetry_type != "every_change")):
+            good_mnemonic_data = self.filter_telemetry(mnemonic["name"], mnemonic_data, mnemonic['dependency'])
+            logging.info(f'After filtering by dependencies, the number of data points is {len(good_mnemonic_data)}')
+        else:
+            # No dependencies. Keep all the data
+            good_mnemonic_data = mnemonic_data
+            good_mnemonic_data.blocks = [0]
+
+        if telemetry_type == "every_change":
+            # If this is "every_change" data (i.e. we want to find the mean value of the mnemonic corresponding to
+            # each block of time when some dependency mnemonic changes value), then locate those changes in the
+            # dependency data here. Note that this adds the "every_change_values" attribute, which is not present
+            # for other telemetry types, but will be needed for plotting and saving data in the database.
+            good_mnemonic_data = self.find_all_changes(good_mnemonic_data, mnemonic['dependency'])
+
+        if telemetry_type == 'time_interval':
+            good_mnemonic_data.mean_time_block = utils.get_averaging_time_duration(mnemonic["mean_time_block"])
+
+        # If the filtered data contains enough entries, then proceed.
+        if len(good_mnemonic_data) > 0:
+            logging.info(f'get_mnemonic_info returning data of length {len(good_mnemonic_data)}')
+            return good_mnemonic_data
+        else:
+            logging.info(f'get_mnemonic_info returning data with zero length')
+            return None
+
+    def identify_tables(self, inst, tel_type):
+        """Determine which database tables to use for a given type of telemetry.
+
+        Parameters
+        ----------
+        inst : str
+            Name of instrument (e.g. nircam)
+
+        tel_type : str
+            Type of telemetry. This comes from the json file listing all mnemonics to be monitored.
+ Examples include "every_change", "daily", "all", etc + """ + mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst] + if '_means' in tel_type: + tel_type = tel_type.strip('_means') + tel_type = tel_type.title().replace('_', '') + self.history_table_name = f'{mixed_case_name}EDB{tel_type}Stats' + self.history_table = getattr(database_interface, f'{mixed_case_name}EDB{tel_type}Stats') + + def most_recent_search(self, telem_name): + """Query the database and return the information + on the most recent query, indicating the last time the + EDB Mnemonic monitor was executed. + + Parameters + ---------- + telem_name : str + Mnemonic to search for + + Returns + ------- + query_result : datetime.datetime + Date of the ending range of the previous query + """ + query = session.query(self.history_table).filter(self.history_table.mnemonic == telem_name).order_by(self.history_table.latest_query).all() + + if len(query) == 0: + base_time = '2022-11-15 00:00:0.0' + query_result = datetime.datetime.strptime(base_time, '%Y-%m-%d %H:%M:%S.%f') + logging.info(f'\tNo query history for {telem_name}. Returning default "previous query" date of {base_time}.') + else: + query_result = query[-1].latest_query + logging.info(f'For {telem_name}, the previous query time is {query_result}') + + return query_result + + def multiday_mnemonic_query(self, mnemonic_dict, starting_time_list, ending_time_list, telemetry_type): + """Wrapper function. In order to avoid any issues with giant tables, for a given mnemonic we query the + EDB for only one day's worth of data at a time. For each day we retrieve the data, retrieve the data + for any dependencies, filter the data based on the dependencies, and then perform any requested + averaging before moving on to the next day. + + Parameters + ---------- + mnemonic_dict : dict + Dictionary of information for a single mnemonic. This comes from the json file describing all + mnemonics to be monitored + + starting_time_list : list + List of datetime values indicating beginning query times + + ending_time_list : list + List of datetime values indicating the end time of each query + + telemetry_type : str + Type of telemetry being retrieved. This is from the top-level of the json file describing + all mnemonics to be monitored. Examples include "every_change", "daily", "all". + + Returns + ------- + all_data : jwql.edb.engineering_database.EdbMnemonic + EDBMnemonic instance containing the mnemonic's filtered, averaged data spanning the entire + time range between the earliest start time and latest end time. Note that if averaging is + done, then the data table in this instance contains the averaged data. The original data + is not returned. + """ + multiday_table = Table() + multiday_median_times = [] + multiday_mean_vals = [] + multiday_stdev_vals = [] + multiday_median_vals = [] + multiday_max_vals = [] + multiday_min_vals = [] + multiday_every_change_data = [] + info = {} + meta = {} + identifier = mnemonic_dict[self._usename] + + # In cases where a mnemonic is going to be plotted as a product of itself with another mnemonic, + # construct a name that reflects this fact and use it in the mnemonic_identifer attribute. This + # will then end up as the plot title later. 
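+        # Worked example (hypothetical names): for a mnemonic "SE_ZIMIRICEA" with a
+        # plot_data entry of "*SE_ZBUSVLT,mean", split(',')[0] yields "*SE_ZBUSVLT",
+        # so the resulting product_identifier is "SE_ZIMIRICEA*SE_ZBUSVLT".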
+        if '*' in mnemonic_dict["plot_data"]:
+            # Strip off any comma-separated list of what to plot
+            second_part = mnemonic_dict["plot_data"].split(',')[0]
+            # Define the mnemonic identifier to be *
+            product_identifier = f'{mnemonic_dict[self._usename]}{second_part}'
+            logging.info(f'In multiday, product_identifier is: {product_identifier}')
+
+        # Work one start time/end time pair at a time.
+        for i, (starttime, endtime) in enumerate(zip(starting_time_list, ending_time_list)):
+            # This function wraps around the EDB query and dependency filtering.
+            mnemonic_info = self.get_mnemonic_info(mnemonic_dict, starttime, endtime, telemetry_type)
+
+            # If data are returned, do the appropriate averaging
+            if mnemonic_info is not None:
+
+                identifier = mnemonic_info.mnemonic_identifier
+                info = mnemonic_info.info
+                meta = mnemonic_info.meta
+
+                # Calculate mean/median/stdev
+                mnemonic_info = calculate_statistics(mnemonic_info, telemetry_type)
+
+                # If this mnemonic is going to be plotted as a product with another mnemonic, then
+                # retrieve the second mnemonic info here
+                if '*' in mnemonic_dict["plot_data"]:
+
+                    if telemetry_type == 'every_change':
+                        raise ValueError("Plotting product of two mnemonics is not supported for every-change data.")
+
+                    temp_dict = deepcopy(mnemonic_dict)
+                    temp_dict["name"] = mnemonic_dict["plot_data"].split(',')[0].strip('*')
+                    product_mnemonic_info = self.get_mnemonic_info(temp_dict, starttime, endtime, telemetry_type)
+                    logging.info(f'Length of data for product mnemonic: {len(mnemonic_info)}')
+
+                    if product_mnemonic_info is None:
+                        logging.info(f'{temp_dict["name"]} to use as product has no data between {starttime} and {endtime}.\n\n')
+                        continue
+
+                    # If either mnemonic is change-only data, then first interpolate it
+                    # onto the dates of the other. If they are both every-change data,
+                    # then interpolate onto the mnemonic with the smaller date range
+                    if mnemonic_info.meta['TlmMnemonics'][0]['AllPoints'] == 0:
+                        if product_mnemonic_info.meta['TlmMnemonics'][0]['AllPoints'] == 0:
+                            # If both mnemonics are change-only, then we need to translate them both
+                            # to all-points.
+                            delta_t = timedelta(seconds=1.)
+                            mnem_numpts = (mnemonic_info.data["dates"][-1] - mnemonic_info.data["dates"][0]) / delta_t + 1
+                            mnem_new_dates = [mnemonic_info.data["dates"][0] + i * delta_t for i in range(int(mnem_numpts))]
+
+                            prod_numpts = (product_mnemonic_info.data["dates"][-1] - product_mnemonic_info.data["dates"][0]) / delta_t + 1
+                            prod_new_dates = [product_mnemonic_info.data["dates"][0] + i * delta_t for i in range(int(prod_numpts))]
+
+                            # Interpolate each onto its new list of allPoints dates. When they are multiplied below,
+                            # they will be interpolated onto the same list of dates.
+ mnemonic_info.interpolate(mnem_new_dates) + product_mnemonic_info.interpolate(prod_new_dates) + + # Update metadata to reflect that these are now allPoints data + product_mnemonic_info.meta['TlmMnemonics'][0]['AllPoints'] = 1 + else: + mnemonic_info.interpolate(product_mnemonic_info.data["dates"]) + # Now that we have effectively converted the change-only data into allPoints data, + # modify the metadata to reflect that + mnemonic_info.meta['TlmMnemonics'][0]['AllPoints'] = 1 + else: + if product_mnemonic_info.meta['TlmMnemonics'][0]['AllPoints'] == 0: + # Interpolate onto the allPoints set of dates, and update the metadata + product_mnemonic_info.interpolate(mnemonic_info.data["dates"]) + product_mnemonic_info.meta['TlmMnemonics'][0]['AllPoints'] = 1 + else: + pass + + # Multiply the mnemonics together to get the quantity to be plotted + combined = mnemonic_info * product_mnemonic_info + logging.info(f'Length of data for product of mnemonics: {len(combined)}') + + # Calculate mean/median/stdev of the product data + mnemonic_info = calculate_statistics(combined, telemetry_type) + + # Combine information from multiple days here. If averaging is done, keep track of + # only the averaged data. If no averaging is done, keep all data. + if telemetry_type != 'all': + multiday_median_times.extend(mnemonic_info.median_times) + multiday_mean_vals.extend(mnemonic_info.mean) + multiday_median_vals.extend(mnemonic_info.median) + multiday_max_vals.extend(mnemonic_info.max) + multiday_min_vals.extend(mnemonic_info.min) + multiday_stdev_vals.extend(mnemonic_info.stdev) + if telemetry_type == 'every_change': + multiday_every_change_data.extend(mnemonic_info.every_change_values) + else: + multiday_median_times.extend(mnemonic_info.data["dates"].data) + multiday_mean_vals.extend(mnemonic_info.data["euvalues"].data) + multiday_stdev_vals.extend(mnemonic_info.stdev) + multiday_median_vals.extend(mnemonic_info.median) + multiday_max_vals.extend(mnemonic_info.max) + multiday_min_vals.extend(mnemonic_info.min) + + else: + logging.info(f'{mnemonic_dict["name"]} has no data between {starttime} and {endtime}.') + continue + + # If all daily queries return empty results, get the info metadata from the EDB, so + # that we can at least populate that in the output EDBMnemonic instance. + if len(info) == 0: + info = ed.get_mnemonic_info(mnemonic_dict["name"]) + + # Combine the mean values and median time data from multiple days into a single EdbMnemonic + # instance. + multiday_table["dates"] = multiday_median_times + multiday_table["euvalues"] = multiday_median_vals + all_data = ed.EdbMnemonic(identifier, starting_time_list[0], ending_time_list[-1], + multiday_table, meta, info) + all_data.stdev = multiday_stdev_vals + all_data.mean = multiday_mean_vals + all_data.median = multiday_median_vals + all_data.max = multiday_max_vals + all_data.min = multiday_min_vals + all_data.median_times = multiday_median_times + + # If it is an every_change mnemonic, then we need to also keep track of the dependency + # values that correspond to the mean values. 
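+        # For example (hypothetical values): for IMIR_HK_FW_POS_RATIO, every_change_values
+        # might look like ['F770W', 'F1000W', ...], with one entry per averaged block in
+        # median_times, so that each mean can later be grouped by the filter wheel position
+        # that was in place when it was measured.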
+ if telemetry_type == 'every_change': + all_data.every_change_values = multiday_every_change_data + + # Set the mnemonic identifier to be * + # This will be used in the title of the plot later + if '*' in mnemonic_dict["plot_data"]: + all_data.mnemonic_identifier = product_identifier + + logging.info(f'DONE retrieving/filtering/averaging data for {mnemonic_dict["name"]}') + return all_data + + def run(self, instrument, mnemonic_dict, plot_start=None, plot_end=None): + """Run the monitor on a single mnemonic. + + Parameters + ---------- + instrument : str + Instrument name (e.g. nircam) + + mnemonic_dict : dict + Dictionary of information for a single mnemonic. Keys include: "name", "description", + "depenency", "plot_data", "yellow_limits", "red_limits", "plot_category". In normal + operation, this is read in from the json file that lists all mnemonics to be monitored + + plot_start : datetime.datetime + Starting time for the output plot + + plot_end : datetime.datetime + Ending time for the output plot + """ + # Container to hold and organize all plots + self.figures = {} + self.instrument = instrument + self._today = datetime.datetime.now() + + # Set the limits for the telemetry plots if necessary + if plot_start is None: + plot_start = self._today - datetime.timedelta(days=EDB_DEFAULT_PLOT_RANGE) + + if plot_end is None: + plot_end = self._today + + # Only used as fall-back plot range for cases where there is no data + self._plot_start = plot_start + self._plot_end = plot_end + + # At the top level, we loop over the different types of telemetry. These types + # largely control if/how the data will be averaged. + for telemetry_kind in mnemonic_dict: + telem_type = telemetry_kind + logging.info(f'Working on telemetry_type: {telem_type}') + + # For the combined telemetry types (e.g. "all+daily_mean") break up + # into its component parts. Work on the second part (e.g. "daily_mean") + # first, and then the "all" part afterwards + if telemetry_kind in ALLOWED_COMBINATION_TYPES: + telem_type = telemetry_kind.split('+')[1] + logging.info(f'Working first on {telem_type}') + + # Figure out the time duration over which the mnemonic should be queried. In + # most cases this is just a full day. In some cases ("daily_average" telem_type) + # the query will span a shorter time since the mnemonic won't change much over + # a full day. + self.query_duration = utils.get_query_duration(telem_type) + + # Determine which database tables are needed based on instrument. A telemetry + # type of "all" indicates that no time-averaging is done, and therefore the + # data are not stored in the JWQL database (for database table size reasons). + if telem_type != 'all': + self.identify_tables(instrument, telem_type) + + # Work on one mnemonic at a time + for mnemonic in mnemonic_dict[telemetry_kind]: + logging.info(f'Working on {mnemonic["name"]}') + create_new_history_entry = True + + # Only two types of plots are currently supported. Plotting the data in the EdbMnemonic + # directly, and plotting it as the product with a second EdbMnemonic + if '*' not in mnemonic["plot_data"] and 'nominal' not in mnemonic["plot_data"]: + raise NotImplementedError(('The plot_data entry in the mnemonic dictionary can currently only ' + 'be "nominal" or "*", indicating that the current ' + 'mnemonic should be plotted as the product of the mnemonic*. ' + 'e.g. for a mnemonic that reports current, plot the data as a power by ' + 'multiplying with a mnemonic that reports voltage. 
No other mnemonic ' + 'combination schemes have been implemented.')) + + # A mnemonic that is being monitored in more than one way will have a secondary name to + # use for the database, stored in the "database_id" key. + self._usename = 'name' + if 'database_id' in mnemonic: + self._usename = 'database_id' + + # Construct the mnemonic identifer to be used for database entries and plot titles + if '*' in mnemonic["plot_data"]: + # Define the mnemonic identifier to be * + term2 = mnemonic["plot_data"].split(',')[0] + product_identifier = f'{mnemonic[self._usename]}{term2}' + else: + product_identifier = mnemonic[self._usename] + + if telem_type != 'all': + # Find the end time of the previous query from the database. + most_recent_search = self.most_recent_search(product_identifier) + + # For daily_means mnemonics, we force the search to always start at noon, and + # have a 1 day cadence + if telem_type == 'daily_means': + most_recent_search = datetime.datetime.combine(most_recent_search.date(), datetime.time(hour=12)) + + logging.info(f'Most recent search is {most_recent_search}.') + logging.info(f'Query cadence is {self.query_cadence}') + + if plot_end > (most_recent_search + self.query_cadence): + # Here we need to query the EDB to cover the entire plot range + logging.info("Plot range extends outside the time contained in the JWQLDB. Need to query the EDB.") + logging.info(f"Plot_end: {plot_end}") + logging.info(f"Most recent search: {most_recent_search}") + logging.info(f"Search end: {most_recent_search + self.query_cadence}") + starttime = most_recent_search + self.query_cadence + logging.info(f"New starttime: {starttime}") + else: + # Here the entire plot range is before the most recent search, + # so all we need to do is query the JWQL database for the data. + logging.info(f"Plot time span contained entirely in JWQLDB. No need to query EDB.") + create_new_history_entry = False + starttime = None + + else: + # In the case where telemetry data have no averaging done, we do not store the data + # in the JWQL database, in order to save space. So in this case, we will retrieve + # all of the data from the EDB directly, from some default start time until the + # present day. + starttime = plot_start + create_new_history_entry = False + + query_start_times, query_end_times = self.generate_query_start_times(starttime) + logging.info(f'Query start times: {query_start_times}') + logging.info(f'Query end times: {query_end_times}') + + if telem_type != 'all': + if query_start_times is not None: + + # Query the EDB/JWQLDB, filter by dependencies, and perform averaging + new_data = self.multiday_mnemonic_query(mnemonic, query_start_times, query_end_times, telem_type) + + else: + # In this case, all the data needed are already in the JWQLDB, so return an empty + # EDBMnemonic instance. This will be combined with the data from the JWQLDB later. + info = ed.get_mnemonic_info(mnemonic["name"]) + new_data = empty_edb_instance(mnemonic[self._usename], plot_start, plot_end, info=info) + new_data.mnemonic_identifier = product_identifier + logging.info(f'All data needed are already in JWQLDB.') + create_new_history_entry = False + else: + # For data where no averaging is done, all data must be retrieved from EDB. They are not + # stored in the JWQLDB + new_data = self.multiday_mnemonic_query(mnemonic, query_start_times, query_end_times, telem_type) + + # Save the averaged/smoothed data and dates/times to the database, but only for cases where we + # are averaging. 
For cases with no averaging the database would get too large too quickly. In + # that case the monitor will re-query the EDB for the entire history each time. + if telem_type != "all": + + # "every_change" data must be treated differently from other types of averaging, since + # those mnemonics have their data separated into collections based on the value of a + # dependency. + if telem_type != 'every_change': + + # Retrieve the historical data from the database, so that we can add the new data + # to it + historical_data = self.get_history(new_data.mnemonic_identifier, plot_start, plot_end, info=new_data.info, + meta=new_data.meta) + ending = starttime + if ending is None: + ending = plot_end + historical_data.requested_end_time = ending + + logging.info(f'Retrieved data from JWQLDB. Number of data points: {len(historical_data)}') + + # Add the data newly filtered and averaged data retrieved from the EDB to the JWQLDB + # If no new data were retrieved from the EDB, then there is no need to add an entry to the JWQLDB + if create_new_history_entry: + self.add_new_block_db_entry(new_data, query_start_times[-1]) + logging.info('New data added to the JWQLDB.') + else: + logging.info("No new data retrieved from EDB, so no new entry added to JWQLDB") + + # Now add the new data to the historical data + mnemonic_info = new_data + historical_data + logging.info(f'Combined new data plus historical data contains {len(mnemonic_info)} data points.') + else: + # "every_change" data is more complex, and requires custom functions + # Retrieve the historical data from the database, so that we can add the new data + # to it + historical_data = self.get_history_every_change(new_data.mnemonic_identifier, plot_start, plot_end) + logging.info(f'Retrieved data from JWQLDB. Number of data points per key:') + for key in historical_data: + logging.info(f'Key: {key}, Num of Points: {len(historical_data[key][0])}') + if historical_data == {}: + logging.info('No historical data') + + # Before we can add the every-change data to the database, organize it to make it + # easier to access. Note that every_change_data is now a dict rather than an EDBMnemonic instance + every_change_data = organize_every_change(new_data) + + # Add new data to JWQLDB. + # If no new data were retrieved from the EDB, then there is no need to add an entry to the JWQLDB + if create_new_history_entry: + self.add_new_every_change_db_entry(new_data.mnemonic_identifier, every_change_data, mnemonic['dependency'][0]["name"], + query_start_times[-1]) + else: + logging.info("No new data retrieved from EDB, so no new entry added to JWQLDB") + + # Combine the historical data with the new data from the EDB + for key in every_change_data: + logging.info(f'Key: {key}, Num of Points: {len(every_change_data[key][0])}') + logging.info(f'Total number of points in new_data from the EDB: {len(new_data)}') + + # Note that the line below will change mnemonic_info into a dictionary + mnemonic_info = add_every_change_history(historical_data, every_change_data) + + logging.info(f'Combined new data plus historical data. Number of data points per key:') + for key in mnemonic_info: + logging.info(f'Key: {key}, Num of Points: {len(mnemonic_info[key][0])}') + + else: + mnemonic_info = new_data + + # For a telemetry_kind that is a combination of all+something, here we work on the "all" part. 
+ if telemetry_kind in ALLOWED_COMBINATION_TYPES: + temp_telem_type = "all" + + # Query the EDB/JWQLDB, filter by dependencies, and perform averaging + full_query_start_times, full_query_end_times = self.generate_query_start_times(self._plot_start) + additional_data = self.multiday_mnemonic_query(mnemonic, full_query_start_times, full_query_end_times, temp_telem_type) + + # Now arrange the data in a way that makes sense. Place the non-averaged data collected above + # into self.data, and the averaged data into the self.mean and self.median_times attributes + mnemonic_info.mean = mnemonic_info.data["euvalues"].value + mnemonic_info.median_times = mnemonic_info.data["dates"].value + tmp_table = Table() + tmp_table["dates"] = additional_data.data["dates"] + tmp_table["euvalues"] = additional_data.data["euvalues"] + mnemonic_info.data = tmp_table + + # Create plot + # If there is a nominal value, or yellow/red limits to be included in the plot, get those here + nominal = utils.check_key(mnemonic, "nominal_value") + yellow = utils.check_key(mnemonic, "yellow_limits") + red = utils.check_key(mnemonic, "red_limits") + + # Make the plot title as useful as possible. Include the description from the input json + # file. If there is none, fall back to the description from MAST. If that is also not + # present, then the title will be only the mnemonic name. + if 'description' in mnemonic: + plot_title = f'{new_data.mnemonic_identifier}: {mnemonic["description"]}' + elif 'description' in new_data.info: + plot_title = f'{new_data.mnemonic_identifier}: {new_data.info["description"]}' + else: + plot_title = new_data.mnemonic_identifier + + if telemetry_kind == 'every_change': + # For every_change data, the plot is more complex, and we must use the custom + # plot_every_change_data() method. Again, return the figure object without saving it. + figure = plot_every_change_data(mnemonic_info, new_data.mnemonic_identifier, new_data.info["unit"], + savefig=False, out_dir=self.plot_output_dir, show_plot=False, return_components=False, + return_fig=True, title=plot_title, minimal_start=self._plot_start, + minimal_end=self._plot_end) + + elif telemetry_kind in ALLOWED_COMBINATION_TYPES: + figure = mnemonic_info.plot_data_plus_devs(savefig=False, out_dir=self.plot_output_dir, nominal_value=nominal, + yellow_limits=yellow, red_limits=red, return_components=False, + return_fig=True, show_plot=False, title=plot_title) + else: + # For telemetry types other than every_change, the data will be contained in an instance of + # and EDBMnemonic. In this case, we can create the plot using the bokeh_plot method. The default + # behavior is to return the Bokeh figure itself, rather than the script and div. Also, do not + # save the figure and return the figure, or else Bokeh will later fail with an error that figure + # elements are shared between documents. 
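+                    # For example (hypothetical entry), a plot_data value of
+                    # "nominal,median,max" splits into ['nominal', 'median', 'max'],
+                    # which sets plot_median=True and plot_max=True below while
+                    # leaving plot_min=False.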
+ plot_mean = False + plot_median = False + plot_max = False + plot_min = False + plot_parts = mnemonic["plot_data"].split(',') + if 'median' in plot_parts: + # Assume that we want to plot only one of the mean and median + plot_median = True + plot_mean = False + if 'max' in plot_parts: + plot_max = True + if 'min' in plot_parts: + plot_min = True + + figure = mnemonic_info.bokeh_plot(savefig=False, out_dir=self.plot_output_dir, nominal_value=nominal, + yellow_limits=yellow, red_limits=red, return_components=False, + return_fig=True, show_plot=False, title=plot_title, plot_mean=plot_mean, + plot_median=plot_median, plot_max=plot_max, plot_min=plot_min) + + # Add the figure to a dictionary that organizes the plots by plot_category + self.add_figure(figure, mnemonic["plot_category"]) + + # Create a tabbed, gridded set of plots for each category of plot, and save as a json file. + self.tabbed_figure() + + def tabbed_figure(self, ncols=2): + """Create a tabbed object containing a panel of gridded plots in each tab. + + Parameters + ---------- + ncols : int + Number of columns of plots in each plot tab + """ + panel_list = [] + for key, plot_list in self.figures.items(): + grid = gridplot(plot_list, ncols=ncols, merge_tools=False) + + # Create one panel for each plot category + panel_list.append(Panel(child=grid, title=key)) + + # Assign the panels to Tabs + tabbed = Tabs(tabs=panel_list) + + # Save the tabbed plot to a json file + item_text = json.dumps(json_item(tabbed, "tabbed_edb_plot")) + basename = f'edb_{self.instrument}_tabbed_plots.json' + output_file = os.path.join(self.plot_output_dir, basename) + with open(output_file, 'w') as outfile: + outfile.write(item_text) + logging.info(f'JSON file with tabbed plots saved to {output_file}') + + +def add_every_change_history(dict1, dict2): + """Combine two dictionaries that contain every change data. For keys that are + present in both dictionaries, remove any duplicate entries based on date. + + Parameters + ---------- + dict1 : dict + First dictionary to combine + + dict2 : dict + Second dictionary to combine + + Returns + ------- + combined : collections.defaultdict + Combined dictionary + """ + combined = defaultdict(list) + + for key, value in dict1.items(): + if key in dict2: + + if np.min(value[0]) < np.min(dict2[key][0]): + all_dates = np.append(value[0], dict2[key][0]) + all_data = np.append(value[1], dict2[key][1]) + all_medians = np.append(value[2], dict2[key][2]) + all_devs = np.append(value[3], dict2[key][3]) + else: + all_dates = np.append(dict2[key][0], value[0]) + all_data = np.append(dict2[key][1], value[1]) + all_medians = np.append(dict2[key][2], value[2]) + all_devs = np.append(dict2[key][3], value[3]) + + # Not sure how to treat duplicates here. If we remove duplicates, then + # the mean values may not be valid any more. For example, if there is a + # 4 hour overlap, but each mean is for a 24 hour period. We could remove + # those 4 hours of entries, but then what would we do with the mean values + # that cover those times. Let's instead warn the user if there are duplicate + # entries, but don't take any action + unique_dates = np.unique(all_dates, return_index=False) + if len(unique_dates) != len(all_dates): + logging.info(("WARNING - There are duplicate entries in the every-change history " + "and the new entry. 
Keeping and plotting all values, but be sure the " + "data look ok.")) + updated_value = (all_dates, all_data, all_medians, all_devs) + combined[key] = updated_value + else: + combined[key] = value + # Add entries for keys that are in dict2 but not dict1 + for key, value in dict2.items(): + if key not in dict1: + combined[key] = value + return combined + + +def calculate_statistics(mnemonic_instance, telemetry_type): + """Wrapper function around the various methods that can be used to calculate mean/median/ + stdev values for a given mnemonic. The method used depends on the type of telemetry. + + Parameters + ---------- + mnemonic_instance : jwql.edb.engineering_database.EdbMnemonic + EdbMnemonic instance containing the telemetry data to be averaged. + + telemetry_type : str + Type of telemetry. Examples include "daily", "every_change", "all". These values + come from the top-level json file that lists the mnemonics to be monitored. + + Returns + ------- + mnemonic_instance : jwql.edb.engineering_database.EdbMnemonic + Modified EdbMnemonic instance with the "mean", "median", and "stdev" attributes + populated. + """ + if telemetry_type == "daily_means": + mnemonic_instance.daily_stats() + elif telemetry_type == "block_means": + mnemonic_instance.block_stats() + elif telemetry_type == "every_change": + mnemonic_instance.block_stats_filter_positions() + #mnemonic_instance.block_stats(ignore_vals=[0.], ignore_edges=True, every_change=True) + elif telemetry_type == "time_interval": + mnemonic_instance.timed_stats() + elif telemetry_type == "all": + mnemonic_instance.full_stats() + return mnemonic_instance + + +def define_options(parser=None, usage=None, conflict_handler='resolve'): + if parser is None: + parser = argparse.ArgumentParser(usage=usage, conflict_handler=conflict_handler) + + parser.add_argument('--mnem_to_query', type=str, default=None, help='Mnemonic to query for') + parser.add_argument('--plot_start', type=str, default=None, help='Start time for EDB monitor query. Expected format: "2022-10-31"') + parser.add_argument('--plot_end', type=str, default=None, help='End time for EDB monitor query. Expected format: "2022-10-31"') + return(parser) + + +def empty_edb_instance(name, beginning, ending, meta={}, info={}): + """Create an EdbMnemonic instance with an empty data table + + Parameters + ---------- + name : str + Name of mnemonic to attach to the empty EdbMnemonic instance + + beginning : datetime.datetime + Starting time value associated with empty instance + + ending : datetime.datetime + Ending time value associated with empty instance + + meta : dict + Meta data dictionary to attach to meta attribute + + info : dict + Info dictionary to attach to info attribute + + Returns + ------- + var : jwql.edb.engineering_database.EdbMnemonic + Empty instance of EdbMnemonic + """ + tab = Table() + tab["dates"] = [] + tab["euvalues"] = [] + return ed.EdbMnemonic(name, beginning, ending, tab, meta, info) + + +def ensure_list(var): + """Be sure that var is a list. If not, make it one. + + Parameters + ---------- + var : list or str or float or int + Variable to be checked + + Returns + ------- + var : list + var, translated into a list if necessary + """ + if not isinstance(var, list) and not isinstance(var, np.ndarray): + return [var] + else: + return var + + +def organize_every_change(mnemonic): + """Given an EdbMnemonic instance containing every_change data, + organize the information such that there are single 1d arrays + of data and dates for each of the dependency values. 
This will + make plotting and saving in the database much more straight + forward. Note that this is intended to be run on an EdbMnenonic + instance that has come out of multiday_mnemonic_query, so that + the data table contains averaged values. In this case, the + data in the data table will line up with the values given in + the every_change_values attribute. + + Parameters + ---------- + mnemonic : jwql.edb.engineering_database.EdbMnemonic + Object containing all data + + Returns + ------- + all_data : dict + Dictionary of organized results. Keys are the dependency values, + and values are tuples. The first element of each tuple is a list + of dates, the second element is a list of data values, and the third + is a the sigma-clipped mean value of the data. + """ + all_data = {} + + # If the input mnemonic is empty, return an empty dictionary + if len(mnemonic) == 0: + return all_data + + unique_vals = np.unique(mnemonic.every_change_values) + + if not isinstance(mnemonic.every_change_values, np.ndarray): + every_change = np.array(mnemonic.every_change_values) + else: + every_change = mnemonic.every_change_values + + # For each dependency value, pull out the corresponding mnemonic values and times. + for val in unique_vals: + good = np.where(every_change == str(val))[0] # val is np.str_ type. need to convert to str + val_times = mnemonic.data["dates"].data[good] + val_data = mnemonic.data["euvalues"].data[good] + + # Normalize by the expected value + medianval, stdevval = MIRI_POS_RATIO_VALUES[mnemonic.mnemonic_identifier.split('_')[2]][val] + + all_data[val] = (val_times, val_data, medianval, stdevval) + + return all_data + + +def plot_every_change_data(data, mnem_name, units, show_plot=False, savefig=True, out_dir='./', nominal_value=None, yellow_limits=None, + red_limits=None, xrange=(None, None), yrange=(None, None), title=None, return_components=True, return_fig=False, + minimal_start=None, minimal_end=None): + """Create a plot for mnemonics where we want to see the behavior within + each change + + Parameters + ---------- + data : collections.defaultdict + Dictionary containing every_change data to be plotted. Keys should be the values of the + dependency mnemonic, and values should be 3-tuples (list of datetimes, list of data, + mean value) + + mnem_name : str + Name of the mnemonic being plotted. This will be used to generate a filename in the + case where the figure is saved. + + units : astropy.units.unit + Units associated with the data. This will be used as the y-axis label in the plot + + show_plot : bool + If True, show plot on screen rather than returning div and script + + savefig : bool + If True, file is saved to html file + + out_dir : str + Directory into which the html file is saved + + nominal_value : float + Expected or nominal value for the telemetry. If provided, a horizontal dashed line + at this value will be added. + + yellow_limits : list + 2-element list giving the lower and upper limits outside of which the telemetry value + is considered non-nominal. If provided, the area of the plot between these two values + will be given a green background, and that outside of these limits will have a yellow + background. + + red_limits : list + 2-element list giving the lower and upper limits outside of which the telemetry value + is considered worse than in the yellow region. If provided, the area of the plot outside + of these two values will have a red background. + + xrange : tuple + Tuple of min, max datetime values to use as the plot range in the x direction. 
+ + yrange : tuple + Tuple of min, max datetime values to use as the plot range in the y direction. + + title : str + Will be used as the plot title. If None, mnem_name will be used as the title + + return_components : bool + If True, the components (script, div) of the figure will be returned + + return_fig : bool + If True, the Bokeh figure will be returned + + minimal_start : datetime.datetime + In the case where the data to be plotted consists of no or only one point, use this + as the earliest date in the plot + + minimal_end : datetime.datetime + In the case where the data to be plotted consists of no or only one point, use this + as the latest date in the plot + + Returns + ------- + obj : list or bokeh.plotting.figure + If return_components is True, returned object will be a list of [div, script] + If return_figure is True, a bokeh.plotting.figure will be returned + + """ + # Make sure that only one output type is specified, or bokeh will get mad + options = np.array([show_plot, savefig, return_components, return_fig]) + if np.sum(options) > 1: + trues = np.where(options)[0] + raise ValueError((f'{options[trues]} are set to True in plot_every_change_data. Bokeh ' + 'will only allow one of these to be True.')) + + # Create a useful plot title if necessary + if title is None: + title = mnem_name + + # yellow and red limits must come in pairs + if yellow_limits is not None: + if len(yellow_limits) != 2: + yellow_limits = None + if red_limits is not None: + if len(red_limits) != 2: + red_limits = None + + # Create figure + fig = figure(tools='pan,box_zoom,reset,wheel_zoom,save', x_axis_type='datetime', + title=title, x_axis_label='Time', y_axis_label=f'{units}') + + if savefig: + filename = os.path.join(out_dir, f"telem_plot_{mnem_name.replace(' ', '_')}.html") + logging.info(f'Saving plot to: {filename}') + + colors = [int(len(Turbo256) / len(data)) * e for e in range(len(data))] + + # Find the min and max values in the x-range. These may be used for plotting + # the nominal_value line later. Initialize here, and then dial them in based + # on the data. 
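+    # min_time is seeded with the current time and max_time with an early date, so
+    # that the first real data point encountered will replace both seed values.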
+ min_time = datetime.datetime.today() + max_time = datetime.datetime(2021, 12, 25) + + logging.info('In plot_every_change_data:') + for (key, value), color in zip(data.items(), colors): + if len(value) > 0: + val_times, val_data, normval, stdevval = value + val_data = np.array(val_data) + dependency_val = np.repeat(key, len(val_times)) + + # Normalize by normval (the expected value) so that all data will fit on one plot easily + if type(val_data[0]) not in [np.str_, str]: + logging.info(f'key: {key}, len_data: {len(val_data)}, firstentry: {val_data[0]}, stats: {normval}, {stdevval}') + val_data /= normval + + source = ColumnDataSource(data={'x': val_times, 'y': val_data, 'dep': dependency_val}) + + ldata = fig.line(x='x', y='y', line_width=1, line_color=Turbo256[color], source=source, legend_label=key) + cdata = fig.circle(x='x', y='y', fill_color=Turbo256[color], size=8, source=source, legend_label=key) + + hover_tool = HoverTool(tooltips=[('Value', '@dep'), + ('Data', '@y{1.11111}'), + ('Date', '@x{%d %b %Y %H:%M:%S}') + ], mode='mouse', renderers=[cdata]) + hover_tool.formatters = {'@x': 'datetime'} + fig.tools.append(hover_tool) + + if np.min(val_times) < min_time: + min_time = np.min(val_times) + if np.max(val_times) > max_time: + max_time = np.max(val_times) + + # If the input dictionary is empty, then create an empty plot with reasonable + # x range + if len(data.keys()) == 0: + null_dates = [minimal_start, minimal_end] + source = ColumnDataSource(data={'x': null_dates, 'y': [0, 0], 'dep': ['None', 'None']}) + ldata = fig.line(x='x', y='y', line_width=1, line_color='black', source=source, legend_label='None') + ldata.visible = False + totpts = 0 + else: + numpts = [len(val) for key, val in data.items()] + totpts = np.sum(np.array(numpts)) + + # For a plot with zero or one point, set the x and y range to something reasonable + if totpts < 2: + fig.x_range = Range1d(minimal_start - datetime.timedelta(days=1), minimal_end) + bottom, top = (-1, 1) + if yellow_limits is not None: + bottom, top = yellow_limits + if red_limits is not None: + bottom, top = red_limits + fig.y_range = Range1d(bottom, top) + + # If there is a nominal value provided, plot a dashed line for it + if nominal_value is not None: + fig.line([min_time, max_time], [nominal_value, nominal_value], color='black', + line_dash='dashed', alpha=0.5) + + # If limits for warnings/errors are provided, create colored background boxes + if yellow_limits is not None or red_limits is not None: + fig = add_limit_boxes(fig, yellow=yellow_limits, red=red_limits) + + # Make the x axis tick labels look nice + fig.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], + seconds=["%d %b %H:%M:%S.%3N"], + hours=["%d %b %H:%M"], + days=["%d %b %H:%M"], + months=["%d %b %Y %H:%M"], + years=["%d %b %Y"] + ) + fig.xaxis.major_label_orientation = np.pi / 4 + + # Force the axes' range if requested + if xrange[0] is not None: + fig.x_range.start = xrange[0].timestamp() * 1000. + if xrange[1] is not None: + fig.x_range.end = xrange[1].timestamp() * 1000. 
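+    # Bokeh datetime axes are expressed in milliseconds since the epoch, hence the
+    # factor of 1000 applied to the timestamps above. The y-range limits below are
+    # plain data values and need no conversion.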
+ if yrange[0] is not None: + fig.y_range.start = yrange[0] + if yrange[1] is not None: + fig.y_range.end = yrange[1] + + fig.legend.location = "top_left" + fig.legend.click_policy = "hide" + + if savefig: + output_file(filename=filename, title=mnem_name) + save(fig) + set_permissions(filename) + + if show_plot: + show(fig) + if return_components: + script, div = components(fig) + return [div, script] + if return_fig: + return fig + + +if __name__ == '__main__': + module = os.path.basename(__file__).strip('.py') + start_time, log_file = monitor_utils.initialize_instrument_monitor(module) + + parser = define_options() + args = parser.parse_args() + + plot_start_dt = None + plot_end_dt = None + if args.plot_start is not None: + plot_start_dt = datetime.datetime.strptime(args.plot_start, '%Y-%m-%d') + if args.plot_end is not None: + plot_end_dt = datetime.datetime.strptime(args.plot_end, '%Y-%m-%d') + + monitor = EdbMnemonicMonitor() + monitor.execute(args.mnem_to_query, plot_start_dt, plot_end_dt) + monitor_utils.update_monitor_table(module, start_time, log_file) diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor_utils/condition.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor_utils/condition.py new file mode 100755 index 000000000..fc3abb6ee --- /dev/null +++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor_utils/condition.py @@ -0,0 +1,429 @@ +#! /usr/bin/env python +"""Module generates conditions over one or more mnemonics + +This module's purpose is to filter input data based on a given +list of conditions. Data matching the list of conditions can be +extracted. + +If for instance we have a table of time/data values for a particular +mnemonic, and we wish to extract all data where the conditions "x>1" +and "y<0.25" are true, the module looks for all elements where the +condition applies and where it does not apply. Data points that match +the conditions are then copied into a new table. + +Authors +------- + - Daniel Kühbacher + - Bryan Hilbert + +Use +--- + This module is not prepared for standalone use. + + For use in program set condition up like below: + + import the module as follows: + >>>import condition as cond + + Create a list of conditions, each with a relation (e.g. '>'') + and a threshold value. + >>>all_conditions = [] + >>>dep = {"dates": [list_of_datetimes], "euvalues": [list_of_values]} + >>>good_times_1 = cond.relation_test(dep, '>', 0.25) + + >>>dep2 = {"dates": [list_of_datetimes], "euvalues": [list_of_values]} + >>>good_times_2 = cond.relation_test(dep2, '>', 0.25) + + Place condition list into instance of condition class. 
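+    >>>all_conditions.append(good_times_1)
+    >>>all_conditions.append(good_times_2)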
+ >>>full_condition = cond.condition(all_conditions) + + Call the extract_data() method and provide an astropy Table containing + information to be checked against the conditions + >>>full_condition.extract_data(data_table) + + full_condition.extracted_data is then an astropy Table containing + the data that matches the list of conditions +""" +from astropy.table import Table +from copy import deepcopy # only needed for development +import numpy as np + + +class condition: + """Class to hold several subconditions""" + def __init__(self, cond_set): + """Initialize object with set of conditions + + Parameters + ---------- + cond_set : list + List of subconditions objects + """ + self.cond_set = cond_set + + # Initialize parameters + self.time_pairs = [] + self.__state = False + + def __len__(self): + """Return the number of rows in the catalog + """ + return len(self.cond_set) + + def __del__(self): + """Delete object - destructor method""" + del self.time_pairs[:] + + def print_times(self): + """Print conditions time pairs on command line (developement)""" + print('Available time pairs:') + for times in self.time_pairs: + print('list: ' + str(times)) + + def extract_data(self, mnemonic): + """Extract data from the mnemonic that match the condition + + condition is a list of conditions, each of which must have: + time_pairs = [con.time_pars for con in conditions] + + Each element of the time_pairs list should be a list of tuples with (start, end) + + Working example--- + mnemonic = {"dates": np.arange(14), "euvalues": np.array([12., 13., 13., 14, 12, 15, 13, 13, 13, 13, 10, 9, 13, 12])} + cond1_times = [(1., 5), (8, 16.)] + cond2_times = [(3., 6), (10, 14.)] + cond3_times = [(4., 12.)] + + For each time tuple in each condition, find whether each element in mnemonic falls + between the starting and ending times + tf1 = [((mnemonic["dates"] >= t[0]) & (mnemonic["dates"] <= t[1])) for t in cond1_times] + tf2 = [((mnemonic["dates"] >= t[0]) & (mnemonic["dates"] <= t[1])) for t in cond2_times] + tf3 = [((mnemonic["dates"] >= t[0]) & (mnemonic["dates"] <= t[1])) for t in cond3_times] + + Now for each condition, combine the boolean arrays into a single array that describes + whether each element of mnemonic falls within one of the time intervals + tf1_flat = tf1[0] | tf1[1] + tf2_flat = tf2[0] | tf2[1] + tf3_flat = tf3 # because there is only one time interval here + + Now combine the boolean arrays into a single array that describes whether each element + of mnemonic falls within one time interval of all conditions + tf = tf1_flat & tf2_flat & tf3_flat + """ + # 2D matrix to hold boolean values for all conditions + tf_matrix = np.zeros((len(self.cond_set), len(mnemonic["dates"]))).astype(bool) + + # Loop over conditions + for i, cond in enumerate(self.cond_set): + # Check if any of the time pairs include None, which indicates no good data + if None in cond.time_pairs[0]: + self.extracted_data = Table() + self.extracted_data['dates'] = [] + self.extracted_data['euvalues'] = [] + self.block_indexes = [0, 0] + return Table(names=('dates', 'euvalues')), None + else: + # Find whether each mnemonic time falls within each of the good time blocks + tf_cond = [((mnemonic["dates"].data >= times[0]) & (mnemonic["dates"].data <= times[1])) for times in cond.time_pairs] + + if len(tf_cond) > 1: + # If there are multiple blocks of good time pairs, combine them + # into a 2D array (rather than list) + tf_2d = np.zeros((len(tf_cond), len(tf_cond[1]))).astype(bool) + for index in range(len(tf_cond)): + 
tf_2d[index, :] = tf_cond[index] + + # Flatten the 2D boolean array. If the mnemonic's time falls within any of + # the good time pairs, it should be True here + tf_flat = np.any(tf_2d, axis=0) + elif len(tf_cond) == 1: + # If there is only one block of good times, then no need to create + # a 2D array and flatten + tf_flat = np.array(tf_cond) + else: + raise ValueError(f"tf_cond has a length of {len(tf_cond)}, which is not expected.") + + # Create a 2D boolean matrix that will hold the T/F values for all conditions + tf_matrix[i, :] = tf_flat + + # Finally, if the mnemonic's time falls within a good time block for all of the + # conditions, then it is considered good. + tf = np.all(tf_matrix, axis=0) + + # Extract the good data and save it in an array + good_data = Table() + good_data["dates"] = mnemonic["dates"][tf] + good_data["euvalues"] = mnemonic["euvalues"][tf] + self.extracted_data = good_data + + # We need to keep data from distinct blocks of time separate, because we may + # need to calculate statistics for each good time block separately. Use tf to + # find blocks. Anywhere an F falls between some T's, we have a separate block. + # Save tuples of (start_time, end_time) for blocks. + # Save those in self.block_indexes below. + + # Add a False 0th element + tf_plus_false = np.insert(tf, 0, False) + + # Now we need to find the indexes where the array switches from False to True. + # These will be the starting indexes of the blocks. (Remember to subtract 1 in + # order to account for the added False element) + switches = tf_plus_false.astype(int)[0:-1] - tf.astype(int) + switch_to_true = np.where(switches == -1)[0] + switch_to_false = np.where(switches == 1)[0] + + # These indexes apply to the original data. Once we extract the good + # data using tf, we now need to adjust these indexes so that they + # apply to the extracted data. + filtered_indexes = [] + for i in range(len(switch_to_true)): + if i == 0: + diff = switch_to_true[i] + else: + diff = switch_to_true[i] - switch_to_false[i - 1] + switch_to_true -= diff + switch_to_false -= diff + filtered_indexes.append(switch_to_true[i]) + self.block_indexes = filtered_indexes + + # Add the index of the final element if it's not there already + if len(self.block_indexes) > 0: + if self.block_indexes[-1] < len(good_data): + self.block_indexes.append(len(good_data)) + else: + self.block_indexes.append(len(good_data)) + + def get_interval(self, time): + """Returns time interval if "time" is in between starting and + ending times + + Parameters + ---------- + time : float + given time attribute + + Return + ------ + time_pair : tuple + pair of start_time and end_time where time is in between + """ + end_time = 10000000 + start_time = 0 + + # Check every condition + for cond in self.time_pairs: + # Check every time pair in condition + for pair in cond: + if (time > pair[0]) and (time < pair[1]): + if (end_time > pair[1]) and (start_time < pair[0]): + start_time = pair[0] + end_time = pair[1] + break + else: + break + + if (end_time != 10000000) and (start_time != 0): + return [start_time, end_time] + else: + return None + + def state(self, time): + """Checks whether condition is true or false at a given time. 
+ Returns state of the condition at a given time + if state(given time)==True -> condition is true + if state(given time)==False -> condition is false + Checks condition for every sub condition in condition set + + Parameters + ---------- + time : float + Input time for condition query + + Returns + ------- + state : bool + True/False statement whether the condition applies or not + """ + state = self.__state + + for cond in self.time_pairs: + + if self.__check_subcondition(cond, time): + state = True + else: + state = False + break + + return state + + def __check_subcondition(self, cond, time): + """Check if the given time occurs within the time pairs + that are collected within the given condition. + """ + # If there are no values available + if cond[0][0] == 0: + return False + + for time_pair in cond: + # If just a positive time is available, return True + if (time_pair[1] == 0) and (time > time_pair[0]): + return True + + # If given time occurs between a time pair, return True + elif (time_pair[0]) <= time and (time < time_pair[1]): + return True + + else: + pass + + +class relation_test(): + """Class for comparing data points to a threshold value with some relation + """ + def __init__(self, mnemonic, rel, value): + """Initialize parameters. For example, if you have mnemonic data and + you want to know where the data have values > 0.25, then ```rel``` + should be '>' and value should be 0.25. + + Parameters + ---------- + mnemonic : jwql.edb.engineering_database.EdbMnemonic + Object containing time/value data for mnemonic of interest + + rel : str + Relation between the mnemonic data and ```value``` + (e.g. "=", ">") + + value : float + Threshold value for good data. + """ + self.time_pairs = [] + self.mnemonic = mnemonic + self.value = value + + if rel == "=": + rel = "==" + self.rel = rel + + self.time_pairs = self.cond_true_time() + + def cond_true_time(self): + """Find all times where all conditions are true + + Return + ------ + time_pairs : list + List of 2-tuples where each tuple contains the starting and + ending times of a block of data that matches the condition + """ + if self.rel == '>': + opp = '<=' + elif self.rel == '<': + opp = '>=' + elif self.rel == '==': + opp = '!=' + elif self.rel == '!=': + opp = '==' + elif self.rel == '<=': + opp = '>' + elif self.rel == '>=': + opp = '<' + else: + raise ValueError(f'Unrecognized relation: {self.rel}') + + good_points = eval(f'np.where(self.mnemonic["euvalues"] {self.rel} self.value)[0]') + bad_points = eval(f'np.where(self.mnemonic["euvalues"] {opp} self.value)[0]') + + good_time_values = self.mnemonic["dates"][good_points] + bad_time_values = self.mnemonic["dates"][bad_points] + + time_pairs = self.generate_time_pairs(good_time_values, bad_time_values) + return time_pairs + + def generate_time_pairs(self, good_times, bad_times): + """Define blocks of time where a condition is true. Creates a list of + tuples of (start time, end time) where a condition is true, given a + list of times where the condition is true and where it is false. + + For example: + good_times = [2, 3, 4, 7, 8] + bad_times = [0, 1, 5, 6, 9, 10] + + Will return: + [(2, 4), (7, 8)] + + Parameters + ---------- + good_times : list + List of times where some condition is True + + bad_times : list + List of times where some condition is False + + Returns + ------- + good_blocks : list + List of 2-tuples, where each tuple contains the starting and ending + time where the condition is True. 
+ """ + good_times = list(sorted(set(good_times))) + bad_times = list(sorted(set(bad_times))) + + # Take care of the easy cases, where all times are good or all are bad + if len(bad_times) == 0: + if len(good_times) > 0: + # All values are good + return [(good_times[0], good_times[-1])] + else: + # No good or bad values + raise ValueError("No good or bad values provided. Unable to create list of corresponding times.") + else: + if len(good_times) == 0: + # All values are bad. + return [(None, None)] + + # Now the case where there are both good and bad input times + # Combine and sort the good and bad times lists + all_times = np.array(good_times + bad_times) + sort_idx = np.argsort(all_times) + all_times = all_times[sort_idx] + + # Create boolean arrays to match the time arrays. Combine in the same + # way as the good and bad time lists above. + good = [True] * len(good_times) + bad = [False] * len(bad_times) + all_vals = np.array(good + bad) + all_vals = all_vals[sort_idx] + + # Find the indexes where the value switches from one element to the next + a_change_indexes = np.where(all_vals[:-1] != all_vals[1:])[0] + change_indexes = a_change_indexes + 1 + change_indexes = np.insert(change_indexes, 0, 0) + change_len = len(change_indexes) + 1 + + # Now create tuples of the start and end times where the values change + # We need to know if the first element of the data is True or False, + # in order to get the index counters correct below. + if all_vals[0]: + start_idx = 0 + counter_delta = 0 + else: + start_idx = 1 + counter_delta = 1 + + # We need to loop over EVERY OTHER change index, so that in the end we have a list + # of tuples of only the good_times. i.e. we need to skip the blocks corresponding to + # the bad_times. + good_blocks = [] + for counti, strt in enumerate(change_indexes[start_idx:len(change_indexes):2]): + i = counti * 2 + counter_delta + if i < (len(change_indexes) - 1): + good_blocks.append((all_times[strt], all_times[change_indexes[i + 1] - 1])) + else: + good_blocks.append((all_times[strt], all_times[-1])) + + return good_blocks + + +if __name__ == '__main__': + pass diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor_utils/utils.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor_utils/utils.py new file mode 100644 index 000000000..8689a6d58 --- /dev/null +++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor_utils/utils.py @@ -0,0 +1,125 @@ +#! /usr/bin/env python + +"""Utility functions that can be used with the EDB Telemetry Monitor + +Authors +------- + + - Bryan Hilbert + +Use +--- + + This module can be imported as such: + >>> import utils + key = utils.check_key('my_key') +""" + +from datetime import timedelta + +import astropy.units as u + + +def check_key(dictionary, key): + """Check if a given key exists in the input dictionary. If so, return the value + for that key. If not, return None. + + Parameters + ---------- + dictionary : dict + Dictionary + + key : string + Key to search for + + Returns + ------- + obj: obj + Value associated with key, or None + """ + try: + return dictionary[key] + except KeyError: + return None + + +def get_averaging_time_duration(duration_string): + """Turn the string from the mnemonic json file that specifies the time + span to average the data over into an astropy quantity. This function + is intended to be called only for "time_interval" mnemonic types, where + the duration string is assumed to be of the form "X_UNIT", where X is + a number, and UNIT is a unit of time (e.g. 
sec, min, hour, day). + + Parameters + ---------- + duration_string : str + Length of time for the query + + Returns + ------- + time : astropy.units.quantity.Quantity + """ + try: + length, unit = duration_string.split('_') + length = float(length) + + if "min" in unit: + unit = u.minute + elif "sec" in unit: + unit = u.second + elif "hour" in unit: + unit = u.hour + elif "day" in unit: + unit = u.day + else: + raise ValueError(f"Unsupported time unit: {unit}") + + time = length * unit + + except ValueError: + raise ValueError(f"Unexpected/unsupported mnemonic duration string: {duration_string}") + return time + + +def get_query_duration(mnemonic_type): + """Turn the string version of the EDB query duration into a timedelta + quantity. Allowed duration_string values include "daily_means", + "every_change", "block_means", or "time_interval", or "all". These terms + describe more how the mnemonic's data will be processed after it is + retrieved, but we can map each mnemonic type to a length of time to + use for the EDB query. + + Parameters + ---------- + duration_string : str + Length of time for the query + + Returns + ------- + time : datetime.timedelta + """ + if mnemonic_type.lower() == "daily_means": + time = timedelta(days=0.01041667) + elif mnemonic_type in ["every_change", "block_means", "time_interval", "all"]: + time = timedelta(days=1) + else: + raise ValueError(f"Unrecognized mnemonic type: {mnemonic_type}. Unsure what duration to use for EDB query.") + return time + + +def remove_outer_points(telemetry): + """Strip the first and last data points from the input telemetry data. This is because + MAST includes the two datapoints immediately outside the requested time range. + + Parameters + ---------- + telemetry : jwql.edb.engineering_database.EDBMnemonic + Results from an EDB query. 
+ + Returns + ------- + telemetry : jwql.edb.engineering_database.EDBMnemonic + EDBMnemonic object with first and last points removed + """ + telemetry.data.remove_row(0) + telemetry.data.remove_row(-1) diff --git a/jwql/instrument_monitors/common_monitors/readnoise_monitor.py b/jwql/instrument_monitors/common_monitors/readnoise_monitor.py index 4d03d1791..5e4f7bb7f 100755 --- a/jwql/instrument_monitors/common_monitors/readnoise_monitor.py +++ b/jwql/instrument_monitors/common_monitors/readnoise_monitor.py @@ -44,24 +44,25 @@ import crds import matplotlib matplotlib.use('Agg') -import matplotlib.pyplot as plt -import numpy as np -from pysiaf import Siaf -from sqlalchemy.sql.expression import and_ - -from jwql.database.database_interface import FGSReadnoiseQueryHistory, FGSReadnoiseStats -from jwql.database.database_interface import MIRIReadnoiseQueryHistory, MIRIReadnoiseStats -from jwql.database.database_interface import NIRCamReadnoiseQueryHistory, NIRCamReadnoiseStats -from jwql.database.database_interface import NIRISSReadnoiseQueryHistory, NIRISSReadnoiseStats -from jwql.database.database_interface import NIRSpecReadnoiseQueryHistory, NIRSpecReadnoiseStats -from jwql.database.database_interface import session -from jwql.instrument_monitors import pipeline_tools -from jwql.utils import instrument_properties, monitor_utils -from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE -from jwql.utils.logging_functions import log_info, log_fail -from jwql.utils.monitor_utils import update_monitor_table -from jwql.utils.permissions import set_permissions -from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config +import matplotlib.pyplot as plt # noqa: E348 (comparison to true) +import numpy as np # noqa: E348 (comparison to true) +from pysiaf import Siaf # noqa: E348 (comparison to true) +from sqlalchemy.sql.expression import and_ # noqa: E348 (comparison to true) + +from jwql.database.database_interface import FGSReadnoiseQueryHistory, FGSReadnoiseStats # noqa: E348 (comparison to true) +from jwql.database.database_interface import MIRIReadnoiseQueryHistory, MIRIReadnoiseStats # noqa: E348 (comparison to true) +from jwql.database.database_interface import NIRCamReadnoiseQueryHistory, NIRCamReadnoiseStats # noqa: E348 (comparison to true) +from jwql.database.database_interface import NIRISSReadnoiseQueryHistory, NIRISSReadnoiseStats # noqa: E348 (comparison to true) +from jwql.database.database_interface import NIRSpecReadnoiseQueryHistory, NIRSpecReadnoiseStats # noqa: E348 (comparison to true) +from jwql.database.database_interface import session, engine # noqa: E348 (comparison to true) +from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline # noqa: E348 (comparison to true) +from jwql.instrument_monitors import pipeline_tools # noqa: E348 (comparison to true) +from jwql.utils import instrument_properties, monitor_utils # noqa: E348 (comparison to true) +from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE # noqa: E348 (comparison to true) +from jwql.utils.logging_functions import log_info, log_fail # noqa: E348 (comparison to true) +from jwql.utils.monitor_utils import update_monitor_table # noqa: E348 (comparison to true) +from jwql.utils.permissions import set_permissions # noqa: E348 (comparison to true) +from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config, copy_files # noqa: E348 (comparison to true) class Readnoise(): @@ -265,7 +266,7 @@ def 
image_to_png(self, image, outname): plt.title('{}'.format(outname)) # Save the figure - plt.savefig(output_filename, bbox_inches='tight', dpi=200, overwrite=True) + plt.savefig(output_filename, bbox_inches='tight', dpi=200) set_permissions(output_filename) logging.info('\t{} created'.format(output_filename)) @@ -342,28 +343,34 @@ def make_readnoise_image(self, data): The 2D readnoise image. """ - # Create a stack of correlated double sampling (CDS) images using input - # ramp data, combining multiple integrations if necessary. - logging.info('\tCreating stack of CDS difference frames') + logging.info('\tCreating readnoise image') num_ints, num_groups, num_y, num_x = data.shape - for integration in range(num_ints): - if num_groups % 2 == 0: - cds = data[integration, 1::2, :, :] - data[integration, ::2, :, :] - else: - # Omit the last group if the number of groups is odd - cds = data[integration, 1::2, :, :] - data[integration, ::2, :, :][:-1] - if integration == 0: - cds_stack = cds - else: - cds_stack = np.concatenate((cds_stack, cds), axis=0) + # Calculate the readnoise in slices to avoid memory issues on large files. + slice_width = 20 + cols_idx = np.array(np.arange(num_x)[::slice_width]) + readnoise = np.zeros((num_y, num_x)) + for idx in cols_idx: + # Create a stack of correlated double sampling (CDS) images using input + # ramp data, combining multiple integrations if necessary. + for integration in range(num_ints): + if num_groups % 2 == 0: + cds = data[integration, 1::2, :, idx:idx + slice_width] - data[integration, ::2, :, idx:idx + slice_width] + else: + # Omit the last group if the number of groups is odd + cds = data[integration, 1::2, :, idx:idx + slice_width] - data[integration, ::2, :, idx:idx + slice_width][:-1] - # Calculate readnoise by taking the clipped stddev through CDS stack - logging.info('\tCreating readnoise image') - clipped = sigma_clip(cds_stack, sigma=3.0, maxiters=3, axis=0) - readnoise = np.std(clipped, axis=0) - # converts masked array to normal array and fills missing data - readnoise = readnoise.filled(fill_value=np.nan) + if integration == 0: + cds_stack = cds + else: + cds_stack = np.concatenate((cds_stack, cds), axis=0) + + # Calculate readnoise by taking the clipped stddev through CDS stack + clipped = sigma_clip(cds_stack, sigma=3.0, maxiters=3, axis=0) + readnoise_slice = np.ma.std(clipped, axis=0) + + # Add the readnoise in this slice to the full readnoise image + readnoise[:, idx:idx + slice_width] = readnoise_slice return readnoise @@ -380,7 +387,7 @@ def most_recent_search(self): """ query = session.query(self.query_table).filter(and_(self.query_table.aperture == self.aperture, - self.query_table.run_monitor == True)).order_by(self.query_table.end_time_mjd).all() + self.query_table.run_monitor == True)).order_by(self.query_table.end_time_mjd).all() # noqa: E712 (comparison to True) if len(query) == 0: query_result = 59607.0 # a.k.a. Jan 28, 2022 == First JWST images (MIRI) @@ -401,23 +408,32 @@ def process(self, file_list): List of filenames (including full paths) to the dark current files. 
""" + files_to_calibrate = [] + for file in file_list: + processed_file = file.replace("uncal", "refpix") + if not os.path.isfile(processed_file): + files_to_calibrate.append(file) + + # Run the files through the necessary pipeline steps + outputs = run_parallel_pipeline(files_to_calibrate, "uncal", "refpix", self.instrument) for filename in file_list: logging.info('\tWorking on file: {}'.format(filename)) # Get relevant header information for this file self.get_metadata(filename) - - # Run the file through the necessary pipeline steps - pipeline_steps = self.determine_pipeline_steps() - logging.info('\tRunning pipeline on {}'.format(filename)) - try: - processed_file = pipeline_tools.run_calwebb_detector1_steps(filename, pipeline_steps) - logging.info('\tPipeline complete. Output: {}'.format(processed_file)) - set_permissions(processed_file) - except: - logging.info('\tPipeline processing failed for {}'.format(filename)) - continue + + if filename in outputs: + processed_file = outputs[filename] + else: + refpix_file = filename.replace("uncal", "refpix") + if os.path.isfile(refpix_file): + processed_file = refpix_file + else: + # Processed file not available + logging.warning("Calibrated file {} not found".format(refpix_file)) + logging.warning("Skipping file {}".format(filename)) + continue # Find amplifier boundaries so per-amp statistics can be calculated _, amp_bounds = instrument_properties.amplifier_info(processed_file, omit_reference_pixels=True) @@ -431,8 +447,8 @@ def process(self, file_list): # Make the readnoise image readnoise_outfile = os.path.join(self.data_dir, os.path.basename(processed_file.replace('.fits', '_readnoise.fits'))) readnoise = self.make_readnoise_image(cal_data) - fits.writeto(readnoise_outfile, readnoise, overwrite=True) - logging.info('\tReadnoise image saved to {}'.format(readnoise_outfile)) + # fits.writeto(readnoise_outfile, readnoise, overwrite=True) + # logging.info('\tReadnoise image saved to {}'.format(readnoise_outfile)) # Calculate the full image readnoise stats clipped = sigma_clip(readnoise, sigma=3.0, maxiters=5) @@ -451,8 +467,9 @@ def process(self, file_list): readnoise_file = reffile_mapping['readnoise'] logging.info('\tPipeline readnoise reffile is {}'.format(readnoise_file)) pipeline_readnoise = fits.getdata(readnoise_file) - except: + except Exception as e: logging.warning('\tError retrieving pipeline readnoise reffile - assuming all zeros.') + logging.warning('\tError {} was raised'.format(e)) pipeline_readnoise = np.zeros(readnoise.shape) # Find the difference between the current readnoise image and the pipeline readnoise reffile, and record image stats. @@ -508,7 +525,8 @@ def process(self, file_list): readnoise_db_entry[key] = amp_stats[key].astype(float) # Add this new entry to the readnoise database table - self.stats_table.__table__.insert().execute(readnoise_db_entry) + with engine.begin() as connection: + connection.execute(self.stats_table.__table__.insert(), readnoise_db_entry) logging.info('\tNew entry added to readnoise database table') # Remove the raw and calibrated files to save memory space @@ -517,6 +535,7 @@ def process(self, file_list): @log_fail @log_info + @only_one(key='readnoise_monitor') def run(self): """The main method. See module docstrings for further details. 
@@ -548,10 +567,10 @@ def run(self): self.aperture = aperture # Locate the record of the most recent MAST search; use this time - # (plus a 30 day buffer to catch any missing files from the previous + # (plus a buffer to catch any missing files from the previous # run) as the start time in the new MAST search. most_recent_search = self.most_recent_search() - self.query_start = most_recent_search - 30 + self.query_start = most_recent_search - 70 # Query MAST for new dark files for this instrument/aperture logging.info('\tQuery times: {} {}'.format(self.query_start, self.query_end)) @@ -597,13 +616,20 @@ def run(self): logging.info('\t{} does not exist in JWQL filesystem, even though {} does'.format(uncal_filename, filename)) else: num_groups = fits.getheader(uncal_filename)['NGROUPS'] - if num_groups > 10: # skip processing if the file doesnt have enough groups to calculate the readnoise + num_ints = fits.getheader(uncal_filename)['NINTS'] + if instrument == 'miri': + total_cds_frames = int((num_groups - 6) / 2) * num_ints + else: + total_cds_frames = int(num_groups / 2) * num_ints + # Skip processing if the file doesnt have enough groups/ints to calculate the readnoise. + # MIRI needs extra since they omit the first five and last group before calculating the readnoise. + if total_cds_frames >= 10: shutil.copy(uncal_filename, self.data_dir) logging.info('\tCopied {} to {}'.format(uncal_filename, output_filename)) set_permissions(output_filename) new_files.append(output_filename) else: - logging.info('\tNot enough groups to calculate readnoise in {}'.format(uncal_filename)) + logging.info('\tNot enough groups/ints to calculate readnoise in {}'.format(uncal_filename)) except FileNotFoundError: logging.info('\t{} does not exist in JWQL filesystem'.format(file_entry['filename'])) @@ -624,7 +650,8 @@ def run(self): 'files_found': len(new_files), 'run_monitor': monitor_run, 'entry_date': datetime.datetime.now()} - self.query_table.__table__.insert().execute(new_entry) + with engine.begin() as connection: + connection.execute(self.query_table.__table__.insert(), new_entry) logging.info('\tUpdated the query history table') logging.info('Readnoise Monitor completed successfully.') diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/15min_to_db.py b/jwql/instrument_monitors/miri_monitors/data_trending/15min_to_db.py deleted file mode 100755 index 0d755376b..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/15min_to_db.py +++ /dev/null @@ -1,107 +0,0 @@ -#! /usr/bin/env python -''' Auxiliary module to populate database - - This module was used throughout development to populate the database. Since - the EDB had no valid data during implementation, we had to download data - elsewhere. The downloaded data is in .CSV format and can easily be read - by the program. After import and sorting the process_file function extracts - the useful part and pushes it to the auxiliary database. This function can - be implemented in the final cron job. - -Authors -------- - - - Daniel Kühbacher - -Use ---- - Make sure "directory" points to a folder where useable 15min-samples are storedself. - Make sure you already ran .utils/sql_interface.py in order to create an - empty database with prepared tables. Run the module from the command line. 
- -Notes ------ - For developement only -''' - -import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql -import jwql.instrument_monitors.miri_monitors.data_trending.utils.csv_to_AstropyTable as apt -from jwql.instrument_monitors.miri_monitors.data_trending.utils.process_data import once_a_day_routine -from jwql.utils.utils import get_config - -import statistics -import os -import glob - -# set _location_ variable -__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) - -# point to the directory where your files are located! -directory = os.path.join(get_config()['outputs'], 'miri_data_trending', 'trainings_data_15min', '*.CSV') -paths = glob.glob(directory) - - -def process_file(conn, path): - '''Parse CSV file, process data within and put to DB - - Parameters - ---------- - conn : DBobject - Connection object to auxiliary database - path : str - defines file to read - ''' - - # import mnemonic data and append dict to variable below - m_raw_data = apt.mnemonics(path) - - # process raw data with once a day routine - processed_data = once_a_day_routine(m_raw_data) - - # push extracted and filtered data to temporary database - for key, value in processed_data.items(): - - # abbreviate data table - m = m_raw_data.mnemonic(key) - - if key == "SE_ZIMIRICEA": - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, "SE_ZIMIRICEA_IDLE", dataset) - - elif key == "IMIR_HK_ICE_SEC_VOLT4": - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, "IMIR_HK_ICE_SEC_VOLT4_IDLE", dataset) - - else: - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, key, dataset) - - -def main(): - # generate paths - DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') - DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'miri_database.db') - - # connect to temporary database - conn = sql.create_connection(DATABASE_FILE) - - # process every csv file in directory folder - for path in paths: - process_file(conn, path) - - # close connection - sql.close_connection(conn) - print("done") - - -if __name__ == "__main__": - main() diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/dashboard.py b/jwql/instrument_monitors/miri_monitors/data_trending/dashboard.py deleted file mode 100644 index 4c2e0d9f1..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/dashboard.py +++ /dev/null @@ -1,98 +0,0 @@ -#! /usr/bin/env python -"""Combines plots to tabs and prepares dashboard - -The module imports all prepares plot functions from .plots and combines -prebuilt tabs to a dashboard. Furthermore it defines the timerange for -the visualisation. 
Default time_range should be set to about 4 Month (120days) - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``data_container.py``, e.g.: - - :: - import jwql.instrument_monitors.miri_monitors.data_trending.dashboard as dash - dashboard, variables = dash.data_trending_dashboard(start_time, end_time) - -Dependencies ------------- - User must provide "miri_database.db" in folder jwql/database - -""" -import datetime -import os - -from bokeh.embed import components -from bokeh.models.widgets import Tabs - -# import plot functions -from .plots.power_tab import power_plots -from .plots.ice_voltage_tab import volt_plots -from .plots.fpe_voltage_tab import fpe_plots -from .plots.temperature_tab import temperature_plots -from .plots.bias_tab import bias_plots -from .plots.wheel_ratio_tab import wheel_ratios -import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql - - -# configure actual datetime in order to implement range function -now = datetime.datetime.now() -# default_start = now - datetime.timedelta(1000) -default_start = datetime.date(2017, 8, 15).isoformat() - -__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) -PACKAGE_DIR = __location__.split('instrument_monitors')[0] - - -def data_trending_dashboard(start=default_start, end=now): - """Builds dashboard - Parameters - ---------- - start : time - configures start time for query and visualisation - end : time - configures end time for query and visualisation - Return - ------ - plot_data : list - A list containing the JavaScript and HTML content for the dashboard - variables : dict - no use - """ - - # connect to database - # DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') - DATABASE_LOCATION = os.path.join(PACKAGE_DIR, 'database') - DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'miri_database.db') - - conn = sql.create_connection(DATABASE_FILE) - - # some variables can be passed to the template via following - variables = dict(init=1) - - # some variables can be passed to the template via following - variables = dict(init=1) - - # add tabs to dashboard - tab1 = power_plots(conn, start, end) - tab2 = volt_plots(conn, start, end) - tab3 = fpe_plots(conn, start, end) - tab4 = temperature_plots(conn, start, end) - tab5 = bias_plots(conn, start, end) - tab6 = wheel_ratios(conn, start, end) - - # build dashboard - tabs = Tabs(tabs=[tab1, tab2, tab3, tab5, tab4, tab6]) - - # return dashboard to web app - script, div = components(tabs) - plot_data = [div, script] - - # close sql connection - sql.close_connection(conn) - - return plot_data, variables diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/day_to_db.py b/jwql/instrument_monitors/miri_monitors/data_trending/day_to_db.py deleted file mode 100755 index 8b9961464..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/day_to_db.py +++ /dev/null @@ -1,137 +0,0 @@ -#! /usr/bin/env python -''' Auxiliary module to populate database - - This module was used throughout development to populate the database. Since - the EDB had no valid data during implementation we had to download data elsewhere. - The downloaded data is in .CSV format and can easily be read by the program. - After import and sorting the process_file function extracts the useful part and - pushes it to the auxiliary database. This function can be implemented in the - final cron job. 
- -Authors -------- - - - Daniel Kühbacher - -Use ---- - make sure "directory" points to a folder where useable day-samples are stored. - make sure you already ran .utils/sql_interface.py in order to create a empty database - with prepared tables. - Run the module form the command line. - -Notes ------ - For developement only -''' - -import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as mn -import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql -import jwql.instrument_monitors.miri_monitors.data_trending.utils.csv_to_AstropyTable as apt -from jwql.instrument_monitors.miri_monitors.data_trending.utils.process_data import whole_day_routine, wheelpos_routine -from jwql.utils.utils import get_config - -import os -import glob -import statistics - -# set _location_ variable -__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) - -# files with data to initially fill the database -directory = os.path.join(get_config()['outputs'], 'miri_data_trending', 'trainings_data_day', '*.CSV') -paths = glob.glob(directory) - - -def process_file(conn, path): - '''Parse CSV file, process data within and put to DB - - Parameters - ---------- - conn : DBobject - Connection object to temporary database - path : str - defines file to read - ''' - - m_raw_data = apt.mnemonics(path) - - cond3, FW_volt, GW14_volt, GW23_volt, CCC_volt = whole_day_routine(m_raw_data) - FW, GW14, GW23, CCC = wheelpos_routine(m_raw_data) - - # put data from con3 to database - for key, value in cond3.items(): - - m = m_raw_data.mnemonic(key) - - if value is not None: - if len(value) > 2: - if key == "SE_ZIMIRICEA": - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, "SE_ZIMIRICEA_HV_ON", dataset) - - elif key == "IMIR_HK_ICE_SEC_VOLT4": - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, "IMIR_HK_ICE_SEC_VOLT4_HV_ON", dataset) - - else: - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, key, dataset) - - ######################################################################################### - for pos in mn.fw_positions: - try: - data = FW[pos] - for element in data: - sql.add_wheel_data(conn, 'IMIR_HK_FW_POS_RATIO_{}'.format(pos), element) - except KeyError: - pass - - for pos in mn.gw_positions: - try: - data_GW14 = GW14[pos] - data_GW23 = GW23[pos] - - for element in data_GW14: - sql.add_wheel_data(conn, 'IMIR_HK_GW14_POS_RATIO_{}'.format(pos), element) - for element in data_GW23: - sql.add_wheel_data(conn, 'IMIR_HK_GW23_POS_RATIO_{}'.format(pos), element) - except KeyError: - pass - - for pos in mn.ccc_positions: - try: - data = CCC[pos] - for element in data: - sql.add_wheel_data(conn, 'IMIR_HK_CCC_POS_RATIO_{}'.format(pos), element) - except KeyError: - pass - - -def main(): - # point to database - DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') - DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'miri_database.db') - - # connect to temporary database - conn = sql.create_connection(DATABASE_FILE) - - # process all files found ind folder "directory" - for path in paths: - process_file(conn, path) 
- - sql.close_connection(conn) - print("done") - - -if __name__ == "__main__": - main() diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py b/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py deleted file mode 100755 index 3dcbdfdd4..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python -''' Cron Job for miri datatrending -> populates database - - This module holds functions to connect with the engineering database - in order to grab and process data for the specific miri database. The - script queries a daily 15 min chunk and a whole day dataset. These - contain several mnemonics defined in ''mnemonics.py''. The queried data - gets processed and stored in an auxiliary database. - -Authors -------- - - Daniel Kühbacher - -Dependencies ------------- - For further information please contact Brian O'Sullivan - -References ----------- - -''' -import utils.mnemonics as mn -import utils.sql_interface as sql -from utils.process_data import whole_day_routine, wheelpos_routine - -import statistics -import os - -from astropy.time import Time - -__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) -PACKAGE_DIR = __location__.split('instrument_monitors')[0] - - -def process_day_sample(conn, m_raw_data): - '''Parse CSV file, process data within and put to DB - Parameters - ---------- - conn : DBobject - Connection object to temporary database - path : str - defines path to the files - ''' - - m_raw_data = apt.mnemonics(path) - - cond3, FW_volt, GW14_volt, GW23_volt, CCC_volt = whole_day_routine(m_raw_data) - FW, GW14, GW23, CCC = wheelpos_routine(m_raw_data) - - # put data from con3 to database - for key, value in cond3.items(): - - m = m_raw_data.mnemonic(key) - - if value is not None: - if len(value) > 2: - if key == "SE_ZIMIRICEA": - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, "SE_ZIMIRICEA_HV_ON", dataset) - - elif key == "IMIR_HK_ICE_SEC_VOLT4": - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, "IMIR_HK_ICE_SEC_VOLT4_HV_ON", dataset) - - else: - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, key, dataset) - - ########################################################################### - for pos in mn.fw_positions: - try: - data = FW[pos] - for element in data: - sql.add_wheel_data(conn, 'IMIR_HK_FW_POS_RATIO_{}'.format(pos), element) - except KeyError: - pass - - for pos in mn.gw_positions: - try: - data_GW14 = GW14[pos] - data_GW23 = GW23[pos] - - for element in data_GW14: - sql.add_wheel_data(conn, 'IMIR_HK_GW14_POS_RATIO_{}'.format(pos), element) - for element in data_GW23: - sql.add_wheel_data(conn, 'IMIR_HK_GW23_POS_RATIO_{}'.format(pos), element) - except KeyError: - pass - - for pos in mn.ccc_positions: - try: - data = CCC[pos] - for element in data: - sql.add_wheel_data(conn, 'IMIR_HK_CCC_POS_RATIO_{}'.format(pos), element) - except KeyError: - pass - - -def process_15min_sample(conn, m_raw_data): - '''Parse CSV file, process data within and put to DB - Parameters - ---------- - conn : 
DBobject - Connection object to temporary database - path : str - defines path to the files - ''' - - # import mnemonic data and append dict to variable below - m_raw_data = apt.mnemonics(path) - - # process raw data with once a day routine - processed_data = once_a_day_routine(m_raw_data) - - # push extracted and filtered data to temporary database - for key, value in processed_data.items(): - - # abbreviate data table - m = m_raw_data.mnemonic(key) - - if key == "SE_ZIMIRICEA": - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, "SE_ZIMIRICEA_IDLE", dataset) - - elif key == "IMIR_HK_ICE_SEC_VOLT4": - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, "IMIR_HK_ICE_SEC_VOLT4_IDLE", dataset) - - else: - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, key, dataset) - - -def main(): - - from ..utils.engineering_database import query_single_mnemonic - - mnemonic_identifier = 'SA_ZFGOUTFOV' - start_time = Time(2016.0, format='decimalyear') - end_time = Time(2018.1, format='decimalyear') - - mnemonic = query_single_mnemonic(mnemonic_identifier, start_time, end_time) - assert len(mnemonic.data) == mnemonic.meta['paging']['rows'] - - for mnemonic in mn.mnemonic_set_15min: - whole_day.update(mnemonic=query_single_mnemonic(mnemonic, start, end)) - - # open temporary database and write data! - DATABASE_LOCATION = os.path.join(PACKAGE_DIR, 'database') - DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'miri_database.db') - - conn = sql.create_connection(DATABASE_FILE) - - process_day_sample(conn, table_day) - process_15process_15min_sample(conn, table_15min) - - sql.close_connection(conn) diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/__init__.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/bias_tab.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/bias_tab.py deleted file mode 100644 index 7172fb620..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/plots/bias_tab.py +++ /dev/null @@ -1,364 +0,0 @@ -#! /usr/bin/env python -"""Prepares plots for BIAS tab - - Module prepares plots for mnemonics below. Combines plots in a grid and - returns tab object. 
- - Plot 1: - IGDP_MIR_IC_V_VDETCOM - IGDP_MIR_SW_V_VDETCOM - IGDP_MIR_LW_V_VDETCOM - - Plot 2: - IGDP_MIR_IC_V_VSSOUT - IGDP_MIR_SW_V_VSSOUT - IGDP_MIR_LW_V_VSSOUT - - Plot 3: - IGDP_MIR_IC_V_VRSTOFF - IGDP_MIR_SW_V_VRSTOFF - IGDP_MIR_LW_V_VRSTOFF - - Plot 4: - IGDP_MIR_IC_V_VP - IGDP_MIR_SW_V_VP - IGDP_MIR_LW_V_VP - - Plot 5 - IGDP_MIR_IC_V_VDDUC - IGDP_MIR_SW_V_VDDUC - IGDP_MIR_LW_V_VDDUC - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``dashborad.py``, e.g.: - - :: - from .plots.bias_tab import bias_plots - tab = bias_plots(conn, start, end) - -Dependencies ------------- - User must provide database "miri_database.db" - -""" - -import jwql.instrument_monitors.miri_monitors.data_trending.plots.plot_functions as pf -from bokeh.plotting import figure -from bokeh.models.widgets import Panel, Div -from bokeh.layouts import gridplot, Column - - -def vdetcom(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "VDETCOM" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "VDETCOM IC", "IGDP_MIR_IC_V_VDETCOM", start, end, conn, color="red") - b = pf.add_to_plot(p, "VDETCOM SW", "IGDP_MIR_SW_V_VDETCOM", start, end, conn, color="orange") - c = pf.add_to_plot(p, "VDETCOM LW", "IGDP_MIR_LW_V_VDETCOM", start, end, conn, color="green") - - pf.add_hover_tool(p, [a, b, c]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def vssout(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "VSSOUT" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "VSSOUT IC", "IGDP_MIR_IC_V_VSSOUT", start, end, conn, color="red") - b = pf.add_to_plot(p, "VSSOUT SW", "IGDP_MIR_SW_V_VSSOUT", start, end, conn, color="orange") - c = pf.add_to_plot(p, "VSSOUT LW", "IGDP_MIR_LW_V_VSSOUT", start, end, conn, color="green") - - pf.add_hover_tool(p, [a, b, c]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def vrstoff(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. 
datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "VRSTOFF" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "VRSTOFF IC", "IGDP_MIR_IC_V_VRSTOFF", start, end, conn, color="red") - b = pf.add_to_plot(p, "VRSTOFF SW", "IGDP_MIR_SW_V_VRSTOFF", start, end, conn, color="orange") - c = pf.add_to_plot(p, "VRSTOFF LW", "IGDP_MIR_LW_V_VRSTOFF", start, end, conn, color="green") - - pf.add_hover_tool(p, [a, b, c]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def vp(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "VP" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "VP IC", "IGDP_MIR_IC_V_VP", start, end, conn, color="red") - b = pf.add_to_plot(p, "VP SW", "IGDP_MIR_SW_V_VP", start, end, conn, color="orange") - c = pf.add_to_plot(p, "VP LW", "IGDP_MIR_LW_V_VP", start, end, conn, color="green") - - pf.add_hover_tool(p, [a, b, c]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def vdduc(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "VDDUC" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "VDDUC IC", "IGDP_MIR_IC_V_VDDUC", start, end, conn, color="red") - b = pf.add_to_plot(p, "VDDUC SW", "IGDP_MIR_SW_V_VDDUC", start, end, conn, color="orange") - c = pf.add_to_plot(p, "VDDUC LW", "IGDP_MIR_LW_V_VDDUC", start, end, conn, color="green") - - pf.add_hover_tool(p, [a, b, c]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def bias_plots(conn, start, end): - '''Combines plots to a tab - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : tab object - used by dashboard.py to set up dashboard - ''' - - descr = Div(text= - """ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-    Plotname | Mnemonic | Description
-    VSSOUT   | IGDP_MIR_IC_V_VSSOUT, IGDP_MIR_SW_V_VSSOUT, IGDP_MIR_LW_V_VSSOUT | Detector Bias VSSOUT (IC, SW, & LW)
-    VDETCOM  | IGDP_MIR_IC_V_VDETCOM, IGDP_MIR_SW_V_VDETCOM, IGDP_MIR_LW_V_VDETCOM | Detector Bias VDETCOM (IC, SW, & LW)
-    VRSTOFF  | IGDP_MIR_IC_V_VRSTOFF, IGDP_MIR_SW_V_VRSTOFF, IGDP_MIR_LW_V_VRSTOFF | Detector Bias VRSTOFF (IC, SW, & LW)
-    VP       | IGDP_MIR_IC_V_VP, IGDP_MIR_SW_V_VP, IGDP_MIR_LW_V_VP | Detector Bias VP (IC, SW, & LW)
-    VDDUC    | IGDP_MIR_IC_V_VDDUC, IGDP_MIR_SW_V_VDDUC, IGDP_MIR_LW_V_VDDUC | Detector Bias VDDUC (IC, SW, & LW)
- - """, width=1100) - - plot1 = vdetcom(conn, start, end) - plot2 = vssout(conn, start, end) - plot3 = vrstoff(conn, start, end) - plot4 = vp(conn, start, end) - plot5 = vdduc(conn, start, end) - - lay = gridplot([[plot2, plot1], - [plot3, plot4], - [plot5, None]], - merge_tools=False) - - layout = Column(descr, lay) - - tab = Panel(child=layout, title="BIAS") - - return tab diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/fpe_voltage_tab.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/fpe_voltage_tab.py deleted file mode 100644 index 176259aeb..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/plots/fpe_voltage_tab.py +++ /dev/null @@ -1,411 +0,0 @@ -#! /usr/bin/env python -"""Prepares plots for FPE VOLTAGE tab - - Module prepares plots for mnemonics below. Combines plots in a grid and - returns tab object. - - Plot 1: - IMIR_PDU_V_DIG_5V - IMIR_PDU_I_DIG_5V - - Plot 2: - IMIR_PDU_V_ANA_5V - IMIR_PDU_I_ANA_5V - - Plot 3: - IMIR_PDU_V_ANA_N5V - IMIR_PDU_I_ANA_N5V - - Plot 4: - IMIR_PDU_V_ANA_7V - IMIR_PDU_I_ANA_7V - - Plot 5: - IMIR_PDU_V_ANA_N7V - IMIR_PDU_I_ANA_N7V - - Plot 6: - IMIR_SPW_V_DIG_2R5V - IMIR_PDU_V_REF_2R5V - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``dashborad.py``, e.g.: - - :: - from .plots.fpe_voltage_tab import fpe_plots - tab = fpe_plots(conn, start, end) - -Dependencies ------------- - User must provide database "miri_database.db" - -""" - -import jwql.instrument_monitors.miri_monitors.data_trending.plots.plot_functions as pf -from bokeh.models import LinearAxis, Range1d -from bokeh.plotting import figure -from bokeh.models.widgets import Panel, Div -from bokeh.layouts import gridplot, Column - - -def dig5(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - y_range=[4.9, 5.1], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "FPE Dig. 5V" - pf.add_basic_layout(p) - - p.extra_y_ranges = {"current": Range1d(start=2100, end=2500)} - a = pf.add_to_plot(p, "FPE Dig. 5V", "IMIR_PDU_V_DIG_5V", start, end, conn, color="red") - b = pf.add_to_plot(p, "FPE Dig. 5V Current", "IMIR_PDU_I_DIG_5V", start, end, conn, y_axis="current", color="blue") - p.add_layout(LinearAxis(y_range_name="current", axis_label="Current (mA)", axis_label_text_color="blue"), 'right') - - pf.add_hover_tool(p, [a, b]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def refdig(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - y_range=[2.45, 2.55], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "2.5V Ref and FPE Dig." - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "FPE Dig. 2.5V", "IMIR_SPW_V_DIG_2R5V", start, end, conn, color="orange") - b = pf.add_to_plot(p, "FPE PDU 2.5V REF", "IMIR_PDU_V_REF_2R5V", start, end, conn, color="red") - - pf.add_hover_tool(p, [a, b]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def ana5(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - y_range=[4.95, 5.05], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "FPE Ana. 5V" - pf.add_basic_layout(p) - - p.extra_y_ranges = {"current": Range1d(start=100, end=250)} - a = pf.add_to_plot(p, "FPE Ana. 5V", "IMIR_PDU_V_ANA_5V", start, end, conn, color="red") - b = pf.add_to_plot(p, "FPE Ana. 5V Current", "IMIR_PDU_I_ANA_5V", start, end, conn, y_axis="current", color="blue") - p.add_layout(LinearAxis(y_range_name="current", axis_label="Current (mA)", axis_label_text_color="blue"), 'right') - - pf.add_hover_tool(p, [a, b]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def ana5n(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - y_range=[-5.1, -4.85], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "FPE Ana. N5V" - pf.add_basic_layout(p) - - p.extra_y_ranges = {"current": Range1d(start=100, end=300)} - a = pf.add_to_plot(p, "FPE Ana. N5", "IMIR_PDU_V_ANA_N5V", start, end, conn, color="red") - b = pf.add_to_plot(p, "FPE Ana. N5 Current", "IMIR_PDU_I_ANA_N5V", start, end, conn, y_axis="current", color="blue") - p.add_layout(LinearAxis(y_range_name="current", axis_label="Current (mA)", axis_label_text_color="blue"), 'right') - - pf.add_hover_tool(p, [a, b]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def ana7(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. 
datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - y_range=[6.85, 7.1], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "FPE Ana. 7V" - pf.add_basic_layout(p) - - p.extra_y_ranges = {"current": Range1d(start=300, end=450)} - a = pf.add_to_plot(p, "FPE Ana. 7V", "IMIR_PDU_V_ANA_7V", start, end, conn, color="red") - b = pf.add_to_plot(p, "FPE Ana. 7V Current", "IMIR_PDU_I_ANA_7V", start, end, conn, y_axis="current", color="blue") - p.add_layout(LinearAxis(y_range_name="current", axis_label="Current (mA)", axis_label_text_color="blue"), 'right') - - pf.add_hover_tool(p, [a, b]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def ana7n(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - y_range=[-7.1, -6.9], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "FPE Ana. N7V" - pf.add_basic_layout(p) - - p.extra_y_ranges = {"current": Range1d(start=350, end=400)} - a = pf.add_to_plot(p, "FPE Dig. N7V", "IMIR_PDU_V_ANA_N7V", start, end, conn, color="red") - b = pf.add_to_plot(p, "FPE Ana. N7V Current", "IMIR_PDU_I_ANA_N7V", start, end, conn, y_axis="current", color="blue") - p.add_layout(LinearAxis(y_range_name="current", axis_label="Current (mA)", axis_label_text_color="blue"), 'right') - - pf.add_hover_tool(p, [a, b]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def fpe_plots(conn, start, end): - '''Combines plots to a tab - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : tab object - used by dashboard.py to set up dashboard - ''' - descr = Div(text= - """ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-    Plotname | Mnemonic | Description
-    2.5V Ref and FPE Dig. | IMIR_SPW_V_DIG_2R5V, IMIR_PDU_V_REF_2R5V | FPE 2.5V Digital and FPE 2.5V PDU Reference Voltage
-    FPE Dig. 5V  | IMIR_PDU_V_DIG_5V, IMIR_PDU_I_DIG_5V | FPE 5V Digital Voltage and Current
-    FPE Ana. 5V  | IMIR_PDU_V_ANA_5V, IMIR_PDU_I_ANA_5V | FPE +5V Analog Voltage and Current
-    FPE Ana. N5V | IMIR_PDU_V_ANA_N5V, IMIR_PDU_I_ANA_N5V | FPE -5V Analog Voltage and Current
-    FPE Ana. 7V  | IMIR_PDU_V_ANA_7V, IMIR_PDU_I_ANA_7V | FPE +7V Analog Voltage and Current
-    FPE Ana. N7V | IMIR_PDU_V_ANA_N7V, IMIR_PDU_I_ANA_N7V | FPE -7V Analog Voltage and Current
- - """, width=1100) - - plot1 = dig5(conn, start, end) - plot2 = refdig(conn, start, end) - plot3 = ana5(conn, start, end) - plot4 = ana5n(conn, start, end) - plot5 = ana7(conn, start, end) - plot6 = ana7n(conn, start, end) - - lay = gridplot([[plot2, plot1], - [plot3, plot4], - [plot5, plot6]], - merge_tools=False) - - layout = Column(descr, lay) - - tab = Panel(child=layout, title="FPE VOLTAGE/CURRENT") - - return tab diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/ice_voltage_tab.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/ice_voltage_tab.py deleted file mode 100644 index 067f0bca7..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/plots/ice_voltage_tab.py +++ /dev/null @@ -1,295 +0,0 @@ -#! /usr/bin/env python -"""Prepares plots for ICE VOLTAGE tab - - Module prepares plots for mnemonics below, combines plots in a grid and - returns tab object. - - Plot 1: - IMIR_HK_ICE_SEC_VOLT1 - IMIR_HK_ICE_SEC_VOLT3 - - Plot 2: - IMIR_HK_ICE_SEC_VOLT2 - - Plot 3: - IMIR_HK_ICE_SEC_VOLT4 : IDLE and HV_ON - - Plot 4: - IMIR_HK_FW_POS_VOLT - IMIR_HK_GW14_POS_VOLT - IMIR_HK_GW23_POS_VOLT - IMIR_HK_CCC_POS_VOLT - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``dashborad.py``, e.g.: - - :: - from .plots.ice_voltage_tab import ice_plots - tab = ice_plots(conn, start, end) - -Dependencies ------------- - User must provide database "miri_database.db" - -""" - -import jwql.instrument_monitors.miri_monitors.data_trending.plots.plot_functions as pf -from bokeh.plotting import figure -from bokeh.models.widgets import Panel, Div -from bokeh.layouts import gridplot, Column - - -def volt4(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - y_range=[4.2, 5], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "ICE_SEC_VOLT4" - pf.add_basic_layout(p) - - # add a line renderer with legend and line thickness - - a = pf.add_to_plot(p, "Volt4 Idle", "IMIR_HK_ICE_SEC_VOLT4_IDLE", start, end, conn, color="orange") - b = pf.add_to_plot(p, "Volt4 Hv on", "IMIR_HK_ICE_SEC_VOLT4_HV_ON", start, end, conn, color="red") - - pf.add_hover_tool(p, [a, b]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def volt1_3(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - y_range=[30, 50], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "ICE_SEC_VOLT1/3" - pf.add_basic_layout(p) - - # add a line renderer with legend and line thickness - a = pf.add_to_plot(p, "Volt1", "IMIR_HK_ICE_SEC_VOLT1", start, end, conn, color="red") - b = pf.add_to_plot(p, "Volt3", "IMIR_HK_ICE_SEC_VOLT3", start, end, conn, color="purple") - - pf.add_hover_tool(p, [a, b]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def volt2(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "ICE_SEC_VOLT2" - pf.add_basic_layout(p) - - # add a line renderer with legend and line thickness - a = pf.add_to_plot(p, "Volt2", "IMIR_HK_ICE_SEC_VOLT2", start, end, conn, color="red") - - pf.add_hover_tool(p, [a]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def pos_volt(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - y_range=[280, 300], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "Wheel Sensor Supply" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "FW", "IMIR_HK_FW_POS_VOLT", start, end, conn, color="red") - b = pf.add_to_plot(p, "GW14", "IMIR_HK_GW14_POS_VOLT", start, end, conn, color="purple") - c = pf.add_to_plot(p, "GW23", "IMIR_HK_GW23_POS_VOLT", start, end, conn, color="orange") - d = pf.add_to_plot(p, "CCC", "IMIR_HK_CCC_POS_VOLT", start, end, conn, color="firebrick") - - pf.add_hover_tool(p, [a, b, c, d]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def volt_plots(conn, start, end): - '''Combines plots to a tab - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : tab object - used by dashboard.py to set up dashboard - ''' - - descr = Div(text= - """ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Plotname            | Mnemonic                                      | Description
ICE_SEC_VOLT1/3     | IMIR_HK_ICE_SEC_VOLT1, IMIR_HK_ICE_SEC_VOLT3  | ICE Secondary Voltage (HV) V1 and V3
ICE_SEC_VOLT2       | IMIR_HK_ICE_SEC_VOLT2                         | ICE secondary voltage (HV) V2
ICE_SEC_VOLT4       | IMIR_HK_ICE_SEC_VOLT4                         | ICE secondary voltage (HV) V4 - HV on and IDLE
Wheel Sensor Supply | IMIR_HK_FW_POS_VOLT, IMIR_HK_GW14_POS_VOLT,   | Wheel Sensor supply voltages
                    | IMIR_HK_GW23_POS_VOLT, IMIR_HK_CCC_POS_VOLT   |
- - """, width=1100) - - plot1 = volt1_3(conn, start, end) - plot2 = volt2(conn, start, end) - plot3 = volt4(conn, start, end) - plot4 = pos_volt(conn, start, end) - - lay = gridplot([[plot1, plot2], [plot3, plot4]], merge_tools=False) - layout = Column(descr, lay) - - tab = Panel(child=layout, title="ICE VOLTAGE") - - return tab diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/plot_functions.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/plot_functions.py deleted file mode 100644 index bbc284c6e..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/plots/plot_functions.py +++ /dev/null @@ -1,208 +0,0 @@ -#! /usr/bin/env python -"""Auxilary functions for plots - - Module holds functions that are used for several plots. - - -Authors -------- - - Daniel Kühbacher - -Use ---- - - -Dependencies ------------- - -""" -from bokeh.models import BoxAnnotation -from bokeh.models import ColumnDataSource - -import pandas as pd -import numpy as np - -from astropy.time import Time - - -def pol_regression(x, y, rank): - ''' Calculate polynominal regression of certain rank - Parameters - ---------- - x : list - x parameters for regression - y : list - y parameters for regression - rank : int - rank of regression - Return - ------ - y_poly : list - regression y parameters - ''' - z = np.polyfit(x, y, rank) - f = np.poly1d(z) - y_poly = f(x) - return y_poly - - -def add_hover_tool(p, rend): - ''' Append hover tool to plot - parameters - ---------- - p : bokeh figure - declares where to append hover tool - rend : list - list of renderer to append hover tool - ''' - - from bokeh.models import HoverTool - - # activate HoverTool for scatter plot - hover_tool = HoverTool(tooltips= - [ - ('Name', '$name'), - ('Count', '@data_points'), - ('Mean', '@average'), - ('Deviation', '@deviation'), - ], renderers=rend) - # append hover tool - p.tools.append(hover_tool) - - -def add_limit_box(p, lower, upper, alpha=0.1, color="green"): - ''' Adds box to plot - Parameters - ---------- - p : bokeh figure - declares where to append hover tool - lower : float - lower limit of box - upper : float - upper limit of box - alpha : float - transperency of box - color : str - filling color - ''' - box = BoxAnnotation(bottom=lower, top=upper, fill_alpha=alpha, fill_color=color) - p.add_layout(box) - - -def add_to_plot(p, legend, mnemonic, start, end, conn, y_axis="default", color="red", err='n'): - '''Add scatter and line to certain plot and activates hoover tool - Parameters - ---------- - p : bokeh object - defines plot where line and scatter should be added - legend : str - will be showed in legend of plot - mnemonic : str - defines mnemonic to be plotted - start : datetime - sets start time for data query - end : datetime - sets end time for data query - conn : DBobject - connection object to database - y_axis : str (default='default') - used if secon y axis is provided - color : str (default='dred') - defines color for scatter and line plot - Return - ------ - scat : plot scatter object - used for applying hovertools o plots - ''' - - # convert given start and end time to astropy time - start_str = str(Time(start).mjd) - end_str = str(Time(end).mjd) - - # prepare and execute sql query - sql_c = "SELECT * FROM "+mnemonic+" WHERE start_time BETWEEN "+start_str+" AND "+end_str+" ORDER BY start_time" - temp = pd.read_sql_query(sql_c, conn) - - # put data into Dataframe and define ColumnDataSource for each plot - reg = pd.DataFrame({'reg': pol_regression(temp['start_time'], 
temp['average'], 3)}) - temp = pd.concat([temp, reg], axis=1) - temp['start_time'] = pd.to_datetime(Time(temp['start_time'], format="mjd").datetime) - plot_data = ColumnDataSource(temp) - - # plot data - p.line(x="start_time", y="average", color=color, y_range_name=y_axis, legend=legend, source=plot_data) - scat = p.scatter(x="start_time", y="average", color=color, name=mnemonic, y_range_name=y_axis, legend=legend, source=plot_data) - - # generate error lines if wished - if err != 'n': - # generate error bars - err_xs = [] - err_ys = [] - - for index, item in temp.iterrows(): - err_xs.append((item['start_time'], item['start_time'])) - err_ys.append((item['average'] - item['deviation'], item['average'] + item['deviation'])) - - # plot them - p.multi_line(err_xs, err_ys, color=color, legend=legend) - - return scat - - -def add_to_wplot(p, legend, mnemonic, start, end, conn, nominal, color="red"): - '''Add line plot to figure (for wheelpositions) - Parameters - ---------- - p : bokeh object - defines figure where line schould be plotted - legend : str - will be showed in legend of plot - mnemonic : str - defines mnemonic to be plotted - start : datetime - sets start time for data query - end : datetime - sets end time for data query - conn : DBobject - connection object to database - color : str (default='dred') - defines color for scatter and line plot - ''' - - start_str = str(Time(start).mjd) - end_str = str(Time(end).mjd) - - sql_c = "SELECT * FROM "+mnemonic+" WHERE timestamp BETWEEN "+start_str+" AND "+end_str+" ORDER BY timestamp" - temp = pd.read_sql_query(sql_c, conn) - - # normalize values - temp['value'] -= nominal - # temp['value'] -= 1 - - temp['timestamp'] = pd.to_datetime(Time(temp['timestamp'], format="mjd").datetime) - plot_data = ColumnDataSource(temp) - - p.line(x="timestamp", y="value", color=color, legend=legend, source=plot_data) - p.scatter(x="timestamp", y="value", color=color, legend=legend, source=plot_data) - - -def add_basic_layout(p): - '''Add basic layout to certain plot - Parameters - ---------- - p : bokeh object - defines plot where line and scatter should be added - ''' - p.title.align = "left" - p.title.text_color = "#c85108" - p.title.text_font_size = "25px" - p.background_fill_color = "#efefef" - - p.xaxis.axis_label_text_font_size = "14pt" - p.xaxis.axis_label_text_color = '#2D353C' - p.yaxis.axis_label_text_font_size = "14pt" - p.yaxis.axis_label_text_color = '#2D353C' - - p.xaxis.major_tick_line_color = "firebrick" - p.xaxis.major_tick_line_width = 2 - p.xaxis.minor_tick_line_color = "#c85108" diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/power_tab.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/power_tab.py deleted file mode 100644 index 9d1366def..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/plots/power_tab.py +++ /dev/null @@ -1,247 +0,0 @@ -import jwql.instrument_monitors.miri_monitors.data_trending.plots.plot_functions as pf -from bokeh.plotting import figure -from bokeh.models.widgets import Panel, Div -from bokeh.models import ColumnDataSource, HoverTool -from bokeh.layouts import column - -import pandas as pd - -from astropy.time import Time - - -def power_ice(conn, start, end): - # query data from database - start_str = str(Time(start).mjd) - end_str = str(Time(end).mjd) - - sql_c = "SELECT * FROM SE_ZIMIRICEA_IDLE WHERE start_time BETWEEN "+start_str+" AND "+end_str+" ORDER BY start_time" - _idle = pd.read_sql_query(sql_c, conn) - sql_c = "SELECT * FROM SE_ZIMIRICEA_HV_ON WHERE 
start_time BETWEEN "+start_str+" AND "+end_str+" ORDER BY start_time" - _hv = pd.read_sql_query(sql_c, conn) - - voltage = 30 - _idle['average'] *= voltage - _hv['average'] *= voltage - - _idle['start_time'] = pd.to_datetime(Time(_idle['start_time'], format="mjd").datetime) - _hv['start_time'] = pd.to_datetime(Time(_hv['start_time'], format="mjd").datetime) - - # set column data source - idle = ColumnDataSource(_idle) - hv = ColumnDataSource(_hv) - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - y_range=[5, 14], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Power (W)') - - p.grid.visible = True - p.title.text = "POWER ICE" - pf.add_basic_layout(p) - pf.add_limit_box(p, 6, 8, alpha=0.1, color="green") - - # add a line renderer with legend and line thickness - scat1 = p.scatter(x="start_time", y="average", color='orange', legend="Power idle", source=idle) - scat2 = p.scatter(x="start_time", y="average", color='red', legend="Power hv on", source=hv) - p.line(x="start_time", y="average", color='orange', legend="Power idle", source=idle) - p.line(x="start_time", y="average", color='red', legend="Power hv on", source=hv) - - # generate error bars - err_xs_hv = [] - err_ys_hv = [] - err_xs_idle = [] - err_ys_idle = [] - - for index, item in _hv.iterrows(): - err_xs_hv.append((item['start_time'], item['start_time'])) - err_ys_hv.append((item['average'] - item['deviation'], item['average'] + item['deviation'])) - - for index, item in _idle.iterrows(): - err_xs_idle.append((item['start_time'], item['start_time'])) - err_ys_idle.append((item['average'] - item['deviation'], item['average'] + item['deviation'])) - # plot them - p.multi_line(err_xs_hv, err_ys_hv, color='red', legend='Power hv on') - p.multi_line(err_xs_idle, err_ys_idle, color='orange', legend='Power idle') - - # activate HoverTool for scatter plot - hover_tool = HoverTool(tooltips= - [ - ('count', '@data_points'), - ('mean', '@average'), - ('deviation', '@deviation'), - - ], mode='mouse', renderers=[scat1, scat2]) - - p.tools.append(hover_tool) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def power_fpea(conn, start, end): - - start_str = str(Time(start).mjd) - end_str = str(Time(end).mjd) - - sql_c = "SELECT * FROM SE_ZIMIRFPEA WHERE start_time BETWEEN "+start_str+" AND "+end_str+" ORDER BY start_time" - _fpea = pd.read_sql_query(sql_c, conn) - - voltage = 30 - _fpea['average'] *= voltage - - _fpea['start_time'] = pd.to_datetime(Time(_fpea['start_time'], format = "mjd").datetime) - - # set column data source - fpea = ColumnDataSource(_fpea) - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - y_range=[28.0, 28.5], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Power (W)') - - p.grid.visible = True - p.title.text = "POWER FPE" - pf.add_basic_layout(p) - - # add a line renderer with legend and line thickness - scat1 = p.scatter(x="start_time", y="average", color='orange', legend="Power FPEA", source=fpea) - p.line(x="start_time", y="average", color='orange', legend="Power FPEA", source=fpea) - - err_xs = [] - err_ys = [] - - for index, item in _fpea.iterrows(): - err_xs.append((item['start_time'], item['start_time'])) - err_ys.append((item['average'] - 
item['deviation'], item['average'] + item['deviation'])) - - # plot them - p.multi_line(err_xs, err_ys, color='orange', legend='Power FPEA') - - # activate HoverTool for scatter plot - hover_tool = HoverTool(tooltips= - [ - ('count', '@data_points'), - ('mean', '@average'), - ('deviation', '@deviation'), - - ], renderers=[scat1]) - p.tools.append(hover_tool) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def currents(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - y_range=[0, 1.1], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Current (A)') - - p.grid.visible = True - p.title.text = "FPE & ICE Currents" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "ICE Current idle", "SE_ZIMIRICEA_IDLE", start, end, conn, color="red") - b = pf.add_to_plot(p, "ICE Current HV on", "SE_ZIMIRICEA_HV_ON", start, end, conn, color="orange") - c = pf.add_to_plot(p, "FPE Current", "SE_ZIMIRFPEA", start, end, conn, color="brown") - - pf.add_hover_tool(p, [a, b, c]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def power_plots(conn, start, end): - - descr = Div(text= - """ - - - - - - - - - - - - - - - - - - - - - - - - -
Plotname                    | Mnemonic                                              | Description
POWER ICE                   | SE_ZIMIRICEA * 30V (static)                           | Primary power consumption ICE side A - HV on and IDLE
POWER FPE                   | SE_ZIMIRFPEA * 30V (static)                           | Primary power consumption FPE side A
FPE & ICE Voltages/Currents | SE_ZIMIRFPEA, SE_ZIMIRCEA - *INPUT VOLTAGE* (missing) | Supply voltage and current ICE/FPE
- - """, width=1100) - - plot1 = power_ice(conn, start, end) - plot2 = power_fpea(conn, start, end) - plot3 = currents(conn, start, end) - - layout = column(descr, plot1, plot2, plot3) - - # layout_volt = row(volt4, volt1_3) - tab = Panel(child=layout, title="POWER") - - return tab diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/temperature_tab.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/temperature_tab.py deleted file mode 100644 index 95154dad3..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/plots/temperature_tab.py +++ /dev/null @@ -1,349 +0,0 @@ -#! /usr/bin/env python -"""Prepares plots for TEMPERATURE tab - - Module prepares plots for mnemonics below. Combines plots in a grid and - returns tab object. - - Plot 1: - IGDP_MIR_ICE_T1P_CRYO - IGDP_MIR_ICE_T2R_CRYO - IGDP_MIR_ICE_T3LW_CRYO - IGDP_MIR_ICE_T4SW_CRYO - IGDP_MIR_ICE_T5IMG_CRYO - IGDP_MIR_ICE_T6DECKCRYO - IGDP_MIR_ICE_T7IOC_CRYO - IGDP_MIR_ICE_FW_CRYO - IGDP_MIR_ICE_CCC_CRYO - IGDP_MIR_ICE_GW14_CRYO - IGDP_MIR_ICE_GW23_CRYO - IGDP_MIR_ICE_POMP_CRYO - IGDP_MIR_ICE_POMR_CRYO - IGDP_MIR_ICE_IFU_CRYO - IGDP_MIR_ICE_IMG_CRYO - - Plot 2: - ST_ZTC1MIRIA - ST_ZTC2MIRIA - IMIR_PDU_TEMP - IMIR_IC_SCE_ANA_TEMP1 - IMIR_SW_SCE_ANA_TEMP1 - IMIR_LW_SCE_ANA_TEMP1 - IMIR_IC_SCE_DIG_TEMP - IMIR_SW_SCE_DIG_TEMP - IMIR_LW_SCE_DIG_TEMP - - Plot 3: - IGDP_MIR_IC_DET_TEMP - IGDP_MIR_LW_DET_TEMP - IGDP_MIR_SW_DET_TEMP - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``dashborad.py``, e.g.: - - :: - from .plots.temperature_tab import temperature_plots - tab = temperature_plots(conn, start, end) - -Dependencies ------------- - User must provide database "miri_database.db" - -""" -import jwql.instrument_monitors.miri_monitors.data_trending.plots.plot_functions as pf -from bokeh.plotting import figure -from bokeh.models.widgets import Panel, Div -from bokeh.models import ColumnDataSource -from bokeh.layouts import column - -import pandas as pd - -from astropy.time import Time - - -def cryo(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - y_range=[5.8, 6.4], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Temperature (K)') - - p.grid.visible = True - p.title.text = "Cryo Temperatures" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "T1P", "IGDP_MIR_ICE_T1P_CRYO", start, end, conn, color="brown") - b = pf.add_to_plot(p, "T2R", "IGDP_MIR_ICE_T2R_CRYO", start, end, conn, color="burlywood") - c = pf.add_to_plot(p, "T3LW", "IGDP_MIR_ICE_T3LW_CRYO", start, end, conn, color="cadetblue") - d = pf.add_to_plot(p, "T4SW", "IGDP_MIR_ICE_T4SW_CRYO", start, end, conn, color="chartreuse") - e = pf.add_to_plot(p, "T5IMG", "IGDP_MIR_ICE_T5IMG_CRYO", start, end, conn, color="chocolate") - f = pf.add_to_plot(p, "T6DECK", "IGDP_MIR_ICE_T6DECKCRYO", start, end, conn, color="coral") - g = pf.add_to_plot(p, "T7IOC", "IGDP_MIR_ICE_T7IOC_CRYO", start, end, conn, color="darkorange") - h = pf.add_to_plot(p, "FW", "IGDP_MIR_ICE_FW_CRYO", start, end, conn, color="crimson") - i = pf.add_to_plot(p, "CCC", "IGDP_MIR_ICE_CCC_CRYO", start, end, conn, color="cyan") - j = pf.add_to_plot(p, "GW14", "IGDP_MIR_ICE_GW14_CRYO", start, end, conn, color="darkblue") - k = pf.add_to_plot(p, "GW23", "IGDP_MIR_ICE_GW23_CRYO", start, end, conn, color="darkgreen") - l = pf.add_to_plot(p, "POMP", "IGDP_MIR_ICE_POMP_CRYO", start, end, conn, color="darkmagenta") - m = pf.add_to_plot(p, "POMR", "IGDP_MIR_ICE_POMR_CRYO", start, end, conn, color="darkcyan") - n = pf.add_to_plot(p, "IFU", "IGDP_MIR_ICE_IFU_CRYO", start, end, conn, color="cornflowerblue") - o = pf.add_to_plot(p, "IMG", "IGDP_MIR_ICE_IMG_CRYO", start, end, conn, color="orange") - - pf.add_hover_tool(p, [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o]) - - p.legend.location = "bottom_right" - p.legend.orientation = "horizontal" - p.legend.click_policy = "hide" - - return p - - -def temp(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - start_str = str(Time(start).mjd) - end_str = str(Time(end).mjd) - - sql_c = "SELECT * FROM IGDP_MIR_ICE_INTER_TEMP WHERE start_time BETWEEN "+start_str+" AND "+end_str+" ORDER BY start_time" - temp = pd.read_sql_query(sql_c, conn) - - temp['average'] += 273.15 - reg = pd.DataFrame({'reg': pf.pol_regression(temp['start_time'], temp['average'], 3)}) - temp = pd.concat([temp, reg], axis=1) - - temp['start_time'] = pd.to_datetime(Time(temp['start_time'], format="mjd").datetime) - plot_data = ColumnDataSource(temp) - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - y_range=[275, 295], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Temperature (K)') - - p.grid.visible = True - p.title.text = "IEC Temperatures" - pf.add_basic_layout(p) - - p.line(x="start_time", y="average", color="brown", legend="ICE Internal", source=plot_data) - p.scatter(x="start_time", y="average", color="brown", legend="ICE Internal", source=plot_data) - - a = pf.add_to_plot(p, "ICE IEC A", "ST_ZTC1MIRIA", start, end, conn, color="burlywood") - b = pf.add_to_plot(p, "FPE IEC A", "ST_ZTC2MIRIA", start, end, conn, color="cadetblue") - j = pf.add_to_plot(p, "ICE IEC B", "ST_ZTC1MIRIB", start, end, conn, color="blue") - k = pf.add_to_plot(p, "FPE IEC B.", "ST_ZTC2MIRIB", start, end, conn, color="brown") - c = pf.add_to_plot(p, "FPE PDU", "IMIR_PDU_TEMP", start, end, conn, color="chartreuse") - d = pf.add_to_plot(p, "ANA IC", "IMIR_IC_SCE_ANA_TEMP1", start, end, conn, color="chocolate") - e = pf.add_to_plot(p, "ANA SW", "IMIR_SW_SCE_ANA_TEMP1", start, end, conn, color="coral") - f = pf.add_to_plot(p, "ANA LW", "IMIR_LW_SCE_ANA_TEMP1", start, end, conn, color="darkorange") - g = pf.add_to_plot(p, "DIG IC", "IMIR_IC_SCE_DIG_TEMP", start, end, conn, color="crimson") - h = pf.add_to_plot(p, "DIG SW", "IMIR_SW_SCE_DIG_TEMP", start, end, conn, color="cyan") - i = pf.add_to_plot(p, "DIG LW", "IMIR_LW_SCE_DIG_TEMP", start, end, conn, color="darkblue") - - pf.add_hover_tool(p, [a, b, c, d, e, f, g, h, i, j, k]) - - p.legend.location = "bottom_right" - p.legend.orientation = "horizontal" - p.legend.click_policy = "hide" - - return p - - -def det(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - y_range=[6.395, 6.41], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Temperature (K)') - - p.grid.visible = True - p.title.text = "Detector Temperature" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "Det. Temp. IC", "IGDP_MIR_IC_DET_TEMP", start, end, conn, color="red") - b = pf.add_to_plot(p, "Det. Temp. LW", "IGDP_MIR_LW_DET_TEMP", start, end, conn, color="green") - c = pf.add_to_plot(p, "Det. Temp. 
SW", "IGDP_MIR_SW_DET_TEMP", start, end, conn, color="blue") - - pf.add_hover_tool(p,[a,b,c]) - - p.legend.location = "bottom_right" - p.legend.orientation = "horizontal" - p.legend.click_policy = "hide" - - return p - - -def temperature_plots(conn, start, end): - '''Combines plots to a tab - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : tab object - used by dashboard.py to set up dashboard - ''' - - descr = Div(text= - """ - - - - - - - - - - - - - - - - - - - - - - - - -
Plotname              | Mnemonic                  | Description
CRYO Temperatures     | IGDP_MIR_ICE_T1P_CRYO     | Deck Nominal Temperature (T1)
                      | IGDP_MIR_ICE_T2R_CRYO     | Deck Redundant Temperature (T2)
                      | IGDP_MIR_ICE_T3LW_CRYO    | LW FPM I/F Temperature (T3)
                      | IGDP_MIR_ICE_T4SW_CRYO    | SW FPM I/F Temperature (T4)
                      | IGDP_MIR_ICE_T5IMG_CRYO   | IM FPM I/F Temperature (T5)
                      | IGDP_MIR_ICE_T6DECKCRYO   | A-B Strut Apex Temperature (T6)
                      | IGDP_MIR_ICE_T7IOC_CRYO   | IOC Temperature (T7)
                      | IGDP_MIR_ICE_FW_CRYO      | FWA Temperature
                      | IGDP_MIR_ICE_CCC_CRYO     | CCC Temperature
                      | IGDP_MIR_ICE_GW14_CRYO    | DGA-A (GW14) Temperature
                      | IGDP_MIR_ICE_GW23_CRYO    | DGA-B (GW23) Temperature
                      | IGDP_MIR_ICE_POMP_CRYO    | POMH Nominal Temperature
                      | IGDP_MIR_ICE_POMR_CRYO    | POMH Redundant Temperature
                      | IGDP_MIR_ICE_IFU_CRYO     | MRS (CF) Cal. Source Temperature
                      | IGDP_MIR_ICE_IMG_CRYO     | Imager (CI) Cal. Source Temperature
IEC Temperatures      | ST_ZTC1MIRIA              | ICE IEC Panel Temp A
                      | ST_ZTC2MIRIA              | FPE IEC Panel Temp A
                      | ST_ZTC1MIRIB              | ICE IEC Panel Temp B
                      | ST_ZTC2MIRIB              | FPE IEC Panel Temp B
                      | IGDP_MIR_ICE_INTER_TEMP   | ICE internal Temperature
                      | IMIR_PDU_TEMP             | FPE PDU Temperature
                      | IMIR_IC_SCE_ANA_TEMP1     | FPE SCE Analogue board Temperature IC
                      | IMIR_SW_SCE_ANA_TEMP1     | FPE SCE Analogue board Temperature SW
                      | IMIR_LW_SCE_ANA_TEMP1     | FPE SCE Analogue board Temperature LW
                      | IMIR_IC_SCE_DIG_TEMP      | FPE SCE Digital board Temperature IC
                      | IMIR_SW_SCE_DIG_TEMP      | FPE SCE Digital board Temperature SW
                      | IMIR_LW_SCE_DIG_TEMP      | FPE SCE Digital board Temperature LW
Detector Temperatures | IGDP_MIR_IC_DET_TEMP,     | Detector Temperature (IC, SW & LW)
                      | IGDP_MIR_LW_DET_TEMP,     |
                      | IGDP_MIR_SW_DET_TEMP      |
- - """, width=1100) - - plot1 = cryo(conn, start, end) - plot2 = temp(conn, start, end) - plot3 = det(conn, start, end) - - layout = column(descr, plot1, plot2, plot3) - tab = Panel(child=layout, title="TEMPERATURE") - - return tab diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/plots/wheel_ratio_tab.py b/jwql/instrument_monitors/miri_monitors/data_trending/plots/wheel_ratio_tab.py deleted file mode 100644 index 2eb6c62f1..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/plots/wheel_ratio_tab.py +++ /dev/null @@ -1,316 +0,0 @@ -#! /usr/bin/env python -"""Prepares plots for FPE VOLTAGE tab - - Module prepares plots for mnemonics below. Combines plots in a grid and - returns tab object. - - Plot 1: - IMIR_HK_FW_POS_RATIO_FND - IMIR_HK_FW_POS_RATIO_OPAQUE - IMIR_HK_FW_POS_RATIO_F1000W - IMIR_HK_FW_POS_RATIO_F1130W - IMIR_HK_FW_POS_RATIO_F1280W - IMIR_HK_FW_POS_RATIO_P750L - IMIR_HK_FW_POS_RATIO_F1500W - IMIR_HK_FW_POS_RATIO_F1800W - IMIR_HK_FW_POS_RATIO_F2100W - IMIR_HK_FW_POS_RATIO_F560W - IMIR_HK_FW_POS_RATIO_FLENS - IMIR_HK_FW_POS_RATIO_F2300C - IMIR_HK_FW_POS_RATIO_F770W - IMIR_HK_FW_POS_RATIO_F1550C - IMIR_HK_FW_POS_RATIO_F2550W - IMIR_HK_FW_POS_RATIO_F1140C - IMIR_HK_FW_POS_RATIO_F2550WR - IMIR_HK_FW_POS_RATIO_F1065C - - Plot 2: - IMIR_HK_GW14_POS_RATIO_SHORT - IMIR_HK_GW14_POS_RATIO_MEDIUM - IMIR_HK_GW14_POS_RATIO_LONG - - Plot 3: - IMIR_HK_GW23_POS_RATIO_SHORT - IMIR_HK_GW23_POS_RATIO_MEDIUM - IMIR_HK_GW23_POS_RATIO_LONG - - Plot 4: - IMIR_HK_CCC_POS_RATIO_LOCKED - IMIR_HK_CCC_POS_RATIO_OPEN - IMIR_HK_CCC_POS_RATIO_CLOSED - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``dashboard.py``, e.g.: - - :: - from .plots.wheel_ratio_tab import wheel_plots - tab = wheel_plots(conn, start, end) - -Dependencies ------------- - User must provide database "miri_database.db" - -""" - -import jwql.instrument_monitors.miri_monitors.data_trending.plots.plot_functions as pf -import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as mn -from bokeh.plotting import figure -from bokeh.models.widgets import Panel, Div -from bokeh.layouts import column - - -def gw14(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - y_range=[-2, 2], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='ratio (normalized)') - - p.grid.visible = True - p.title.text = "DGA-A Ratio" - p.title.align = "left" - pf.add_basic_layout(p) - - pf.add_to_wplot(p, "SHORT", "IMIR_HK_GW14_POS_RATIO_SHORT", start, end, conn, mn.gw14_nominals['SHORT'], color="green") - pf.add_to_wplot(p, "MEDIUM", "IMIR_HK_GW14_POS_RATIO_MEDIUM", start, end, conn, mn.gw14_nominals['MEDIUM'], color="red") - pf.add_to_wplot(p, "LONG", "IMIR_HK_GW14_POS_RATIO_LONG", start, end, conn, mn.gw14_nominals['LONG'], color="blue") - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def gw23(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - y_range=[-2, 2], - x_axis_type='datetime', - x_axis_label='Date', - y_axis_label='ratio (normalized)') - - p.grid.visible = True - p.title.text = "DGA-B Ratio" - p.title.align = "left" - pf.add_basic_layout(p) - - pf.add_to_wplot(p, "SHORT", "IMIR_HK_GW23_POS_RATIO_SHORT", start, end, conn, mn.gw23_nominals['SHORT'], color="green") - pf.add_to_wplot(p, "MEDIUM", "IMIR_HK_GW23_POS_RATIO_MEDIUM", start, end, conn, mn.gw23_nominals['MEDIUM'], color="red") - pf.add_to_wplot(p, "LONG", "IMIR_HK_GW23_POS_RATIO_LONG", start, end, conn, mn.gw23_nominals['LONG'], color="blue") - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def ccc(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - y_range=[-2, 2], - x_axis_type='datetime', - x_axis_label='Date', - y_axis_label='ratio (normalized)') - - p.grid.visible = True - p.title.text = "CCC Ratio" - pf.add_basic_layout(p) - - # add_to_wplot(p, "LOCKED", "IMIR_HK_CCC_POS_RATIO_LOCKED", start, end, conn, mn.ccc_nominals['LOCKED'], color="green") - pf.add_to_wplot(p, "OPEN", "IMIR_HK_CCC_POS_RATIO_OPEN", start, end, conn, mn.ccc_nominals['OPEN'], color="red") - pf.add_to_wplot(p, "CLOSED", "IMIR_HK_CCC_POS_RATIO_CLOSED", start, end, conn, mn.ccc_nominals['CLOSED'], color="blue") - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def fw(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. 
datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - y_range=[-6, 4], - x_axis_type='datetime', - x_axis_label='Date', - y_axis_label='ratio (normalized)') - - p.grid.visible = True - p.title.text = "Filterwheel Ratio" - pf.add_basic_layout(p) - - pf.add_to_wplot(p, "FND", "IMIR_HK_FW_POS_RATIO_FND", start, end, conn, mn.fw_nominals['FND'], color="green") - pf.add_to_wplot(p, "OPAQUE", "IMIR_HK_FW_POS_RATIO_OPAQUE", start, end, conn, mn.fw_nominals['OPAQUE'], color="red") - pf.add_to_wplot(p, "F1000W", "IMIR_HK_FW_POS_RATIO_F1000W", start, end, conn, mn.fw_nominals['F1000W'], color="blue") - pf.add_to_wplot(p, "F1130W", "IMIR_HK_FW_POS_RATIO_F1130W", start, end, conn, mn.fw_nominals['F1130W'], color="orange") - pf.add_to_wplot(p, "F1280W", "IMIR_HK_FW_POS_RATIO_F1280W", start, end, conn, mn.fw_nominals['F1280W'], color="firebrick") - pf.add_to_wplot(p, "P750L", "IMIR_HK_FW_POS_RATIO_P750L", start, end, conn, mn.fw_nominals['P750L'], color="cyan") - pf.add_to_wplot(p, "F1500W", "IMIR_HK_FW_POS_RATIO_F1500W", start, end, conn, mn.fw_nominals['F1500W'], color="magenta") - pf.add_to_wplot(p, "F1800W", "IMIR_HK_FW_POS_RATIO_F1800W", start, end, conn, mn.fw_nominals['F1800W'], color="burlywood") - pf.add_to_wplot(p, "F2100W", "IMIR_HK_FW_POS_RATIO_F2100W", start, end, conn, mn.fw_nominals['F2100W'], color="cadetblue") - pf.add_to_wplot(p, "F560W", "IMIR_HK_FW_POS_RATIO_F560W", start, end, conn, mn.fw_nominals['F560W'], color="chartreuse") - pf.add_to_wplot(p, "FLENS", "IMIR_HK_FW_POS_RATIO_FLENS", start, end, conn, mn.fw_nominals['FLENS'], color="brown") - pf.add_to_wplot(p, "F2300C", "IMIR_HK_FW_POS_RATIO_F2300C", start, end, conn, mn.fw_nominals['F2300C'], color="chocolate") - pf.add_to_wplot(p, "F770W", "IMIR_HK_FW_POS_RATIO_F770W", start, end, conn, mn.fw_nominals['F770W'], color="darkorange") - pf.add_to_wplot(p, "F1550C", "IMIR_HK_FW_POS_RATIO_F1550C", start, end, conn, mn.fw_nominals['F1550C'], color="darkgreen") - pf.add_to_wplot(p, "F2550W", "IMIR_HK_FW_POS_RATIO_F2550W", start, end, conn, mn.fw_nominals['F2550W'], color="darkcyan") - pf.add_to_wplot(p, "F1140C", "IMIR_HK_FW_POS_RATIO_F1140C", start, end, conn, mn.fw_nominals['F1140C'], color="darkmagenta") - pf.add_to_wplot(p, "F2550WR", "IMIR_HK_FW_POS_RATIO_F2550WR", start, end, conn, mn.fw_nominals['F2550WR'], color="crimson") - pf.add_to_wplot(p, "F1065C", "IMIR_HK_FW_POS_RATIO_F1065C", start, end, conn, mn.fw_nominals['F1065C'], color="cornflowerblue") - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def wheel_ratios(conn, start, end): - '''Combine plots to a tab - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : tab object - used by dashboard.py to set up dashboard - ''' - - descr = Div(text= - """ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Plotname          | Mnemonic                                     | Description
Filterwheel Ratio | IMIR_HK_FW_POS_RATIO, IMIR_HK_FW_CUR_POS     | FW position sensor ratio (normalised) and commanded position
DGA-A Ratio       | IMIR_HK_GW14_POS_RATIO, IMIR_HK_GW14_CUR_POS | DGA-A position sensor ratio (normalised) and commanded position
DGA-B Ratio       | IMIR_HK_GW23_POS_RATIO, IMIR_HK_GW23_CUR_POS | DGA-B position sensor ratio (normalised) and commanded position
CCC Ratio         | IMIR_HK_CCC_POS_RATIO, IMIR_HK_CCC_CUR_POS   | Contamination Control Cover position sensor ratio (normalised) and commanded position
- - """, width=1100) - - plot1 = fw(conn, start, end) - plot2 = gw14(conn, start, end) - plot3 = gw23(conn, start, end) - plot4 = ccc(conn, start, end) - - layout = column(descr, plot1, plot2, plot3, plot4) - tab = Panel(child=layout, title="WHEEL RATIO") - - return tab diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/utils/__init__.py b/jwql/instrument_monitors/miri_monitors/data_trending/utils/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/utils/condition.py b/jwql/instrument_monitors/miri_monitors/data_trending/utils/condition.py deleted file mode 100755 index f9992f517..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/utils/condition.py +++ /dev/null @@ -1,344 +0,0 @@ -#! /usr/bin/env python -"""Module generates conditions over one or more mnemonics - -The modules purpose is to return True/False for any times by reference of -certain conditions. If for instance the condition "x>1" over a defined period of -time is needed, the module looks for all elements where the condition applies -and where it does not apply. This generates two lists, which contain the "start" -and "end" times of the condition. -A futher function combines the start- and endtimes to time-tuples between which -the condition is known as TRUE. A "state" function returns True/False for an -exact time attribute, whereby the condition is represented in binary form. - -Authors -------- - - Daniel Kühbacher - -Use ---- - This module is not prepared for standalone use. - - For use in programm set condition up like below: - - import the module as follow: - >>>import condition as cond - - generate list with required conditions: - >>>con_set = [ cond.equal(m.mnemonic('IMIR_HK_POM_LOOP'),'OFF'), - cond.smaller(m.mnemonic('IMIR_HK_ICE_SEC_VOLT1'),1), - cond.greater(m.mnemonic('SE_ZIMIRICEA'),0.2)] - - generate object of condition with the con_set as attribute: - >>>condition_object=cond.condition(con_set) - - Now the condition_object can return a True/False statement wheather - the time given as attribut meets the conditions: - - >>>if condition.state(float(element['Primary Time'])): - -> True when condition for the given time applies - -> False when condition for the given time is not applicable - -Dependencies ------------- - no external files needed - -References ----------- - -Notes ------ - -""" - - -class condition: - """Class to hold several subconditions""" - - # contains list of representative time pairs for each subcondition - cond_time_pairs = [] - # state of the condition - __state = False - - # initializes condition through condition set - def __init__(self, cond_set): - """Initialize object with set of conditions - Parameters - ---------- - cond_set : list - list contains subconditions objects - """ - self.cond_set = cond_set - - # destructor -> take care that all time_pairs are deleted! 
- def __del__(self): - """Delete object - destructor method""" - del self.cond_time_pairs[:] - - # prints all stored time pairs (for developement only) - def print_times(self): - """Print conditions time pairs on command line (developement)""" - print('Available time pairs:') - for times in self.cond_time_pairs: - print('list: '+str(times)) - - # returns a interval if time is anywhere in between - def get_interval(self, time): - """Returns time interval if availlable, where "time" is in between - Parameters - ---------- - time : float - given time attribute - Return - ------ - time_pair : tuple - pair of start_time and end_time where time is in between - """ - end_time = 10000000 - start_time = 0 - - # do for every condition - for cond in self.cond_time_pairs: - # do for every time pair in condition - for pair in cond: - if (time > pair[0]) and (time < pair[1]): - if (end_time > pair[1]) and (start_time < pair[0]): - start_time = pair[0] - end_time = pair[1] - break - else: - break - - if (end_time != 10000000) and (start_time != 0): - return [start_time, end_time] - else: - return None - - def generate_time_pairs(start_times, end_times): - """Forms time pairs out of start times and end times - Parameters - ---------- - start_times : list - contains all times where a condition applies - end_times : list - contains all times where the condition does not apply - Return - ------ - time_pair : list - list of touples with start and end time - """ - # internal use only - time_pair: float = [] - - # when the conditons doesn´t apply anyway - if not start_times: - time_pair.append((0, 0)) - - # check if the condition indicates an open time range - elif not end_times: - time_pair.append((start_times[0], 0)) - - # generate time pairs - # for each start time a higher or equal end time is searched for - # these times form am touple which is appended to time_pair : list - else: - time_hook = 0 - last_start_time = 0 - - for start in list(sorted(set(start_times))): - - if(start > time_hook): - for end in list(sorted(set(end_times))): - - if end > start: - - time_pair.append((start, end)) - time_hook = end - break - - if list(sorted(set(start_times)))[-1] > list(sorted(set(end_times)))[-1]: - time_pair.append((list(sorted(set(end_times)))[-1], 0)) - - return(time_pair) - - def state(self, time): - """Checks whether condition is true of false at a given time - Parameters - ---------- - time : float - input time for condition query - Return - ------ - state : bool - True/False statement whether the condition applies or not - """ - # returns state of the condition at a given time - # if state(given time)==True -> condition is true - # if state(given time)==False -> condition is false - # #checks condition for every sub condition in condition set (subconditions) - - state = self.__state - - for cond in self.cond_time_pairs: - - if self.__check_subcondition(cond, time): - state = True - else: - state = False - break - - return state - - def __check_subcondition(self, cond, time): - - # if there are no values availlable - if cond[0][0] == 0: - return False - - for time_pair in cond: - #if just a positive time is availlable, return true - if (time_pair[1] == 0) and (time > time_pair[0]): - - return True - - #if given time occurs between a time pair, return true - elif (time_pair[0]) <= time and (time < time_pair[1]): - - return True - - else: - pass - - -class equal(condition): - """Class to hold single "is equal" subcondition""" - - # add attributes to function - start function "cond_time_pairs()" - def 
__init__(self, mnemonic, value): - """Initializes subconditon - Parameters - ---------- - mnemonic : astropy table - includes mnemomic engineering data and corresponding primary time - value : str - coparison value for equal statement - """ - self.mnemonic = mnemonic - self.value = value - condition.cond_time_pairs.append((self.cond_true_time())) - - # generates a list of time-touples (start_time, end_time) that mark the beginning and end of - # whether the condition is true or not - def cond_true_time(self): - """Filters all values that are equal to a given comparison value - if equal: Primary time -> temp_start - if not equal: Primary time -> temp_end - Return - ------ - time_p : list - list of touples with start and end time - """ - temp_start = [] - temp_end = [] - - for key in self.mnemonic: - - # find all times whoses Raw values equal the given value - if key['value'] == self.value: - temp_start.append(key["time"]) - - # find all end values - else: - temp_end.append(key["time"]) - - time_p = condition.generate_time_pairs(temp_start, temp_end) - return time_p - - -class greater(condition): - """Class to hold single "greater than" subcondition""" - - # add attributes to function - start function "cond_time_pairs()" - def __init__(self, mnemonic, value): - """Initializes subconditon - Parameters - ---------- - mnemonic : astropy table - includes mnemomic engineering data and corresponding primary time - value : str - coparison value for equal statement - """ - self.mnemonic = mnemonic - self.value = value - condition.cond_time_pairs.append((self.cond_true_time())) - - def cond_true_time(self): - """Filters all values that are greater than a given comparison value - if equal: Primary time -> temp_start - if not equal: Primary time -> temp_end - Return - ------ - time_p : list - list of touples with start and end time - """ - temp_start: float = [] - temp_end: float = [] - - for key in self.mnemonic: - - # find all times whose Raw values are grater than the given value - if float(key['value']) > self.value: - temp_start.append(key["time"]) - - # find all end values - else: - temp_end.append(key["time"]) - - time_p = condition.generate_time_pairs(temp_start, temp_end) - return time_p - - -class smaller(condition): - """Class to hold single "greater than" subcondition""" - - # add attributes to function - start function "cond_time_pairs()" - def __init__(self, mnemonic, value): - """Initializes subconditon - Parameters - ---------- - mnemonic : astropy table - includes mnemomic engineering data and corresponding primary time - value : str - coparison value for equal statement - """ - self.mnemonic = mnemonic - self.value = value - condition.cond_time_pairs.append((self.cond_true_time())) - - def cond_true_time(self): - """Filters all values that are greater than a given comparison value - if equal: Primary time -> temp_start - if not equal: Primary time -> temp_end - Return - ------ - time_p : list - list of touples with start and end time - """ - temp_start: float = [] - temp_end: float = [] - - for key in self.mnemonic: - - # find all times whose Raw values are grater than the given value - if float(key['value']) < self.value: - temp_start.append(key["time"]) - - # find all end values - else: - temp_end.append(key["time"]) - - time_p = condition.generate_time_pairs(temp_start, temp_end) - return time_p - - -if __name__ == '__main__': - pass diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/utils/csv_to_AstropyTable.py 
b/jwql/instrument_monitors/miri_monitors/data_trending/utils/csv_to_AstropyTable.py deleted file mode 100755 index b6c6a5e1b..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/utils/csv_to_AstropyTable.py +++ /dev/null @@ -1,149 +0,0 @@ -#! /usr/bin/env python -"""Module for importing and sorting mnemonics - -This module imports a whole set of mnemonics from a .CSV sheet and converts it -to an astropy table. In a second step the table is sorted by its mnemoncis -and for each mnemmonic another astropy table with reduced content is created. -The last step is to append the data (time and engineering value) with its -mnemonic identifier as key to a dictionary. - -Authors -------- - - Daniel Kühbacher - -Use ---- - - -Dependencies ------------- - mnemonics.py -> includes a list of mnemonics to be evaluated - -References ----------- - -Notes ------ - -""" -from astropy.table import Table -from astropy.time import Time -import warnings -import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as mn - - -class mnemonics: - """class to hold a set of mnemonics""" - - __mnemonic_dict = {} - - def __init__(self, import_path): - """main function of this class - Parameters - ---------- - import_path : str - defines file to import (csv sheet) - """ - imported_data = self.import_CSV(import_path) - length = len(imported_data) - - print('{} was imported - {} lines'.format(import_path, length)) - - # look for every mnmonic given in mnemonicy.py - for mnemonic_name in mn.mnemonic_set_base: - temp = self.sort_mnemonic(mnemonic_name, imported_data) - # append temp to dict with related mnemonic - if temp is not None: - self.__mnemonic_dict.update({mnemonic_name: temp}) - else: - warnings.warn("fatal error") - - def import_CSV(self, path): - """imports csv sheet and converts it to AstropyTable - Parameters - ---------- - path : str - defines path to file to import - Return - ------ - imported_data : AstropyTable - container for imported data - """ - # read data from given *CSV file - imported_data = Table.read(path, format='ascii.basic', delimiter=',') - return imported_data - - # returns table of single mnemonic - def mnemonic(self, name): - """Returns table of one single mnemonic - Parameters - ---------- - name : str - name of mnemonic - Return - ------ - __mnemonic_dict[name] : AstropyTable - corresponding table to mnemonic name - """ - try: - return self.__mnemonic_dict[name] - except KeyError: - print('{} not in list'.format(name)) - - # looks for given mnemonic in given table - # returns list containing astropy tables with sorted mnemonics and engineering values - # adds useful meta data to Table - def sort_mnemonic(self, mnemonic, table): - """Looks for all values in table with identifier "mnemonic" - Converts time string to mjd format - Parameters - ---------- - mnemonic : str - identifies which mnemonic to look for - table : AstropyTable - table that stores mnemonics and data - Return - ------ - mnemonic_table : AstropyTable - stores all data associated with identifier "mnemonic" - """ - - temp1: float = [] - temp2 = [] - - # appends present mnemonic data to temp arrays temp1 and temp2 - for item in table: - try: - if item['Telemetry Mnemonic'] == mnemonic: - # convert time string to mjd format - temp = item['Secondary Time'].replace('/','-').replace(' ', 'T') - t = Time(temp, format='isot') - - temp1.append(t.mjd) - temp2.append(item['EU Value']) - except KeyError: - warnings.warn("{} is not in mnemonic table".format(mnemonic)) - - description = ('time','value') - 
data = [temp1, temp2] - - # add some meta data - if len(temp1) > 0: - date_start = temp1[0] - date_end = temp1[len(temp1)-1] - info = {'start':date_start, 'end':date_end} - else: - info = {"n":"n"} - - # add name of mnemonic to meta data of list - info['mnemonic'] = mnemonic - info['len'] = len(temp1) - - # table to return - mnemonic_table = Table(data, names=description, - dtype=('f8', 'str'), meta=info) - return mnemonic_table - - -if __name__ == '__main__': - pass diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/utils/mnemonics.py b/jwql/instrument_monitors/miri_monitors/data_trending/utils/mnemonics.py deleted file mode 100755 index 1245b97c8..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/utils/mnemonics.py +++ /dev/null @@ -1,450 +0,0 @@ -"""Module lists all neccessary mnemonics for MIRI data trending - -The module includes several lists to import to MIRI data trending monitor program. -The lists are used for data aquisation and to set up the initial database. - -Authors -------- - - Daniel Kühbacher - -Use ---- - import mnemoncis as mn - -Dependencies ------------- - further information to included mnemonics: ############### - -References ----------- - -Notes ------ - -""" - -# all mnemonic used for condition 1 (see: draft) -# "SE_ZBUSVLT", -mnemonic_cond_1 = [ -"SE_ZIMIRICEA", - -"IMIR_HK_ICE_SEC_VOLT4", -"IGDP_MIR_ICE_INTER_TEMP", - -"ST_ZTC1MIRIA", -"ST_ZTC1MIRIB", - -"IGDP_MIR_ICE_T1P_CRYO", -"IGDP_MIR_ICE_T2R_CRYO", -"IGDP_MIR_ICE_T3LW_CRYO", -"IGDP_MIR_ICE_T4SW_CRYO", -"IGDP_MIR_ICE_T5IMG_CRYO", -"IGDP_MIR_ICE_T6DECKCRYO", -"IGDP_MIR_ICE_T7IOC_CRYO", -"IGDP_MIR_ICE_FW_CRYO", -"IGDP_MIR_ICE_CCC_CRYO", -"IGDP_MIR_ICE_GW14_CRYO", -"IGDP_MIR_ICE_GW23_CRYO", -"IGDP_MIR_ICE_POMP_CRYO", -"IGDP_MIR_ICE_POMR_CRYO", -"IGDP_MIR_ICE_IFU_CRYO", -"IGDP_MIR_ICE_IMG_CRYO"] - -# all mnemonics used for condition 2 (see: draft) -mnemonic_cond_2=[ -"SE_ZIMIRFPEA", - -"IMIR_PDU_V_DIG_5V", -"IMIR_PDU_I_DIG_5V", -"IMIR_PDU_V_ANA_5V", -"IMIR_PDU_I_ANA_5V", - -"IMIR_PDU_V_ANA_N5V", -"IMIR_PDU_I_ANA_N5V", - -"IMIR_PDU_V_ANA_7V", -"IMIR_PDU_I_ANA_7V", - -"IMIR_PDU_V_ANA_N7V", -"IMIR_PDU_I_ANA_N7V", - -"IMIR_SPW_V_DIG_2R5V", -"IMIR_PDU_V_REF_2R5V", - -"IGDP_MIR_IC_V_VDETCOM", -"IGDP_MIR_SW_V_VDETCOM", -"IGDP_MIR_LW_V_VDETCOM", - -"IGDP_MIR_IC_V_VSSOUT", -"IGDP_MIR_SW_V_VSSOUT", -"IGDP_MIR_LW_V_VSSOUT", -"IGDP_MIR_IC_V_VRSTOFF", - -"IGDP_MIR_SW_V_VRSTOFF", -"IGDP_MIR_LW_V_VRSTOFF", - -"IGDP_MIR_IC_V_VP", -"IGDP_MIR_SW_V_VP", -"IGDP_MIR_LW_V_VP", - -"IGDP_MIR_IC_V_VDDUC", -"IGDP_MIR_SW_V_VDDUC", -"IGDP_MIR_LW_V_VDDUC", - -"IMIR_PDU_TEMP", - -"ST_ZTC2MIRIA", -"ST_ZTC2MIRIB", - -"IMIR_IC_SCE_ANA_TEMP1", -"IMIR_SW_SCE_ANA_TEMP1", -"IMIR_LW_SCE_ANA_TEMP1", - -"IMIR_IC_SCE_DIG_TEMP", -"IMIR_SW_SCE_DIG_TEMP", -"IMIR_LW_SCE_DIG_TEMP", - -"IGDP_MIR_IC_DET_TEMP", -"IGDP_MIR_LW_DET_TEMP", -"IGDP_MIR_SW_DET_TEMP"] - -#mnemonics for 15 min evaluation -mnemonic_set_15min = mnemonic_cond_1 + mnemonic_cond_2 - -#ICE secondary voltages -> apply to condition3 -mnemonic_cond_3 = [ -"IMIR_HK_ICE_SEC_VOLT1", -"IMIR_HK_ICE_SEC_VOLT2", -"IMIR_HK_ICE_SEC_VOLT3", -"IMIR_HK_ICE_SEC_VOLT4", -"SE_ZIMIRICEA"] - -#filter weel positions -fw_positions = [ -"FND", -"OPAQUE", -"F1000W", -"F1130W", -"F1280W", -"P750L", -"F1500W", -"F1800W", -"F2100W", -"F560W", -"FLENS", -"F2300C", -"F770W", -"F1550C", -"F2550W", -"F1140C", -"F2550WR", -"F1065C"] - -#grating weel positions -gw_positions = [ -"SHORT", -"MEDIUM", -"LONG"] - -#contamination control clap positions -ccc_positions = [ -"LOCKED", -"OPEN", 
-"CLOSED"] - -fw_nominals = { -"FND" : -164.46, -"OPAQUE" : 380.42, -"F1000W" : -23.88, -"F1130W" : 138.04, -"F1280W" : -298.14, -"P750L" : 12.79, -"F1500W" : -377.32, -"F1800W" : 435.61, -"F2100W" : -126.04, -"F560W" : 218.13, -"FLENS" : -212.44, -"F2300C" : 306.03, -"F770W" : -61.90, -"F1550C" : 188.88, -"F2550W" : -323.65, -"F1140C" : 83.08, -"F2550WR" : -255.18, -"F1065C" : 261.62 } - -gw23_nominals = { -"SHORT" : 619.81, -"MEDIUM" : 373.31, -"LONG" : 441.4} - -gw14_nominals = { -"SHORT" : 627.49, -"MEDIUM" : 342.71, -"LONG" : 408.75 } - -ccc_nominals = { -"LOCKED" : 577.23, -"OPEN" : 507.86, -"CLOSED" : 399.90} - - -#comprises all mnemonics used throughout he programm -mnemonic_set_base = [ -"SE_ZIMIRICEA", -"SE_ZBUSVLT", - -"IMIR_HK_ICE_SEC_VOLT1", -"IMIR_HK_ICE_SEC_VOLT2", -"IMIR_HK_ICE_SEC_VOLT3", -"IMIR_HK_ICE_SEC_VOLT4", - -"IGDP_MIR_ICE_INTER_TEMP", - -"ST_ZTC1MIRIB", -"ST_ZTC1MIRIA", -"ST_ZTC2MIRIB", -"ST_ZTC2MIRIA", - -"IGDP_MIR_ICE_T1P_CRYO", -"IGDP_MIR_ICE_T2R_CRYO", -"IGDP_MIR_ICE_T3LW_CRYO", -"IGDP_MIR_ICE_T4SW_CRYO", -"IGDP_MIR_ICE_T5IMG_CRYO", -"IGDP_MIR_ICE_T6DECKCRYO", -"IGDP_MIR_ICE_T7IOC_CRYO", -"IGDP_MIR_ICE_FW_CRYO", -"IGDP_MIR_ICE_CCC_CRYO", -"IGDP_MIR_ICE_GW14_CRYO", -"IGDP_MIR_ICE_GW23_CRYO", -"IGDP_MIR_ICE_POMP_CRYO", -"IGDP_MIR_ICE_POMR_CRYO", -"IGDP_MIR_ICE_IFU_CRYO", -"IGDP_MIR_ICE_IMG_CRYO", - -"SE_ZIMIRFPEA", - -"IMIR_PDU_V_DIG_5V", -"IMIR_PDU_I_DIG_5V", -"IMIR_PDU_V_ANA_5V", -"IMIR_PDU_I_ANA_5V", - -"IMIR_PDU_V_ANA_N5V", -"IMIR_PDU_I_ANA_N5V", - -"IMIR_PDU_V_ANA_7V", -"IMIR_PDU_I_ANA_7V", - -"IMIR_PDU_V_ANA_N7V", -"IMIR_PDU_I_ANA_N7V", - -"IMIR_SPW_V_DIG_2R5V", -"IMIR_PDU_V_REF_2R5V", - -"IGDP_MIR_IC_V_VDETCOM", -"IGDP_MIR_SW_V_VDETCOM", -"IGDP_MIR_LW_V_VDETCOM", - -"IGDP_MIR_IC_V_VSSOUT", -"IGDP_MIR_SW_V_VSSOUT", -"IGDP_MIR_LW_V_VSSOUT", -"IGDP_MIR_IC_V_VRSTOFF", - -"IGDP_MIR_SW_V_VRSTOFF", -"IGDP_MIR_LW_V_VRSTOFF", - -"IGDP_MIR_IC_V_VP", -"IGDP_MIR_SW_V_VP", -"IGDP_MIR_LW_V_VP", - -"IGDP_MIR_IC_V_VDDUC", -"IGDP_MIR_SW_V_VDDUC", -"IGDP_MIR_LW_V_VDDUC", - -"IMIR_PDU_TEMP", - -"IMIR_IC_SCE_ANA_TEMP1", -"IMIR_SW_SCE_ANA_TEMP1", -"IMIR_LW_SCE_ANA_TEMP1", - -"IMIR_IC_SCE_DIG_TEMP", -"IMIR_SW_SCE_DIG_TEMP", -"IMIR_LW_SCE_DIG_TEMP", - -"IGDP_MIR_IC_DET_TEMP", -"IGDP_MIR_LW_DET_TEMP", -"IGDP_MIR_SW_DET_TEMP", - -"IMIR_HK_IMG_CAL_LOOP", -"IMIR_HK_IFU_CAL_LOOP", -"IMIR_HK_POM_LOOP", -"IGDP_IT_MIR_IC_STATUS", -"IGDP_IT_MIR_LW_STATUS", -"IGDP_IT_MIR_SW_STATUS", - -"IMIR_HK_FW_POS_VOLT", -"IMIR_HK_FW_POS_RATIO", -"IMIR_HK_FW_CUR_POS", - -"IMIR_HK_GW14_POS_VOLT", -"IMIR_HK_GW14_POS_RATIO", -"IMIR_HK_GW14_CUR_POS", - -"IMIR_HK_GW23_POS_VOLT", -"IMIR_HK_GW23_POS_RATIO", -"IMIR_HK_GW23_CUR_POS", - -"IMIR_HK_CCC_POS_RATIO", -"IMIR_HK_CCC_CUR_POS", -"IMIR_HK_CCC_POS_VOLT" ] - -#mnemonic set for setting up database -mnemonic_set_database = [ -"SE_ZIMIRICEA_IDLE", -"SE_ZIMIRICEA_HV_ON", - -"ICE_POWER_IDLE", -"ICE_POWER_HV_ON", - -"FPE_POWER", - -"SE_ZBUSVLT", - -"IMIR_HK_ICE_SEC_VOLT1", -"IMIR_HK_ICE_SEC_VOLT2", -"IMIR_HK_ICE_SEC_VOLT3", -"IMIR_HK_ICE_SEC_VOLT4_IDLE", -"IMIR_HK_ICE_SEC_VOLT4_HV_ON", - -"IGDP_MIR_ICE_INTER_TEMP", - -"ST_ZTC1MIRIB", -"ST_ZTC1MIRIA", -"ST_ZTC2MIRIB", -"ST_ZTC2MIRIA", - -"IGDP_MIR_ICE_T1P_CRYO", -"IGDP_MIR_ICE_T2R_CRYO", -"IGDP_MIR_ICE_T3LW_CRYO", -"IGDP_MIR_ICE_T4SW_CRYO", -"IGDP_MIR_ICE_T5IMG_CRYO", -"IGDP_MIR_ICE_T6DECKCRYO", -"IGDP_MIR_ICE_T7IOC_CRYO", -"IGDP_MIR_ICE_FW_CRYO", -"IGDP_MIR_ICE_CCC_CRYO", -"IGDP_MIR_ICE_GW14_CRYO", -"IGDP_MIR_ICE_GW23_CRYO", -"IGDP_MIR_ICE_POMP_CRYO", -"IGDP_MIR_ICE_POMR_CRYO", -"IGDP_MIR_ICE_IFU_CRYO", 
-"IGDP_MIR_ICE_IMG_CRYO", - -"SE_ZIMIRFPEA", - -"IMIR_PDU_V_DIG_5V", -"IMIR_PDU_I_DIG_5V", -"IMIR_PDU_V_ANA_5V", -"IMIR_PDU_I_ANA_5V", - -"IMIR_PDU_V_ANA_N5V", -"IMIR_PDU_I_ANA_N5V", - -"IMIR_PDU_V_ANA_7V", -"IMIR_PDU_I_ANA_7V", - -"IMIR_PDU_V_ANA_N7V", -"IMIR_PDU_I_ANA_N7V", - -"IMIR_SPW_V_DIG_2R5V", -"IMIR_PDU_V_REF_2R5V", - -"IGDP_MIR_IC_V_VDETCOM", -"IGDP_MIR_SW_V_VDETCOM", -"IGDP_MIR_LW_V_VDETCOM", - -"IGDP_MIR_IC_V_VSSOUT", -"IGDP_MIR_SW_V_VSSOUT", -"IGDP_MIR_LW_V_VSSOUT", -"IGDP_MIR_IC_V_VRSTOFF", - -"IGDP_MIR_SW_V_VRSTOFF", -"IGDP_MIR_LW_V_VRSTOFF", - -"IGDP_MIR_IC_V_VP", -"IGDP_MIR_SW_V_VP", -"IGDP_MIR_LW_V_VP", - -"IGDP_MIR_IC_V_VDDUC", -"IGDP_MIR_SW_V_VDDUC", -"IGDP_MIR_LW_V_VDDUC", - -"IMIR_PDU_TEMP", - -"IMIR_IC_SCE_ANA_TEMP1", -"IMIR_SW_SCE_ANA_TEMP1", -"IMIR_LW_SCE_ANA_TEMP1", - -"IMIR_IC_SCE_DIG_TEMP", -"IMIR_SW_SCE_DIG_TEMP", -"IMIR_LW_SCE_DIG_TEMP", - -"IGDP_MIR_IC_DET_TEMP", -"IGDP_MIR_LW_DET_TEMP", -"IGDP_MIR_SW_DET_TEMP", - -"IMIR_HK_FW_POS_VOLT", -"IMIR_HK_GW14_POS_VOLT", -"IMIR_HK_GW23_POS_VOLT", -"IMIR_HK_CCC_POS_VOLT"] - -#different tables for wheelpostions -mnemonic_wheelpositions = [ -"IMIR_HK_FW_POS_RATIO_FND", -"IMIR_HK_FW_POS_RATIO_OPAQUE", -"IMIR_HK_FW_POS_RATIO_F1000W", -"IMIR_HK_FW_POS_RATIO_F1130W", -"IMIR_HK_FW_POS_RATIO_F1280W", -"IMIR_HK_FW_POS_RATIO_P750L", -"IMIR_HK_FW_POS_RATIO_F1500W", -"IMIR_HK_FW_POS_RATIO_F1800W", -"IMIR_HK_FW_POS_RATIO_F2100W", -"IMIR_HK_FW_POS_RATIO_F560W", -"IMIR_HK_FW_POS_RATIO_FLENS", -"IMIR_HK_FW_POS_RATIO_F2300C", -"IMIR_HK_FW_POS_RATIO_F770W", -"IMIR_HK_FW_POS_RATIO_F1550C", -"IMIR_HK_FW_POS_RATIO_F2550W", -"IMIR_HK_FW_POS_RATIO_F1140C", -"IMIR_HK_FW_POS_RATIO_F2550WR", -"IMIR_HK_FW_POS_RATIO_F1065C", - -"IMIR_HK_GW14_POS_RATIO_SHORT", -"IMIR_HK_GW14_POS_RATIO_MEDIUM", -"IMIR_HK_GW14_POS_RATIO_LONG", - -"IMIR_HK_GW23_POS_RATIO_SHORT", -"IMIR_HK_GW23_POS_RATIO_MEDIUM", -"IMIR_HK_GW23_POS_RATIO_LONG", - -"IMIR_HK_CCC_POS_RATIO_LOCKED", -"IMIR_HK_CCC_POS_RATIO_OPEN", -"IMIR_HK_CCC_POS_RATIO_CLOSED"] - -fw_pos_mnemonic = [ -"IMIR_HK_FW_POS_RATIO_FND", -"IMIR_HK_FW_POS_RATIO_OPAQUE", -"IMIR_HK_FW_POS_RATIO_F1000W", -"IMIR_HK_FW_POS_RATIO_F1130W", -"IMIR_HK_FW_POS_RATIO_F1280W", -"IMIR_HK_FW_POS_RATIO_P750L", -"IMIR_HK_FW_POS_RATIO_F1500W", -"IMIR_HK_FW_POS_RATIO_F1800W", -"IMIR_HK_FW_POS_RATIO_F2100W", -"IMIR_HK_FW_POS_RATIO_F560W", -"IMIR_HK_FW_POS_RATIO_FLENS", -"IMIR_HK_FW_POS_RATIO_F2300C", -"IMIR_HK_FW_POS_RATIO_F770W", -"IMIR_HK_FW_POS_RATIO_F1550C", -"IMIR_HK_FW_POS_RATIO_F2550W", -"IMIR_HK_FW_POS_RATIO_F1140C", -"IMIR_HK_FW_POS_RATIO_F2550WR", -"IMIR_HK_FW_POS_RATIO_F1065C"] diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/utils/process_data.py b/jwql/instrument_monitors/miri_monitors/data_trending/utils/process_data.py deleted file mode 100755 index 2384f954b..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/utils/process_data.py +++ /dev/null @@ -1,387 +0,0 @@ -"""This module holds functions for miri data trending - -All functions in this module are tailored for the miri datatrending application. -Detailed descriptions are given for every function individually. 
- -------- - - Daniel Kühbacher - -Use ---- - -Dependencies ------------- -MIRI_trend_requestsDRAFT1900201.docx - -References ----------- - -Notes ------ - -""" - -import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as mn -import jwql.instrument_monitors.miri_monitors.data_trending.utils.condition as cond -import warnings -from collections import defaultdict - - -def extract_data(condition, mnemonic): - '''Function extracts data from given mnemmonic at a given condition - Parameters - ---------- - condition : object - conditon object that holds one or more subconditions - mnemonic : AstropyTable - holds single table with mnemonic data - Return - ------ - temp : list or None - holds data that applies to given condition - ''' - temp = [] - - # look for all values that fit to the given conditions - for element in mnemonic: - if condition.state(float(element['time'])): - temp.append(float(element['value'])) - - # return temp is one ore more values fit to the condition - # return None if no applicable data was found - if len(temp) > 0: - return temp - else: - return None - - -def extract_filterpos1(condition, nominals, ratio_mnem, pos_mnem): - '''Extracts ratio values which correspond to given position values and their - proposed nominals - Parameters - ---------- - condition : object - conditon object that holds one or more subconditions - nominals : dict - holds nominal values for all wheel positions - ratio_mem : AstropyTable - holds ratio values of one specific mnemonic - pos_mem : AstropyTable - holds pos values of one specific mnemonic - Return - ------ - pos_values : dict - holds ratio values and times with corresponding positionlabel as key - ''' - - # initilize empty dict - pos_values = defaultdict(list) - - # do for every position in mnemonic attribute - for pos in pos_mnem: - - # raise warning if position is UNKNOWN - if pos['value'] != "UNKNOWN": - - # request time interval where the current positon is in between - interval = condition.get_interval(pos['time']) - - # get all ratio values in the interval - - # check if condition attribute for current positon is true - if interval is not None: - cur_pos_time = pos['time'] - - for ratio in ratio_mnem: - - # look for ratio values which are in the same time interval - # and differ a certain value (here 5mV) from the nominal - if (ratio['time'] >= cur_pos_time) and \ - (abs(float(ratio['value']) - nominals.get(pos['value'])) < 5): - - if (ratio['time'] > interval[0]) and (ratio['time'] < interval[1]): - pos_values[pos['value']].append((ratio['time'], ratio['value'])) - - else: - warnings.warn("UNKNOWN Position") - return pos_values - - -def extract_filterpos(condition, nominals, ratio_mnem, pos_mnem): - '''Extracts ratio values which correspond to given position values and their - proposed nominals - Parameters - ---------- - condition : object - conditon object that holds one or more subconditions - nominals : dict - holds nominal values for all wheel positions - ratio_mem : AstropyTable - holds ratio values of one specific mnemonic - pos_mem : AstropyTable - holds pos values of one specific mnemonic - Return - ------ - pos_values : dict - holds ratio values and times with corresponding positionlabel as key - ''' - - # initilize empty dict for assigned ratio values - pos_values = defaultdict(list) - - for index, pos in enumerate(pos_mnem): - - # raise warning if position is UNKNOWN - if pos['value'] != "UNKNOWN": - - # set up interval beween where the pos value was timed and the supply - interval = 
condition.get_interval(pos['time']) - - if interval is None: - continue - else: - interval[0] = pos['time'] - if pos_mnem[index+1]['time'] < interval[1]: - interval[1] = pos_mnem[index+1]['time'] - - # empty list for pos values - interval_ratios = [] - - # get all ratio values in the interval - for ratio in ratio_mnem: - if (ratio['time'] >= interval[0]) and (ratio['time'] < interval[1]): - interval_ratios.append(ratio) - elif ratio['time'] >= interval[1]: - break - - # check wheather pos values are in range of these checkvals - window = 1 - found_value = False - - while found_value is False: - for ratio in interval_ratios: - if (abs(float(ratio['value']) - nominals.get(pos['value'])) < window): - found_value = True - pos_values[pos['value']].append((ratio['time'], ratio['value'])) - break - - window += 2 - - if window > 10: - print('ratio error') - break - - else: - warnings.warn("UNKNOWN Position") - return pos_values - - -def once_a_day_routine(mnemonic_data): - '''Proposed routine for processing a 15min data file once a day - Parameters - ---------- - mnemonic_data : dict - dict holds time and value in a astropy table with correspining identifier as key - Return - ------ - data_cond_1 : dict - holds extracted data with condition 1 applied - data_cond_1 : dict - holds extracted data with condition 2 applied - ''' - - # abbreviate attribute - m = mnemonic_data - returndata = dict() - - ######################################################################### - con_set_1 = [ - cond.equal(m.mnemonic('IMIR_HK_IMG_CAL_LOOP'), 'OFF'), - cond.equal(m.mnemonic('IMIR_HK_IFU_CAL_LOOP'), 'OFF'), - cond.equal(m.mnemonic('IMIR_HK_POM_LOOP'), 'OFF'), - cond.smaller(m.mnemonic('IMIR_HK_ICE_SEC_VOLT1'), 1.0), - cond.greater(m.mnemonic('SE_ZIMIRICEA'), 0.2)] - # setup condition - condition_1 = cond.condition(con_set_1) - - # add filtered engineering values of mnemonics given in list mnemonic_cond_1 - # to dictitonary - for identifier in mn.mnemonic_cond_1: - data = extract_data(condition_1, m.mnemonic(identifier)) - - if data is not None: - returndata.update({identifier: data}) - else: - print("no data for {}".format(identifier)) - - del condition_1 - - ########################################################################## - # under normal use following line should be added: - # cond.equal(m.mnemonic('IGDP_IT_MIR_SW_STATUS'), 'DETECTOR_READY'), \ - # SW was missing in the trainigs data so I could not use it for a condition. 
- con_set_2 = [ - cond.greater(m.mnemonic('SE_ZIMIRFPEA'), 0.5), - cond.equal(m.mnemonic('IGDP_IT_MIR_IC_STATUS'), 'DETECTOR_READY'), - cond.equal(m.mnemonic('IGDP_IT_MIR_LW_STATUS'), 'DETECTOR_READY')] - # setup condition - condition_2 = cond.condition(con_set_2) - - # add filtered engineering values of mnemonics given in list mnemonic_cond_2 - # to dictitonary - for identifier in mn.mnemonic_cond_2: - data = extract_data(condition_2, m.mnemonic(identifier)) - - if data is not None: - returndata.update({identifier: data}) - else: - print("no data for {}".format(identifier)) - - del condition_2 - - return returndata - - -def whole_day_routine(mnemonic_data): - '''Proposed routine for processing data representing a whole day - Parameters - ---------- - mnemonic_data : dict - dict holds time and value in a astropy table with correspining identifier as key - Return - ------ - data_cond_3 : dict - holds extracted data with condition 3 applied - FW_volt : list - extracted data for IMIR_HK_FW_POS_VOLT - GW14_volt : list - extracted data for IMIR_HK_GW14_POS_VOLT - GW23_volt : list - extracted data for IMIR_HK_GW23_POS_VOLT - CCC_volt : list - extracted data for IMIR_HK_CCC_POS_VOLT - ''' - - # abbreviate attribute - m = mnemonic_data - returndata = dict() - - ######################################################################### - con_set_3 = [ - cond.greater(m.mnemonic('IMIR_HK_ICE_SEC_VOLT1'), 25.0)] - # setup condition - condition_3 = cond.condition(con_set_3) - - # add filtered engineering values of mnemonics given in list mnemonic_cond_3 - # to dictitonary - for identifier in mn.mnemonic_cond_3: - data = extract_data(condition_3, m.mnemonic(identifier)) - - if data is not None: - returndata.update({identifier: data}) - else: - print("no data for {}".format(identifier)) - - del condition_3 - - ######################################################################### - # extract data for IMIR_HK_FW_POS_VOLT under given condition - con_set_FW = [ - cond.greater(m.mnemonic('IMIR_HK_FW_POS_VOLT'), 250.0)] - # setup condition - condition_FW = cond.condition(con_set_FW) - FW_volt = extract_data(condition_FW, m.mnemonic('IMIR_HK_FW_POS_VOLT')) - returndata.update({'IMIR_HK_FW_POS_VOLT':FW_volt}) - del condition_FW - - # extract data for IMIR_HK_GW14_POS_VOLT under given condition - con_set_GW14 = [ - cond.greater(m.mnemonic('IMIR_HK_GW14_POS_VOLT'), 250.0)] - # setup condition - condition_GW14 = cond.condition(con_set_GW14) - GW14_volt = extract_data(condition_GW14, m.mnemonic('IMIR_HK_GW14_POS_VOLT')) - returndata.update({'IMIR_HK_GW14_POS_VOLT': GW14_volt}) - del condition_GW14 - - # extract data for IMIR_HK_GW23_POS_VOLT under given condition - con_set_GW23 = [ - cond.greater(m.mnemonic('IMIR_HK_GW23_POS_VOLT'), 250.0)] - # setup condition - condition_GW23 = cond.condition(con_set_GW23) - GW23_volt = extract_data(condition_GW23, m.mnemonic('IMIR_HK_GW23_POS_VOLT')) - returndata.update({'IMIR_HK_GW23_POS_VOLT': GW23_volt}) - del condition_GW23 - - # extract data for IMIR_HK_CCC_POS_VOLT under given condition - con_set_CCC = [ - cond.greater(m.mnemonic('IMIR_HK_CCC_POS_VOLT'), 250.0)] - # setup condition - condition_CCC = cond.condition(con_set_CCC) - CCC_volt = extract_data(condition_CCC, m.mnemonic('IMIR_HK_CCC_POS_VOLT')) - returndata.update({'IMIR_HK_CCC_POS_VOLT': CCC_volt}) - del condition_CCC - - return returndata - - -def wheelpos_routine(mnemonic_data): - '''Proposed routine for positionsensors each day - Parameters - ---------- - mnemonic_data : dict - dict holds time and value in a 
astropy table with correspining identifier as key - Return - ------ - FW : dict - holds FW ratio values and times with corresponding positionlabel as key - GW14 : dict - holds GW14 ratio values and times with corresponding positionlabel as key - GW23 : dict - holds GW23 ratio values and times with corresponding positionlabel as key - CCC : dict - holds CCC ratio values and times with corresponding positionlabel as key - ''' - - # abbreviate attribute - m = mnemonic_data - - con_set_FW = [ - cond.greater(m.mnemonic('IMIR_HK_FW_POS_VOLT'), 250.0)] - # setup condition - condition_FW = cond.condition(con_set_FW) - FW = extract_filterpos(condition_FW, mn.fw_nominals, - m.mnemonic('IMIR_HK_FW_POS_RATIO'), m.mnemonic('IMIR_HK_FW_CUR_POS')) - - del condition_FW - - con_set_GW14 = [ - cond.greater(m.mnemonic('IMIR_HK_GW14_POS_VOLT'), 250.0)] - # setup condition - condition_GW14 = cond.condition(con_set_GW14) - GW14 = extract_filterpos(condition_GW14, mn.gw14_nominals, - m.mnemonic('IMIR_HK_GW14_POS_RATIO'), m.mnemonic('IMIR_HK_GW14_CUR_POS')) - - del condition_GW14 - - con_set_GW23 = [ - cond.greater(m.mnemonic('IMIR_HK_GW23_POS_VOLT'), 250.0)] - # setup condition - condition_GW23 = cond.condition(con_set_GW23) - GW23 = extract_filterpos(condition_GW23, mn.gw23_nominals, - m.mnemonic('IMIR_HK_GW23_POS_RATIO'), m.mnemonic('IMIR_HK_GW23_CUR_POS')) - - del condition_GW23 - - con_set_CCC = [ - cond.greater(m.mnemonic('IMIR_HK_CCC_POS_VOLT'), 250.0)] - # setup condition - condition_CCC = cond.condition(con_set_CCC) - CCC = extract_filterpos(condition_CCC, mn.ccc_nominals, - m.mnemonic('IMIR_HK_CCC_POS_RATIO'), m.mnemonic('IMIR_HK_CCC_CUR_POS')) - - del condition_CCC - - return FW, GW14, GW23, CCC - -if __name__ =='__main__': - pass diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/utils/sql_interface.py b/jwql/instrument_monitors/miri_monitors/data_trending/utils/sql_interface.py deleted file mode 100755 index c07bd2086..000000000 --- a/jwql/instrument_monitors/miri_monitors/data_trending/utils/sql_interface.py +++ /dev/null @@ -1,162 +0,0 @@ -"""Module holds functions to generate and access sqlite databases - -The module is tailored for use in miri data trending. It holds functions to -create and close connections to a sqlite database. Calling the module itself -creates a sqlite database with specific tables used at miri data trending. 
- -Authors -------- - - Daniel Kühbacher - -Use ---- - -Dependencies ------------- - import mnemonics as m - -References ----------- - -Notes ------ - -""" -import os -import sqlite3 -from sqlite3 import Error - -import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as m -from jwql.utils.utils import get_config - - -def create_connection(db_file): - '''Sets up a connection or builds database - Parameters - ---------- - db_file : string - represents filename of database - Return - ------ - conn : DBobject or None - Connection object or None - ''' - try: - conn = sqlite3.connect(db_file) - print('Connected to database "{}"'.format(db_file)) - return conn - except Error as e: - print(e) - return None - - -def close_connection(conn): - '''Closes connection to database - Parameters - ---------- - conn : DBobject - Connection object to be closed - ''' - conn.close() - print('Connection closed') - - -def add_data(conn, mnemonic, data): - '''Add data of a specific mnemonic to database if it not exists - Parameters - ---------- - conn : DBobject - connection object to access database - mnemonic : string - identifies the table - data : list - specifies the data - ''' - - c = conn.cursor() - - # check if data already exists (start_time as identifier) - c.execute('SELECT id from {} WHERE start_time= {}'.format(mnemonic, data[0])) - temp = c.fetchall() - - if len(temp) == 0: - c.execute('INSERT INTO {} (start_time,end_time,data_points,average,deviation) \ - VALUES (?,?,?,?,?)'.format(mnemonic), data) - conn.commit() - else: - print('data already exists') - - -def add_wheel_data(conn, mnemonic, data): - '''Add data of a specific wheel position to database if it not exists - Parameters - ---------- - conn : DBobject - connection object to access database - mnemonic : string - identifies the table - data : list - specifies the data - ''' - - c = conn.cursor() - - # check if data already exists (start_time) - c.execute('SELECT id from {} WHERE timestamp = {}'.format(mnemonic, data[0])) - temp = c.fetchall() - - if len(temp) == 0: - c.execute('INSERT INTO {} (timestamp, value) \ - VALUES (?,?)'.format(mnemonic),data) - conn.commit() - else: - print('data already exists') - - -def main(): - ''' Creates SQLite database with tables proposed in mnemonics.py''' - - __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) - - # generate paths - DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') - DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'miri_database.db') - - conn = create_connection(DATABASE_FILE) - - c = conn.cursor() - - for mnemonic in m.mnemonic_set_database: - try: - c.execute('CREATE TABLE IF NOT EXISTS {} ( \ - id INTEGER, \ - start_time REAL, \ - end_time REAL, \ - data_points INTEGER, \ - average REAL, \ - deviation REAL, \ - performed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\ - PRIMARY KEY (id));'.format(mnemonic)) - except Error as e: - print('e') - - for mnemonic in m.mnemonic_wheelpositions: - try: - c.execute('CREATE TABLE IF NOT EXISTS {} ( \ - id INTEGER, \ - timestamp REAL, \ - value REAL, \ - performed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\ - PRIMARY KEY (id));'.format(mnemonic)) - except Error as e: - print('e') - - print("Database initial setup complete") - conn.commit() - close_connection(conn) - - -if __name__ == "__main__": - # sets up database if called as main - main() - print("sql_interface.py done") diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/15min_to_db.py 
b/jwql/instrument_monitors/nirspec_monitors/data_trending/15min_to_db.py deleted file mode 100644 index 942e5602a..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/15min_to_db.py +++ /dev/null @@ -1,93 +0,0 @@ -import statistics -import os -import glob -import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as mn -import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql -import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.csv_to_AstropyTable as apt -from jwql.utils.utils import get_config - -from jwql.instrument_monitors.nirspec_monitors.data_trending.utils.process_data import once_a_day_routine - -__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) - -# point to the directory where your files are located! -directory = os.path.join(get_config()['outputs'], 'nirspec_data_trending', 'nirspec_new_15min', '*.CSV') - -# there some some files contain the same data but they are all incomplete -# in order to generate a full database we have to import all of them -filenames = glob.glob(directory) - - -def process_file(conn, path): - '''Parse CSV file, process data within and put to DB - Parameters - ---------- - conn : DBobject - Connection object to temporary database - path : str - defines path to the files - ''' - - # import mnemonic data and append dict to variable below - m_raw_data = apt.mnemonics(path) - - # process raw data with once a day routine - returndata = once_a_day_routine(m_raw_data) - - # put all data in a database that uses a condition - for key, value in returndata.items(): - m = m_raw_data.mnemonic(key) - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, key, dataset) - - # add rest of the data to database - for identifier in mn.mnemSet_15min: - - m = m_raw_data.mnemonic(identifier) - - temp = [] - - # look for all values that fit to the given conditions - for element in m: - temp.append(float(element['value'])) - - # return None if no applicable data was found - if len(temp) > 2: - length = len(temp) - mean = statistics.mean(temp) - deviation = statistics.stdev(temp) - - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, identifier, dataset) - elif len(temp) == 2: - dataset = (float(element['time']), float(element['time']), 1, temp[0], 0) - sql.add_data(conn, identifier, dataset) - else: - print('No data for {}'.format(identifier)) - print(temp) - - del temp - - -def main(): - # generate paths - DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') - DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'nirspec_database.db') - - # connect to temporary database - conn = sql.create_connection(DATABASE_FILE) - - # do for every file in list above - for path in filenames: - process_file(conn, path) - - # close connection - sql.close_connection(conn) - print("done") - - -if __name__ == "__main__": - main() diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/__init__.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/dashboard.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/dashboard.py deleted file mode 100644 index 8a4802230..000000000 --- 
a/jwql/instrument_monitors/nirspec_monitors/data_trending/dashboard.py +++ /dev/null @@ -1,100 +0,0 @@ -#! /usr/bin/env python -"""Combines plots to tabs and prepares dashboard - -The module imports all prepares plot functions from .plots and combines -prebuilt tabs to a dashboard. Furthermore it defines the timerange for -the visualisation. Default time_range should be set to about 4 Month (120days) - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``data_container.py``, e.g.: - - :: - import jwql.instrument_monitors.miri_monitors.data_trending.dashboard as dash - dashboard, variables = dash.data_trending_dashboard(start_time, end_time) - -Dependencies ------------- - User must provide "nirspec_database.db" in folder jwql/database - -""" -import os -import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql -from jwql.utils.utils import get_config - -from bokeh.embed import components -from bokeh.models.widgets import Tabs - -import datetime - -# import plot functions -from .plots.power_tab import power_plots -from .plots.voltage_tab import volt_plots -from .plots.temperature_tab import temperature_plots -from .plots.msa_mce_tab import msa_mce_plots -from .plots.fpe_fpa_tab import fpe_fpa_plots -from .plots.caa_tab import caa_plots -from .plots.wheel_tab import wheel_pos - -# configure actual datetime in order to implement range function -now = datetime.datetime.now() -# default_start = now - datetime.timedelta(1000) -default_start = datetime.date(2017, 8, 15).isoformat() - -__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) - - -def data_trending_dashboard(start=default_start, end=now): - """Bulilds dashboard - Parameters - ---------- - start : time - configures start time for query and visualisation - end : time - configures end time for query and visualisation - Return - ------ - plot_data : list - A list containing the JavaScript and HTML content for the dashboard - variables : dict - no use - """ - - # connect to database - DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') - DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'nirspec_database.db') - - conn = sql.create_connection(DATABASE_FILE) - - # some variables can be passed to the template via following - variables = dict(init=1) - - # some variables can be passed to the template via following - variables = dict(init=1) - - # add tabs to dashboard - tab1 = power_plots(conn, start, end) - tab2 = volt_plots(conn, start, end) - tab3 = temperature_plots(conn, start, end) - tab4 = msa_mce_plots(conn, start, end) - tab5 = fpe_fpa_plots(conn, start, end) - tab6 = caa_plots(conn, start, end) - tab7 = wheel_pos(conn, start, end) - - # build dashboard - tabs = Tabs(tabs=[tab1, tab2, tab3, tab4, tab5, tab6, tab7]) - # tabs = Tabs( tabs=[ tab1, tab7] ) - - # return dasboard to webapp - script, div = components(tabs) - plot_data = [div, script] - - # close sql connection - sql.close_connection(conn) - - return plot_data, variables diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/day_to_db.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/day_to_db.py deleted file mode 100644 index 96ab15b59..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/day_to_db.py +++ /dev/null @@ -1,116 +0,0 @@ -import statistics -import os -import glob -import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as mn -import 
jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql -import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.csv_to_AstropyTable as apt -from jwql.utils.utils import get_config - -from jwql.instrument_monitors.nirspec_monitors.data_trending.utils.process_data import whole_day_routine, wheelpos_routine - -__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) - -# point to the directory where your files are located! -directory = os.path.join(get_config()['outputs'], 'nirspec_data_trending', 'nirspec_more', '*.CSV') - -# there some some files contain the same data but they are all incomplete -# in order to generate a full database we have to import all of them -filenames = glob.glob(directory) -test = "FOFTLM2019073163845064.CSV" - - -def process_file(conn, path): - '''Parse CSV file, process data within and put to DB - Parameters - ---------- - conn : DBobject - Connection object to temporary database - path : str - defines path to the files - ''' - - # import mnemonic data and append dict to variable below - m_raw_data = apt.mnemonics(path) - - # process raw data with once a day routine - return_data, lamp_data = whole_day_routine(m_raw_data) - FW, GWX, GWY = wheelpos_routine(m_raw_data) - - for key, values in FW.items(): - for data in values: - sql.add_wheel_data(conn, 'INRSI_C_FWA_POSITION_{}'.format(key), data) - - for key, values in GWX.items(): - for data in values: - sql.add_wheel_data(conn, 'INRSI_C_GWA_X_POSITION_{}'.format(key), data) - - for key, values in GWY.items(): - for data in values: - sql.add_wheel_data(conn, 'INRSI_C_GWA_Y_POSITION_{}'.format(key), data) - - # put all data to a database that uses a condition - for key, value in return_data.items(): - m = m_raw_data.mnemonic(key) - length = len(value) - if length > 2: - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, key, dataset) - - # add rest of the data to database -> no conditions applied - for identifier in mn.mnemSet_day: - - m = m_raw_data.mnemonic(identifier) - - temp = [] - - # look for all values that fit to the given conditions - for element in m: - temp.append(float(element['value'])) - - # return None if no applicable data was found - if len(temp) > 2: - length = len(temp) - mean = statistics.mean(temp) - deviation = statistics.stdev(temp) - - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, identifier, dataset) - else: - print('No data for {}'.format(identifier)) - print(temp) - - del temp - # add lamp data to database -> distiction over lamps - for key, values in lamp_data.items(): - for data in values: - dataset_volt = (data[0], data[1], data[5], data[6], data[7]) - dataset_curr = (data[0], data[1], data[2], data[3], data[4]) - sql.add_data(conn, 'LAMP_{}_VOLT'.format(key), dataset_volt) - sql.add_data(conn, 'LAMP_{}_CURR'.format(key), dataset_curr) - - -def main(): - # generate paths - DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') - DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'nirspec_database.db') - - # connect to temporary database - conn = sql.create_connection(DATABASE_FILE) - - ''' - path = directory + test - process_file(conn, path) - ''' - # do for every file in list above - for path in filenames: - process_file(conn, path) - - # close connection - sql.close_connection(conn) - print("done") - - -if __name__ == 
"__main__": - main() diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py deleted file mode 100644 index c8388ee6f..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py +++ /dev/null @@ -1,177 +0,0 @@ -#! /usr/bin/env python -''' Main module for nirspec datatrending -> fills database - - This module holds functions to connect with the engineering database in order - to grab and process data for the specific miri database. The scrips queries - a daily 15 min chunk and a whole day dataset. These contain several mnemonics - defined in ''mnemonics.py''. The queried data gets processed and stored in - a prepared database. - -Authors -------- - - - Daniel Kühbacher - -Use ---- - -Dependencies ------------- - -References ----------- - -Notes ------ -''' -import utils.mnemonics as mn -import utils.sql_interface as sql -from utils.process_data import whole_day_routine, wheelpos_routine -from jwql.utils.engineering_database import query_single_mnemonic - -import statistics - - -def process_daysample(conn, m_raw_data): - '''Parse CSV file, process data within and put to DB - Parameters - ---------- - conn : DBobject - Connection object to temporary database - path : str - defines path to the files - ''' - - # process raw data with once a day routine - return_data, lamp_data = whole_day_routine(m_raw_data) - FW, GWX, GWY = wheelpos_routine(m_raw_data) - - # put all data to a database that uses a condition - for key, value in return_data.items(): - m = m_raw_data.mnemonic(key) - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, key, dataset) - - # add rest of the data to database -> no conditions applied - for identifier in mn.mnemSet_day: - m = m_raw_data.mnemonic(identifier) - temp = [] - # look for all values that fit to the given conditions - for element in m: - temp.append(float(element['value'])) - # return None if no applicable data was found - if len(temp) > 2: - length = len(temp) - mean = statistics.mean(temp) - deviation = statistics.stdev(temp) - else: - print('No data for {}'.format(identifier)) - del temp - - # add lamp data to database -> distiction over lamps - for key, values in lamp_data.items(): - for data in values: - dataset_volt = (data[0], data[1], data[5], data[6], data[7]) - dataset_curr = (data[0], data[1], data[2], data[3], data[4]) - sql.add_data(conn, 'LAMP_{}_VOLT'.format(key), dataset_volt) - sql.add_data(conn, 'LAMP_{}_CURR'.format(key), dataset_curr) - - - # add wheeldata to database - for key, values in FW.items(): - for data in values: - sql.add_wheel_data(conn, 'INRSI_C_FWA_POSITION_{}'.format(key), data) - - for key, values in GWX.items(): - for data in values: - sql.add_wheel_data(conn, 'INRSI_C_GWA_X_POSITION_{}'.format(key), data) - - for key, values in GWY.items(): - for data in values: - sql.add_wheel_data(conn, 'INRSI_C_GWA_Y_POSITION_{}'.format(key), data) - - -def process_15minsample(conn, m_raw_data): - '''Parse CSV file, process data within and put to DB - Parameters - ---------- - conn : DBobject - Connection object to temporary database - path : str - defines path to the files - ''' - - # process raw data with once a day routine - returndata = once_a_day_routine(m_raw_data) - - # put all data in a database that uses a condition - for key, value in returndata.items(): - m = 
m_raw_data.mnemonic(key) - length = len(value) - mean = statistics.mean(value) - deviation = statistics.stdev(value) - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, key, dataset) - - # add rest of the data to database - for identifier in mn.mnemSet_15min: - - m = m_raw_data.mnemonic(identifier) - - temp = [] - - # look for all values that fit to the given conditions - for element in m: - temp.append(float(element['value'])) - - # return None if no applicable data was found - if len(temp) > 2: - length = len(temp) - mean = statistics.mean(temp) - deviation = statistics.stdev(temp) - - dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation) - sql.add_data(conn, identifier, dataset) - elif len(temp) == 2: - dataset = (float(element['time']), float(element['time']), 1, temp[0], 0) - sql.add_data(conn, identifier, dataset) - else: - print('No data for {}'.format(identifier)) - print(temp) - - del temp - - -def main(): - - ''' - from ..utils.engineering_database import query_single_mnemonic - - mnemonic_identifier = 'SA_ZFGOUTFOV' - start_time = Time(2016.0, format='decimalyear') - end_time = Time(2018.1, format='decimalyear') - - - mnemonic = query_single_mnemonic(mnemonic_identifier, start_time, end_time) - assert len(mnemonic.data) == mnemonic.meta['paging']['rows'] - ''' - - for mnemonic in mn.mnemonic_set_15min: - whole_day.update(mnemonic=query_single_mnemonic(mnemonic, start, end)) - - # generate paths - DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') - DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'nirspec_database.db') - - # connect to temporary database - conn = sql.create_connection(DATABASE_FILE) - - process_daysample(conn, table_day) - process_15minsample(conn, table_15min) - - # close connection - sql.close_connection(conn) - print("done") diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/__init__.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/caa_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/caa_tab.py deleted file mode 100644 index 4f9d45ef6..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/caa_tab.py +++ /dev/null @@ -1,239 +0,0 @@ -#! /usr/bin/env python -"""Prepares plots for CAA tab - - Module prepares plots for mnemonics below. Combines plots in a grid and - returns tab object. - - Plot 1 - Lamp Voltages and Currents (Distincted) - INRSH_LAMP_SEL - INRSI_C_CAA_CURRENT - INRSI_C_CAA_VOLTAGE - - Plot 2 - CAA (Voltages and Currents) - INRSH_CAA_VREFOFF - INRSH_CAA_VREF - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``nirspec_dashboard.py``, e.g.: - - :: - from .plots.voltage_tab import voltage_plots - tab = voltage_plots(conn, start, end) - -Dependencies ------------- - User must provide database "nirpsec_database.db" - -""" -import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf -from bokeh.plotting import figure -from bokeh.models.widgets import Panel, Div -from bokeh.layouts import Column - - -def lamp_volt(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. 
datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=800, - y_range=[1.2, 2.3], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "CAA Lamp Voltages" - pf.add_basic_layout(p) - - l = pf.add_to_plot(p, "FLAT1", "LAMP_FLAT1_VOLT", start, end, conn, color="red") - m = pf.add_to_plot(p, "FLAT2", "LAMP_FLAT2_VOLT", start, end, conn, color="green") - n = pf.add_to_plot(p, "FLAT3", "LAMP_FLAT3_VOLT", start, end, conn, color="blue") - o = pf.add_to_plot(p, "FLAT4", "LAMP_FLAT4_VOLT", start, end, conn, color="brown") - x = pf.add_to_plot(p, "FLAT5", "LAMP_FLAT5_VOLT", start, end, conn, color="orange") - q = pf.add_to_plot(p, "LINE1", "LAMP_LINE1_VOLT", start, end, conn, color="cyan") - r = pf.add_to_plot(p, "LINE2", "LAMP_LINE2_VOLT", start, end, conn, color="darkmagenta") - s = pf.add_to_plot(p, "LINE3", "LAMP_LINE3_VOLT", start, end, conn, color="burlywood") - t = pf.add_to_plot(p, "LINE4", "LAMP_LINE4_VOLT", start, end, conn, color="darkkhaki") - u = pf.add_to_plot(p, "REF", "LAMP_REF_VOLT", start, end, conn, color="darkblue") - v = pf.add_to_plot(p, "TEST", "LAMP_TEST_VOLT", start, end, conn, color="goldenrod") - - pf.add_hover_tool(p, [l, m, n, o, x, q, r, s, t, u, v]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def lamp_current(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=600, - y_range=[10.5, 14.5], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Current (mA)') - - p.grid.visible = True - p.title.text = "CAA Lamp currents" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "FLAT1", "LAMP_FLAT1_CURR", start, end, conn, color="red") - b = pf.add_to_plot(p, "FLAT2", "LAMP_FLAT2_CURR", start, end, conn, color="green") - c = pf.add_to_plot(p, "FLAT3", "LAMP_FLAT3_CURR", start, end, conn, color="blue") - d = pf.add_to_plot(p, "FLAT4", "LAMP_FLAT4_CURR", start, end, conn, color="brown") - e = pf.add_to_plot(p, "FLAT5", "LAMP_FLAT5_CURR", start, end, conn, color="orange") - f = pf.add_to_plot(p, "LINE1", "LAMP_LINE1_CURR", start, end, conn, color="cyan") - g = pf.add_to_plot(p, "LINE2", "LAMP_LINE2_CURR", start, end, conn, color="darkmagenta") - h = pf.add_to_plot(p, "LINE3", "LAMP_LINE3_CURR", start, end, conn, color="burlywood") - i = pf.add_to_plot(p, "LINE4", "LAMP_LINE4_CURR", start, end, conn, color="darkkhaki") - j = pf.add_to_plot(p, "REF", "LAMP_REF_CURR", start, end, conn, color="darkblue") - k = pf.add_to_plot(p, "TEST", "LAMP_TEST_CURR", start, end, conn, color="goldenrod") - - pf.add_hover_tool(p, [a, b, c, d, e, f, g, h, i, j, k]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def caa_volt(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=600, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def caa_plots(conn, start, end): - '''Combines plots to a tab - - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - - Return - ------ - p : tab object - used by dashboard.py to set up dashboard - ''' - descr = Div(text= - """ - - - - - - - - - - - - - - - - - - - - - -
- Plotname | Mnemonic | Description
- CAA Lamp Voltages | INRSH_LAMP_SEL, INRSI_C_CAA_VOLTAGE | Lamp Voltage for each CAA Lamp
- CAA Lamp Currents | INRSH_LAMP_SEL, INRSI_C_CAA_CURRENT | Lamp Currents for each CAA Lamp
- - """, width=1100) - - plot1 = lamp_volt(conn, start, end) - plot2 = lamp_current(conn, start, end) - # plot3 = caa_plots(conn, start, end) - - layout = Column(descr, plot1, plot2) - - tab = Panel(child=layout, title="CAA/LAMPS") - - return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/fpe_fpa_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/fpe_fpa_tab.py deleted file mode 100644 index f85c64f44..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/fpe_fpa_tab.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -"""Prepares plots for Temperature tab - - Module prepares plots for mnemonics below. Combines plots in a grid and - returns tab object. - - Plot 1 - ASIC 1 Voltages - IGDP_NRSD_ALG_A1_VDDA - IGDP_NRSD_ALG_A1GND4VDA - IGDP_NRSD_ALG_A1GND5VRF - INRSD_ALG_A1_VDD3P3 - INRSD_ALG_A1_VDD - INRSD_ALG_A1_REF - INRSD_A1_DSUB_V - INRSD_A1_VRESET_V - INRSD_A1_CELLDRN_V - INRSD_A1_DRAIN_V - INRSD_A1_VBIASGATE_V - INRSD_A1_VBIASPWR_V - - Plot 2 - ASIC 1 Currents - IGDP_NRSD_ALG_A1_VDD_C - IGDP_NRSD_ALG_A1VDAP12C - IGDP_NRSD_ALG_A1VDAN12C - INRSD_A1_VDDA_I - - Plot 3 - ASIC 2 Voltages - IGDP_NRSD_ALG_A2_VDDA - IGDP_NRSD_ALG_A2GND4VDA - IGDP_NRSD_ALG_A2GND5VRF - INRSD_ALG_A2_VDD3P3 - INRSD_ALG_A2_VDD - INRSD_ALG_A2_REF - INRSD_A2_DSUB_V - INRSD_A2_VRESET_V - INRSD_A2_CELLDRN_V - INRSD_A2_DRAIN_V - INRSD_A2_VBIASGATE_V - INRSD_A2_VBIASPWR_V - - Plot 4 - ASIC 2 Currents - IGDP_NRSD_ALG_A2_VDD_C - IGDP_NRSD_ALG_A2VDAP12C - IGDP_NRSD_ALG_A2VDAN12C - INRSD_A2_VDDA_I - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``nirspec_dashboard.py``, e.g.: - - :: - from .plots.fpa_fpe_tab import fpa_fpe_plots - tab = fpa_fpe_plots(conn, start, end) - -Dependencies ------------- - User must provide database "nirspec_database.db" - -""" -import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf -from bokeh.plotting import figure -from bokeh.models.widgets import Panel, Div -from bokeh.layouts import Column, Row - - -def asic_1_voltages(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=800, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "ASIC 1 Voltages" - pf.add_basic_layout(p) - a = pf.add_to_plot(p, "VDDA", "IGDP_NRSD_ALG_A1_VDDA", start, end, conn, color="burlywood") - b = pf.add_to_plot(p, "A1GND4VDA", "IGDP_NRSD_ALG_A1GND4VDA", start, end, conn, color="cadetblue") - c = pf.add_to_plot(p, "A1GND5VRF", "IGDP_NRSD_ALG_A1GND5VRF", start, end, conn, color="chartreuse") - d = pf.add_to_plot(p, "A1VDD3P3", "INRSD_ALG_A1_VDD3P3", start, end, conn, color="chocolate") - e = pf.add_to_plot(p, "VDD", "INRSD_ALG_A1_VDD", start, end, conn, color="coral") - f = pf.add_to_plot(p, "REF", "INRSD_ALG_A1_REF", start, end, conn, color="darkorange") - g = pf.add_to_plot(p, "DSUB_V", "INRSD_A1_DSUB_V", start, end, conn, color="crimson") - h = pf.add_to_plot(p, "VRESET_V", "INRSD_A1_VRESET_V", start, end, conn, color="cyan") - i = pf.add_to_plot(p, "CELLDRN_V", "INRSD_A1_CELLDRN_V", start, end, conn, color="darkblue") - j = pf.add_to_plot(p, "DRAIN_V", "INRSD_A1_DRAIN_V", start, end, conn, color="darkgreen") - k = pf.add_to_plot(p, "VBIASGATE_V", "INRSD_A1_VBIASGATE_V", start, end, conn, color="darkmagenta") - l = pf.add_to_plot(p, "VBIASPWR_V", "INRSD_A1_VBIASPWR_V", start, end, conn, color="cornflowerblue") - pf.add_hover_tool(p, [a, b, c, d, e, f, g, h, i, j, k, l]) - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def asic_2_voltages(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=800, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "ASIC 2 Voltages" - pf.add_basic_layout(p) - a = pf.add_to_plot(p, "VDDA", "IGDP_NRSD_ALG_A2_VDDA", start, end, conn, color="burlywood") - b = pf.add_to_plot(p, "A2GND4VDA", "IGDP_NRSD_ALG_A2GND4VDA", start, end, conn, color="cadetblue") - c = pf.add_to_plot(p, "A2GND5VRF", "IGDP_NRSD_ALG_A2GND5VRF", start, end, conn, color="chartreuse") - d = pf.add_to_plot(p, "A2VDD3P3", "INRSD_ALG_A2_VDD3P3", start, end, conn, color="chocolate") - e = pf.add_to_plot(p, "VDD", "INRSD_ALG_A2_VDD", start, end, conn, color="coral") - f = pf.add_to_plot(p, "REF", "INRSD_ALG_A2_REF", start, end, conn, color="darkorange") - g = pf.add_to_plot(p, "DSUB_V", "INRSD_A2_DSUB_V", start, end, conn, color="crimson") - h = pf.add_to_plot(p, "VRESET_V", "INRSD_A2_VRESET_V", start, end, conn, color="cyan") - i = pf.add_to_plot(p, "CELLDRN_V", "INRSD_A2_CELLDRN_V", start, end, conn, color="darkblue") - j = pf.add_to_plot(p, "DRAIN_V", "INRSD_A2_DRAIN_V", start, end, conn, color="darkgreen") - k = pf.add_to_plot(p, "VBIASGATE_V", "INRSD_A2_VBIASGATE_V", start, end, conn, color="darkmagenta") - l = pf.add_to_plot(p, "VBIASPWR_V", "INRSD_A2_VBIASPWR_V", start, end, conn, color="cornflowerblue") - pf.add_hover_tool(p, [a, b, c, d, e, f, g, h, i, j, k, l]) - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def asic_1_currents(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Current (mA)') - - p.grid.visible = True - p.title.text = "ASIC 1 Currents" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "VDD_C", "IGDP_NRSD_ALG_A1_VDD_C", start, end, conn, color="burlywood") - b = pf.add_to_plot(p, "A1VDAP12C", "IGDP_NRSD_ALG_A1VDAP12C", start, end, conn, color="cadetblue") - c = pf.add_to_plot(p, "A1VDAN12C", "IGDP_NRSD_ALG_A1VDAN12C", start, end, conn, color="chartreuse") - d = pf.add_to_plot(p, "VDDA_I", "INRSD_A1_VDDA_I", start, end, conn, color="chocolate") - - pf.add_hover_tool(p, [a, b, c, d]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def asic_2_currents(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Current (mA)') - - p.grid.visible = True - p.title.text = "ASIC 2 Currents" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "VDD_C", "IGDP_NRSD_ALG_A2_VDD_C", start, end, conn, color="burlywood") - b = pf.add_to_plot(p, "A2VDAP12C", "IGDP_NRSD_ALG_A2VDAP12C", start, end, conn, color="cadetblue") - c = pf.add_to_plot(p, "A2VDAN12C", "IGDP_NRSD_ALG_A2VDAN12C", start, end, conn, color="chartreuse") - d = pf.add_to_plot(p, "VDDA_I", "INRSD_A2_VDDA_I", start, end, conn, color="chocolate") - - pf.add_hover_tool(p, [a, b, c, d]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def fpe_fpa_plots(conn, start, end): - '''Combines plots to a tab - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : tab object - used by dashboard.py to set up dashboard - ''' - descr = Div(text= - """ - - - - - - - - - - - - - - - - - - - - - -
- Plotname | Mnemonic | Description
- ASIC (1,2) Voltages:
-   IGDP_NRSD_ALG_A(1,2)_VDDA   : ASIC (1,2) VDDA Voltage
-   IGDP_NRSD_ALG_A(1,2)GND4VDA : ASIC (1,2) VDDA/Ground Voltage
-   IGDP_NRSD_ALG_A(1,2)GND5VRF : ASIC (1,2) Ref/Ground Voltage
-   INRSD_ALG_A(1,2)_VDD3P3     : ASIC (1,2) VDD 3.3 Supply Voltage
-   INRSD_ALG_A(1,2)_VDD        : ASIC (1,2) VDD Voltage
-   INRSD_ALG_A(1,2)_REF        : ASIC (1,2) Reference Voltage
-   INRSD_A(1,2)_DSUB_V         : ASIC (1,2) Dsub Voltage
-   INRSD_A(1,2)_VRESET_V       : ASIC (1,2) Reset Voltage
-   INRSD_A(1,2)_CELLDRN_V      : ASIC (1,2) Cell Drain Voltage
-   INRSD_A(1,2)_DRAIN_V        : ASIC (1,2) Drain Voltage
-   INRSD_A(1,2)_VBIASGATE_V    : ASIC (1,2) Bias Gate Voltage
-   INRSD_A(1,2)_VBIASPWR_V     : ASIC (1,2) Bias Power Voltage
- ASIC (1,2) Currents:
-   IGDP_NRSD_ALG_A(1,2)_VDD_C  : ASIC (1,2) VDD Current
-   IGDP_NRSD_ALG_A(1,2)VDAP12C : ASIC (1,2) VDDA +12V Current
-   IGDP_NRSD_ALG_A(1,2)VDAN12C : ASIC (1,2) VDDA -12V Current
-   INRSD_A(1,2)_VDDA_I         : ASIC (1,2) VDDA Current
- - """, width=1100) - - plot1 = asic_1_voltages(conn, start, end) - plot2 = asic_2_voltages(conn, start, end) - plot3 = asic_1_currents(conn, start, end) - plot4 = asic_2_currents(conn, start, end) - - currents = Row(plot3, plot4) - layout = Column(descr, plot1, plot2, currents) - - tab = Panel(child=layout, title="FPE/FPA") - - return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/msa_mce_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/msa_mce_tab.py deleted file mode 100644 index e36dd6a34..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/msa_mce_tab.py +++ /dev/null @@ -1,559 +0,0 @@ -#! /usr/bin/env python -"""Prepares plots for Temperature tab - - Module prepares plots for mnemonics below. Combines plots in a grid and - returns tab object. - - Plot 1 - MCE Board 1 (AIC) Voltages - INRSM_MCE_AIC_1R5_V - INRSM_MCE_AIC_3R3_V - INRSM_MCE_AIC_5_V - INRSM_MCE_AIC_P12_V - INRSM_MCE_AIC_N12_V - - Plot 2 - MCE Board 1 (AIC) Currents - INRSM_MCE_AIC_3R3_I - INRSM_MCE_AIC_5_I - INRSM_MCE_AIC_P12_I - INRSM_MCE_AIC_N12_I - - Plot 3 - MCE Board 2 (MDAC) Voltages - INRSM_MCE_MDAC_1R5_V - INRSM_MCE_MDAC_3R3_V - INRSM_MCE_MDAC_5_V - INRSM_MCE_MDAC_P12_V - INRSM_MCE_MDAC_N12_V - - Plot 4 - MCE Board 2 (MDAC) Currents - INRSM_MCE_MDAC_3R3_I - INRSM_MCE_MDAC_5_I - INRSM_MCE_MDAC_P12_I - INRSM_MCE_MDAC_N12_I - - Plot (5-8) - QUAD (1-4) - INRSM_MSA_Q(1-4)_365VDD - INRSM_MSA_Q(1-4)_365VPP - INRSM_MSA_Q(1-4)_171VPP - IGDPM_MSA_Q(1-4)_365IDD - IGDPM_MSA_Q(1-4)_365IPP - IGDPM_MSA_Q(1-4)_171RTN - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``nirspec_dashboard.py``, e.g.: - - :: - from .plots.msa_mce_tab import msa_mce_plots - tab = msa_mce_plots(conn, start, end) - -Dependencies ------------- - User must provide database "nirspec_database.db" - -""" -import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf -from bokeh.plotting import figure -from bokeh.models.widgets import Panel, Div -from bokeh.models import Title -from bokeh.layouts import gridplot, Column - - -def aic_voltage(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=700, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "MCE Board 1 (AIC)" - p.add_layout(Title(text="Voltages", text_font_style="italic", text_font_size="12pt"), 'above') - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "1R5_V", "INRSM_MCE_AIC_1R5_V", start, end, conn, color="red") - b = pf.add_to_plot(p, "3R3_V", "INRSM_MCE_AIC_3R3_V", start, end, conn, color="orange") - c = pf.add_to_plot(p, "5_V", "INRSM_MCE_AIC_5_V", start, end, conn, color="brown") - d = pf.add_to_plot(p, "P12_V", "INRSM_MCE_AIC_P12_V", start, end, conn, color="burlywood") - e = pf.add_to_plot(p, "N12_V", "INRSM_MCE_AIC_N12_V", start, end, conn, color="darkmagenta") - - pf.add_hover_tool(p, [a, b, c, d, e]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def aic_current(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=700, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Current (A)') - - p.grid.visible = True - p.title.text = "MCE Board 1 (AIC)" - p.add_layout(Title(text="Currents", text_font_style="italic", text_font_size="12pt"), 'above') - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "3R3_I", "INRSM_MCE_AIC_3R3_I", start, end, conn, color="blue") - b = pf.add_to_plot(p, "5_I", "INRSM_MCE_AIC_5_I", start, end, conn, color="red") - c = pf.add_to_plot(p, "P12_I", "INRSM_MCE_AIC_P12_I", start, end, conn, color="green") - d = pf.add_to_plot(p, "N12_I", "INRSM_MCE_AIC_N12_I", start, end, conn, color="orange") - - pf.add_hover_tool(p, [a, b, c, d]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def mdac_voltage(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=700, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "MCE Board 2 (MDAC)" - p.add_layout(Title(text="Voltages", text_font_style="italic", text_font_size="12pt"), 'above') - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "1R5_V", "INRSM_MCE_MDAC_1R5_V", start, end, conn, color="red") - b = pf.add_to_plot(p, "3R3_V", "INRSM_MCE_MDAC_3R3_V", start, end, conn, color="orange") - c = pf.add_to_plot(p, "5_V", "INRSM_MCE_MDAC_5_V", start, end, conn, color="brown") - d = pf.add_to_plot(p, "P12_V", "INRSM_MCE_MDAC_P12_V", start, end, conn, color="burlywood") - e = pf.add_to_plot(p, "N12_V", "INRSM_MCE_MDAC_N12_V", start, end, conn, color="darkmagenta") - - pf.add_hover_tool(p, [a, b, c, d, e]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def mdac_current(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=700, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "MCE Board 2 (MDAC)" - p.add_layout(Title(text="Currents", text_font_style="italic", text_font_size="12pt"), 'above') - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "3R3_I", "INRSM_MCE_MDAC_3R3_I", start, end, conn, color="blue") - b = pf.add_to_plot(p, "5_I", "INRSM_MCE_MDAC_5_I", start, end, conn, color="red") - c = pf.add_to_plot(p, "P12_I", "INRSM_MCE_MDAC_P12_I", start, end, conn, color="green") - d = pf.add_to_plot(p, "N12_I", "INRSM_MCE_MDAC_N12_I", start, end, conn, color="orange") - - pf.add_hover_tool(p, [a, b, c, d]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def quad1_volt(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=700, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "Quad 1" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "365VDD", "INRSM_MSA_Q1_365VDD", start, end, conn, color="red") - b = pf.add_to_plot(p, "365VPP", "INRSM_MSA_Q1_365VPP", start, end, conn, color="orange") - c = pf.add_to_plot(p, "171VPP", "INRSM_MSA_Q1_171VPP", start, end, conn, color="brown") - d = pf.add_to_plot(p, "365IDD", "IGDPM_MSA_Q1_365IDD", start, end, conn, color="burlywood") - e = pf.add_to_plot(p, "365IPP", "IGDPM_MSA_Q1_365IPP", start, end, conn, color="darkmagenta") - f = pf.add_to_plot(p, "171RTN", "IGDPM_MSA_Q1_171RTN", start, end, conn, color="blue") - - pf.add_hover_tool(p, [a, b, c, d, e, f]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def quad2_volt(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "Quad 2" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "365VDD", "INRSM_MSA_Q2_365VDD", start, end, conn, color="red") - b = pf.add_to_plot(p, "365VPP", "INRSM_MSA_Q2_365VPP", start, end, conn, color="orange") - c = pf.add_to_plot(p, "171VPP", "INRSM_MSA_Q2_171VPP", start, end, conn, color="brown") - d = pf.add_to_plot(p, "365IDD", "IGDPM_MSA_Q2_365IDD", start, end, conn, color="burlywood") - e = pf.add_to_plot(p, "365IPP", "IGDPM_MSA_Q2_365IPP", start, end, conn, color="darkmagenta") - f = pf.add_to_plot(p, "171RTN", "IGDPM_MSA_Q2_171RTN", start, end, conn, color="blue") - - pf.add_hover_tool(p, [a, b, c, d, e, f]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def quad3_volt(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "Quad 3" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "365VDD", "INRSM_MSA_Q3_365VDD", start, end, conn, color="red") - b = pf.add_to_plot(p, "365VPP", "INRSM_MSA_Q3_365VPP", start, end, conn, color="orange") - c = pf.add_to_plot(p, "171VPP", "INRSM_MSA_Q3_171VPP", start, end, conn, color="brown") - d = pf.add_to_plot(p, "365IDD", "IGDPM_MSA_Q3_365IDD", start, end, conn, color="burlywood") - e = pf.add_to_plot(p, "365IPP", "IGDPM_MSA_Q3_365IPP", start, end, conn, color="darkmagenta") - f = pf.add_to_plot(p, "171RTN", "IGDPM_MSA_Q3_171RTN", start, end, conn, color="blue") - - pf.add_hover_tool(p, [a, b, c, d, e, f]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def quad4_volt(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=560, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "Quad 4" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "365VDD", "INRSM_MSA_Q4_365VDD", start, end, conn, color="red") - b = pf.add_to_plot(p, "365VPP", "INRSM_MSA_Q4_365VPP", start, end, conn, color="orange") - c = pf.add_to_plot(p, "171VPP", "INRSM_MSA_Q4_171VPP", start, end, conn, color="brown") - d = pf.add_to_plot(p, "365IDD", "IGDPM_MSA_Q4_365IDD", start, end, conn, color="burlywood") - e = pf.add_to_plot(p, "365IPP", "IGDPM_MSA_Q4_365IPP", start, end, conn, color="darkmagenta") - f = pf.add_to_plot(p, "171RTN", "IGDPM_MSA_Q4_171RTN", start, end, conn, color="blue") - - pf.add_hover_tool(p, [a, b, c, d, e, f]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def msa_mce_plots(conn, start, end): - '''Combines plots to a tab - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : tab object - used by dashboard.py to set up dashboard - ''' - descr = Div(text= - """ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Plotname
Mnemonic
Description
MCE Board 1 (AIC) VoltagesINRSM_MCE_AIC_1R5_V
- INRSM_MCE_AIC_3R3_V
- INRSM_MCE_AIC_5_V
- INRSM_MCE_AIC_P12_V
- INRSM_MCE_AIC_N12_V
-
MCE AIC +1.5V Voltage
- MCE AIC +3.3V Voltage
- MCE AIC +5V Voltage
- MCE AIC +12V Voltage
- MCE AIC -12V Voltage
-
MCE Board 1 (AIC) CurrentsINRSM_MCE_AIC_3R3_I
- INRSM_MCE_AIC_5_I
- INRSM_MCE_AIC_P12_I
- INRSM_MCE_AIC_N12_I
-
MCE AIC Board +3.3V Current
- MCE AIC Board +5V Current
- MCE AIC Board +12V Current
- MCE AIC Board -12V Current
-
MCE Board 2 (MDAC) VoltagesINRSM_MCE_MDAC_1R5_V
- INRSM_MCE_MDAC_3R3_V
- INRSM_MCE_MDAC_5_V
- INRSM_MCE_MDAC_P12_V
- INRSM_MCE_MDAC_N12_V
-
MCE MDAC +1.5V Voltage
- MCE MDAC +3.3V Voltage
- MCE MDAC +5V Voltage
- MCE MDAC +12V Voltage
- MCE MDAC -12V Voltage
-
MCE Board 2 (MDAC) CurrentsINRSM_MCE_MDAC_3R3_I
- INRSM_MCE_MDAC_5_I
- INRSM_MCE_MDAC_P12_I
- INRSM_MCE_MDAC_N12_I
-
MCE MDAC Board +3.3V Current
- MCE MDAC Board +5V Current
- MCE MDAC Board +12V Current
- MCE MDAC Board -12V Current
-
QUAD (1-4)INRSM_MSA_Q(1-4)_365VDD
- INRSM_MSA_Q(1-4)_365VPP
- INRSM_MSA_Q(1-4)_171VPP
- IGDPM_MSA_Q(1-4)_365IDD
- IGDPM_MSA_Q(1-4)_365IPP
- IGDPM_MSA_Q(1-4)_171RTN
-
MSA Quad (1-4) Vdd 365 Voltage
- MSA Quad (1-4) Vpp 365 Voltage
- MSA Quad (1-4) Vpp 171 Voltage
- MSA Quad (1-4) Vdd 365 Current
- MSA Quad (1-4) Vpp 365 Current
- MSA Quad (1-4) Return 171 Current
-
- - """, width=1100) - - plot1 = aic_voltage(conn, start, end) - plot2 = aic_current(conn, start, end) - plot3 = mdac_voltage(conn, start, end) - plot4 = mdac_current(conn, start, end) - plot5 = quad1_volt(conn, start, end) - plot6 = quad2_volt(conn, start, end) - plot7 = quad3_volt(conn, start, end) - plot8 = quad4_volt(conn, start, end) - - grid = gridplot([[plot1, plot2], - [plot3, plot4], - [plot5, plot6], - [plot7, plot8]], - merge_tools=False) - layout = Column(descr, grid) - - tab = Panel(child=layout, title="MSA/MCE") - - return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/plot_functions.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/plot_functions.py deleted file mode 100644 index 14b31dd13..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/plot_functions.py +++ /dev/null @@ -1,206 +0,0 @@ -#! /usr/bin/env python -"""Auxilary functions for plots - - Module holds functions that are used for several plots. - - -Authors -------- - - Daniel Kühbacher - -Use ---- - - -Dependencies ------------- - -""" -from bokeh.models import BoxAnnotation -from bokeh.models import ColumnDataSource - -import pandas as pd -import numpy as np - -from astropy.time import Time - - -def pol_regression(x, y, rank): - ''' Calculate polynominal regression of certain rank - Parameters - ---------- - x : list - x parameters for regression - y : list - y parameters for regression - rank : int - rank of regression - Return - ------ - y_poly : list - regression y parameters - ''' - z = np.polyfit(x, y, rank) - f = np.poly1d(z) - y_poly = f(x) - return y_poly - - -def add_hover_tool(p, rend): - ''' Append hover tool to plot - parameters - ---------- - p : bokeh figure - declares where to append hover tool - rend : list - list of renderer to append hover tool - ''' - - from bokeh.models import HoverTool - - # activate HoverTool for scatter plot - hover_tool = HoverTool(tooltips=[('Name', '$name'), - ('Count', '@data_points'), - ('Mean', '@average'), - ('Deviation', '@deviation'), - ], renderers=rend) - # append hover tool - p.tools.append(hover_tool) - - -def add_limit_box(p, lower, upper, alpha=0.1, color="green"): - ''' Adds box to plot - Parameters - ---------- - p : bokeh figure - declares where to append hover tool - lower : float - lower limit of box - upper : float - upper limit of box - alpha : float - transperency of box - color : str - filling color - ''' - box = BoxAnnotation(bottom=lower, top=upper, fill_alpha=alpha, fill_color=color) - p.add_layout(box) - - -def add_to_plot(p, legend, mnemonic, start, end, conn, y_axis= "default", color="red", err='y'): - '''Add scatter and line to certain plot and activates hoover tool - Parameters - ---------- - p : bokeh object - defines plot where line and scatter should be added - legend : str - will be showed in legend of plot - mnemonic : str - defines mnemonic to be plotted - start : datetime - sets start time for data query - end : datetime - sets end time for data query - conn : DBobject - connection object to database - y_axis : str (default='default') - used if secon y axis is provided - color : str (default='dred') - defines color for scatter and line plot - Return - ------ - scat : plot scatter object - used for applying hovertools o plots - ''' - - # convert given start and end time to astropy time - start_str = str(Time(start).mjd) - end_str = str(Time(end).mjd) - - # prepare and execute sql query - sql_c = "SELECT * FROM "+mnemonic+" WHERE start_time BETWEEN "+start_str+" 
AND "+end_str+" ORDER BY start_time" - temp = pd.read_sql_query(sql_c, conn) - - # put data into Dataframe and define ColumnDataSource for each plot - # reg = pd.DataFrame({'reg' : pol_regression(temp['start_time'], temp['average'],3)}) - # temp = pd.concat([temp, reg], axis = 1) - temp['start_time'] = pd.to_datetime(Time(temp['start_time'], format="mjd").datetime) - plot_data = ColumnDataSource(temp) - - # plot data - p.line(x="start_time", y="average", color=color, y_range_name=y_axis, legend=legend, source=plot_data) - scat = p.scatter(x="start_time", y="average", name=mnemonic, color=color, y_range_name=y_axis, legend=legend, source=plot_data) - - # generate error lines if wished - if err != 'n': - # generate error bars - err_xs = [] - err_ys = [] - - for index, item in temp.iterrows(): - err_xs.append((item['start_time'], item['start_time'])) - err_ys.append((item['average'] - item['deviation'], item['average'] + item['deviation'])) - - # plot them - p.multi_line(err_xs, err_ys, color=color, legend=legend) - - return scat - - -def add_to_plot_normalized(p, legend, mnemonic, start, end, conn, nominal, color="red"): - '''Add line plot to figure (for wheelpositions) - Parameters - ---------- - p : bokeh object - defines figure where line schould be plotted - legend : str - will be showed in legend of plot - mnemonic : str - defines mnemonic to be plotted - start : datetime - sets start time for data query - end : datetime - sets end time for data query - conn : DBobject - connection object to database - color : str (default='dred') - defines color for scatter and line plot - ''' - - start_str = str(Time(start).mjd) - end_str = str(Time(end).mjd) - - sql_c = "SELECT * FROM "+mnemonic+" WHERE timestamp BETWEEN "+start_str+" AND "+end_str+" ORDER BY timestamp" - temp = pd.read_sql_query(sql_c, conn) - - # normalize values - temp['value'] -= nominal - # temp['value'] -= 1 - - temp['timestamp'] = pd.to_datetime(Time(temp['timestamp'], format="mjd").datetime) - plot_data = ColumnDataSource(temp) - - p.line(x="timestamp", y="value", color=color, legend=legend, source=plot_data) - p.scatter(x="timestamp", y="value", color=color, legend=legend, source=plot_data) - - -def add_basic_layout(p): - '''Add basic layout to certain plot - Parameters - ---------- - p : bokeh object - defines plot where line and scatter should be added - ''' - p.title.align = "left" - p.title.text_color = "#c85108" - p.title.text_font_size = "25px" - p.background_fill_color = "#efefef" - - p.xaxis.axis_label_text_font_size = "14pt" - p.xaxis.axis_label_text_color ='#2D353C' - p.yaxis.axis_label_text_font_size = "14pt" - p.yaxis.axis_label_text_color = '#2D353C' - - p.xaxis.major_tick_line_color = "firebrick" - p.xaxis.major_tick_line_width = 2 - p.xaxis.minor_tick_line_color = "#c85108" diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/power_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/power_tab.py deleted file mode 100644 index 5c64227a1..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/power_tab.py +++ /dev/null @@ -1,300 +0,0 @@ -#! /usr/bin/env python -"""Prepares plots for POWER tab - - Module prepares plots for mnemonics below. Combines plots in a grid and - returns tab object. 
- - Plot 1 - ICE Power Data - GP_ZPSVOLT - SE_ZINRSICEA / SE_ZINRSICEB - INRSH_HK_P15V - INRSH_HK_N15V - INRSH_HK_VMOTOR - INRSH_HK_P5V - INRSH_HK_2P5V - INRSH_HK_ADCTGAIN - INRSH_HK_ADCTOFFSET - INRSH_OA_VREFOFF - INRSH_OA_VREF - - Plot 2 - MCE Power Data - GP_ZPSVOLT - SE_ZINRSMCEA / SE_ZINRSMCEB - - Plot 3 - FPE Power Data - GP_ZPSVOLT - SE_ZINRSFPEA / SE_ZINRSFPEB - INRSD_ALG_ACC_P12C - INRSD_ALG_ACC_N12C - INRSD_ALG_ACC_3D3_1D5_C - INRSD_ALG_CHASSIS - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``nirspec_dashboard.py``, e.g.: - - :: - from .plots.power_tab import power_plots - tab = power_plots(conn, start, end) - -Dependencies ------------- - User must provide database "miri_database.db" - -""" -import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf -from bokeh.models import LinearAxis, Range1d -from bokeh.plotting import figure -from bokeh.models.widgets import Panel, Div -from bokeh.layouts import Column - - -def ice_power(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=700, - x_axis_type='datetime', - y_range=[-20, 20], - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "ICE Power Parameters" - pf.add_basic_layout(p) - - p.extra_y_ranges = {"current": Range1d(start=0, end=0.8)} - #a = pf.add_to_plot(p, "In_VOlt", "GP_ZPSVOLT", start, end, conn, color="red") - b = pf.add_to_plot(p, "ICE A current", "SE_ZINRSICEA", start, end, conn, color="blue", y_axis="current") - c = pf.add_to_plot(p, "P15V", "INRSH_HK_P15V", start, end, conn, color="red") - d = pf.add_to_plot(p, "N15V", "INRSH_HK_N15V", start, end, conn, color="orange") - e = pf.add_to_plot(p, "VMOTOR", "INRSH_HK_VMOTOR", start, end, conn, color="burlywood") - f = pf.add_to_plot(p, "P5V", "INRSH_HK_P5V", start, end, conn, color="green") - g = pf.add_to_plot(p, "2P5V", "INRSH_HK_2P5V", start, end, conn, color="darkgreen") - h = pf.add_to_plot(p, "ADCTGAIN", "INRSH_HK_ADCTGAIN", start, end, conn, color="brown") - i = pf.add_to_plot(p, "ADCOFFSET", "INRSH_HK_ADCTOFFSET", start, end, conn, color="navy") - p.add_layout(LinearAxis(y_range_name="current", axis_label="Current (A)", axis_label_text_color="blue"), 'right') - - pf.add_hover_tool(p, [b, c, d, e, g, f, h, i]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def mce_power(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=400, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Current (A)') - - p.grid.visible = True - p.title.text = "MCE Power Parameters" - pf.add_basic_layout(p) - - b = pf.add_to_plot(p, "MCE A current", "SE_ZINRSMCEA", start, end, conn, color="blue") - - pf.add_hover_tool(p,[b]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def fpe_power(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=700, - y_range=[-30, 280], - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "FPE Power Parameters" - pf.add_basic_layout(p) - - p.extra_y_ranges = {"current": Range1d(start=0, end=0.8)} - # a = pf.add_to_plot(p, "In_VOlt", "GP_ZPSVOLT", start, end, conn, color="red") - b = pf.add_to_plot(p, "FPE A current", "SE_ZINRSFPEA", start, end, conn, color="blue", y_axis="current") - c = pf.add_to_plot(p, "P12C", "INRSD_ALG_ACC_P12C", start, end, conn, color="red") - d = pf.add_to_plot(p, "N15V", "INRSH_HK_N15V", start, end, conn, color="orange") - e = pf.add_to_plot(p, "N12C", "INRSD_ALG_ACC_N12C", start, end, conn, color="burlywood") - f = pf.add_to_plot(p, "1D5", "INRSD_ALG_ACC_3D3_1D5_C", start, end, conn, color="green") - g = pf.add_to_plot(p, "Chassis", "INRSD_ALG_CHASSIS", start, end, conn, color="purple") - p.add_layout(LinearAxis(y_range_name="current", axis_label="Current (A)", axis_label_text_color="blue"), 'right') - - pf.add_hover_tool(p, [b, c, d, e, f, g]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def power_plots(conn, start, end): - '''Combines plots to a tab - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : tab object - used by dashboard.py to set up dashboard - ''' - descr = Div(text= - """ - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Plotname
Mnemonic
Description
ICE Power ParametersGP_ZPSVOLT (missing)
- SE_ZINRSICEA
- INRSH_HK_P15V
- INRSH_HK_N15V
- INRSH_HK_VMOTOR
- INRSH_HK_P5V
- INRSH_HK_2P5V
- INRSH_HK_ADCTGAIN
- INRSH_HK_ADCTOFFSET
- INRSH_OA_VREFOFF
- INRSH_OA_VREF
-
ICE Input Voltage
- ICE Input Current (A side)
- ICE +15V Voltage
- ICE -15V Voltage
- ICE Motor Voltage
- ICE +5V FPGA Voltage
- ICE +2V5 FPGA Voltage
- ICE ADC TM Chain Gain for Calibration
- ICE ADC TM Chain Offset for Calibration
-
MCE Power ParametersGP_ZPSVOLT (missing)
- SE_ZINRSMCEA -
MCE Input Voltage
- MCE Input Current (A side)
-
FPE Power ParametersGP_ZPSVOLT (missing)
- SE_ZINRSFPEA
- INRSD_ALG_ACC_P12C
- INRSD_ALG_ACC_N12C
- INRSD_ALG_ACC_3D3_1D5_C
- INRSD_ALG_CHASSIS
-
FPE Input Voltage
- FPE Input Current (A side)
- ACC +12V Current
- ACC -12V Current
- ACC 3.3/1.5 Supply Current
- Chassis Voltage
-
- - """, width=1100) - - plot1 = ice_power(conn, start, end) - plot2 = mce_power(conn, start, end) - plot3 = fpe_power(conn, start, end) - - layout = Column(descr, plot1, plot2, plot3) - - tab = Panel(child=layout, title="POWER") - - return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/temperature_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/temperature_tab.py deleted file mode 100644 index 1bcd1e355..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/temperature_tab.py +++ /dev/null @@ -1,648 +0,0 @@ -#! /usr/bin/env python -"""Prepares plots for Temperature tab - - Module prepares plots for mnemonics below. Combines plots in a grid and - returns tab object. - - Plot 1 - IRSU monitored temps - SI_GZCTS75A / SI_GZCTS75B - SI_GZCTS68A / SI_GZCTS68B - SI_GZCTS81A / SI_GZCTS81B - SI_GZCTS80A / SI_GZCTS80B - SI_GZCTS76A / SI_GZCTS76B - SI_GZCTS79A / SI_GZCTS79B - SI_GZCTS77A / SI_GZCTS77B - SI_GZCTS78A / SI_GZCTS78B - SI_GZCTS69A / SI_GZCTS69B - - Plot 2 - Box Temps - IGDP_NRSD_ALG_TEMP - INRSH_HK_TEMP1 - INRSH_HK_TEMP2 - - Plot 3 - FPE Power Data - IGDP_NRSI_C_CAM_TEMP - IGDP_NRSI_C_COL_TEMP - IGDP_NRSI_C_COM1_TEMP - IGDP_NRSI_C_FOR_TEMP - IGDP_NRSI_C_IFU_TEMP - IGDP_NRSI_C_BP1_TEMP - IGDP_NRSI_C_BP2_TEMP - IGDP_NRSI_C_BP3_TEMP - IGDP_NRSI_C_BP4_TEMP - IGDP_NRSI_C_RMA_TEMP - IGDP_NRSI_C_CAAL1_TEMP - IGDP_NRSI_C_CAAL2_TEMP - IGDP_NRSI_C_CAAL3_TEMP - IGDP_NRSI_C_CAAL4_TEMP - IGDP_NRSI_C_FWA_TEMP - IGDP_NRSI_C_GWA_TEMP - - Plot 4 - MCE internal Temp - INRSM_MCE_PCA_TMP1 - INRSM_MCE_PCA_TMP2 - INRSM_MCE_AIC_TMP_FPGA - INRSM_MCE_AIC_TMP_ADC - INRSM_MCE_AIC_TMP_VREG - INRSM_MCE_MDAC_TMP_FPGA - INRSM_MCE_MDAC_TMP_OSC - INRSM_MCE_MDAC_TMP_BRD - INRSM_MCE_MDAC_TMP_PHA - INRSM_MCE_MDAC_TMP_PHB - - Plot 5 - MSA Temp - INRSM_Q1_TMP_A - INRSM_Q2_TMP_A - INRSM_Q3_TMP_A - INRSM_Q4_TMP_A - INRSM_MECH_MTR_TMP_A - INRSM_LL_MTR_TMP_A - INRSM_MSA_TMP_A - - Plot 6 - FPA Temp - IGDP_NRSD_ALG_FPA_TEMP - IGDP_NRSD_ALG_A1_TEMP - IGDP_NRSD_ALG_A2_TEMP - - Plot 7 - Heat Strap Temps (Trim heaters) - SI_GZCTS74A / SI_GZCTS74B - SI_GZCTS67A / SI_GZCTS67B - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``nirspec_dashboard.py``, e.g.: - - :: - from .plots.temperature_tab import temperature_plots - tab = temperature_plots(conn, start, end) - -Dependencies ------------- - User must provide database "nirspec_database.db" - -""" -import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf -from bokeh.plotting import figure -from bokeh.models.widgets import Panel, Div -from bokeh.layouts import Column - - -def irsu_temp(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=700, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Temperature (K)') - - p.grid.visible = True - p.title.text = "IRSU monitored Temperatures" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "75A", "SI_GZCTS75A", start, end, conn, color="red") - b = pf.add_to_plot(p, "68A", "SI_GZCTS68A", start, end, conn, color="green") - c = pf.add_to_plot(p, "81A", "SI_GZCTS81A", start, end, conn, color="blue") - d = pf.add_to_plot(p, "80A", "SI_GZCTS80A", start, end, conn, color="orange") - e = pf.add_to_plot(p, "76A", "SI_GZCTS76A", start, end, conn, color="brown") - f = pf.add_to_plot(p, "79A", "SI_GZCTS79A", start, end, conn, color="cyan") - g = pf.add_to_plot(p, "77A", "SI_GZCTS77A", start, end, conn, color="darkmagenta") - h = pf.add_to_plot(p, "78A", "SI_GZCTS78A ", start, end, conn, color="burlywood") - i = pf.add_to_plot(p, "69A", "SI_GZCTS69A ", start, end, conn, color="chocolate") - - pf.add_hover_tool(p, [a, b, c, d, e, f, g, h, i]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def fpe_temp(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=700, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Temperature (K)') - - p.grid.visible = True - p.title.text = "FPE Temperatures" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "CAM", "IGDP_NRSI_C_CAM_TEMP", start, end, conn, color="red") - b = pf.add_to_plot(p, "COL", "IGDP_NRSI_C_COL_TEMP", start, end, conn, color="green") - c = pf.add_to_plot(p, "COM1", "IGDP_NRSI_C_COM1_TEMP", start, end, conn, color="blue") - d = pf.add_to_plot(p, "FOR", "IGDP_NRSI_C_FOR_TEMP", start, end, conn, color="darkorange") - e = pf.add_to_plot(p, "IFU", "IGDP_NRSI_C_IFU_TEMP", start, end, conn, color="cyan") - f = pf.add_to_plot(p, "BP1", "IGDP_NRSI_C_BP1_TEMP", start, end, conn, color="darkmagenta") - g = pf.add_to_plot(p, "BP2", "IGDP_NRSI_C_BP2_TEMP", start, end, conn, color="burlywood") - h = pf.add_to_plot(p, "BP3", "IGDP_NRSI_C_BP3_TEMP", start, end, conn, color="brown") - i = pf.add_to_plot(p, "BP4", "IGDP_NRSI_C_BP4_TEMP", start, end, conn, color="chocolate") - j = pf.add_to_plot(p, "RMA", "IGDP_NRSI_C_RMA_TEMP", start, end, conn, color="darkgreen") - - pf.add_hover_tool(p, [a, b, c, d, e, f, g, h, i, j]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def caal_temp(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=700, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Temperature (K)') - - p.grid.visible = True - p.title.text = "CAA Lamps / FWA, GWA" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "CAAL1", "IGDP_NRSI_C_CAAL1_TEMP", start, end, conn, color="darkblue") - b = pf.add_to_plot(p, "CAAL2", "IGDP_NRSI_C_CAAL2_TEMP", start, end, conn, color="magenta") - c = pf.add_to_plot(p, "CAAL3", "IGDP_NRSI_C_CAAL3_TEMP", start, end, conn, color="mediumaquamarine") - d = pf.add_to_plot(p, "CAAL4", "IGDP_NRSI_C_CAAL4_TEMP", start, end, conn, color="goldenrod") - e = pf.add_to_plot(p, "FWA", "IGDP_NRSI_C_FWA_TEMP", start, end, conn, color="darkseagreen") - f = pf.add_to_plot(p, "GWA", "IGDP_NRSI_C_GWA_TEMP", start, end, conn, color="darkkhaki") - - pf.add_hover_tool(p, [a, b, c, d, e, f]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def box_temp(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=700, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Temperature (K)') - - p.grid.visible = True - p.title.text = "Box Temperatures" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "ALG_TEMP", "IGDP_NRSD_ALG_TEMP", start, end, conn, color="red") - b = pf.add_to_plot(p, "HK_TEMP1", "INRSH_HK_TEMP1", start, end, conn, color="green") - c = pf.add_to_plot(p, "HK_TEMP2", "INRSH_HK_TEMP2", start, end, conn, color="blue") - - pf.add_hover_tool(p, [a, b, c]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def mce_internal_temp(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=700, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Temperature (K)') - - p.grid.visible = True - p.title.text = "MCE internal Temperatures" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "PCA_TMP1", "INRSM_MCE_PCA_TMP1", start, end, conn, color="green") - b = pf.add_to_plot(p, "PCA_TMP2", "INRSM_MCE_PCA_TMP2", start, end, conn, color="blue") - c = pf.add_to_plot(p, "FPGA_AIC", "INRSM_MCE_AIC_TMP_FPGA", start, end, conn, color="brown") - d = pf.add_to_plot(p, "ADC_AIC", "INRSM_MCE_AIC_TMP_ADC", start, end, conn, color="red") - e = pf.add_to_plot(p, "VREG_AIC", "INRSM_MCE_AIC_TMP_VREG", start, end, conn, color="hotpink") - f = pf.add_to_plot(p, "FPGA_MDAC", "INRSM_MCE_MDAC_TMP_FPGA", start, end, conn, color="cadetblue") - g = pf.add_to_plot(p, "OSC_MDAC", "INRSM_MCE_MDAC_TMP_OSC", start, end, conn, color="navy") - h = pf.add_to_plot(p, "BRD_MDAC", "INRSM_MCE_MDAC_TMP_BRD", start, end, conn, color="darkgreen") - i = pf.add_to_plot(p, "PHA_MDAC", "INRSM_MCE_MDAC_TMP_PHA", start, end, conn, color="magenta") - j = pf.add_to_plot(p, "PHB_MDAC", "INRSM_MCE_MDAC_TMP_PHB", start, end, conn, color="orange") - - pf.add_hover_tool(p, [a, b, c, d, e, f, g, h, i, j]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def msa_temp(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=700, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Temperature (K)') - - p.grid.visible = True - p.title.text = "MSA Temperatures" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "Q1_TEMP", "INRSM_Q1_TMP_A", start, end, conn, color="green") - b = pf.add_to_plot(p, "Q2_TEMP", "INRSM_Q2_TMP_A", start, end, conn, color="red") - c = pf.add_to_plot(p, "Q3_TEMP", "INRSM_Q3_TMP_A", start, end, conn, color="blue") - d = pf.add_to_plot(p, "Q4_TEMP", "INRSM_Q4_TMP_A", start, end, conn, color="brown") - e = pf.add_to_plot(p, "MECH_MTR", "INRSM_MECH_MTR_TMP_A", start, end, conn, color="orange") - f = pf.add_to_plot(p, "LL_MTR", "INRSM_LL_MTR_TMP_A", start, end, conn, color="darkmagenta") - g = pf.add_to_plot(p, "MSA", "INRSM_MSA_TMP_A", start, end, conn, color="indigo") - - pf.add_hover_tool(p, [a, b, c, d, e, f, g]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def fpa_temp(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=700, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Temperature (K)') - - p.grid.visible = True - p.title.text = "FPA Temperatures" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "ALG_FPA", "IGDP_NRSD_ALG_FPA_TEMP", start, end, conn, color="green") - b = pf.add_to_plot(p, "ALG_A1", "IGDP_NRSD_ALG_A1_TEMP", start, end, conn, color="red") - c = pf.add_to_plot(p, "ALG_A2", "IGDP_NRSD_ALG_A2_TEMP", start, end, conn, color="blue") - - pf.add_hover_tool(p, [a, b, c]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def heat_strap_temp(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=700, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Temperature (K)') - - p.grid.visible = True - p.title.text = "Heat Strap Temperatures (Trim heaters)" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "74A", "SI_GZCTS74A", start, end, conn, color="green") - b = pf.add_to_plot(p, "67A", "SI_GZCTS67A", start, end, conn, color="red") - - pf.add_hover_tool(p, [a, b]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - - return p - - -def temperature_plots(conn, start, end): - '''Combines plots to a tab - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : tab object - used by dashboard.py to set up dashboard - ''' - descr = Div(text= - """ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Plotname
Mnemonic
Description
IRSU monitored TemperaturesSI_GZCTS75A
- SI_GZCTS68A
- SI_GZCTS81A
- SI_GZCTS80A
- SI_GZCTS76A
- SI_GZCTS79A
- SI_GZCTS77A
- SI_GZCTS78A
- SI_GZCTS69A
CAA IRSU Temperature
- CAM IRSU Temperature
- COM1 Nominal IRSU Temperature
- COM1 Redundant IRSU Temperature
- FWA IRSU Temperature
- GWA IRSU Temperature
- Thermal Strap Nominal IRSU Temperature
- Thermal Strap Redundant IRSU Temperature
- MSA Nominal IRSU Temperature
- MSA Redundant IRSU Temperature
FPE TemperaturesIGDP_NRSI_C_CAM_TEMP
- IGDP_NRSI_C_COL_TEMP
- IGDP_NRSI_C_COM1_TEMP
- IGDP_NRSI_C_FOR_TEMP
- IGDP_NRSI_C_IFU_TEMP
- IGDP_NRSI_C_BP1_TEMP
- IGDP_NRSI_C_BP2_TEMP
- IGDP_NRSI_C_BP3_TEMP
- IGDP_NRSI_C_BP4_TEMP
- IGDP_NRSI_C_RMA_TEMP
OA CAM Temperature
- OA COL Temperature
- OA COM1 Temperature
- OA FOR Temperature
- OA IFU Temperature
- OA BP1 Temperature
- OA BP2 Temperature
- OA BP3 Temperature
- OA BP4 Temperature
- OA RMA Temperature
Box TemperaturesIGDP_NRSD_ALG_TEMP
- INRSH_HK_TEMP1
- INRSH_HK_TEMP2
ICE Internal Temperature 1
- ICE Internal Temperature 2
MCE internal TemperaturesINRSM_MCE_PCA_TMP1
- INRSM_MCE_PCA_TMP2
- INRSM_MCE_AIC_TMP_FPGA
- INRSM_MCE_AIC_TMP_ADC
- INRSM_MCE_AIC_TMP_VREG
- INRSM_MCE_MDAC_TMP_FPGA
- INRSM_MCE_MDAC_TMP_OSC
- INRSM_MCE_MDAC_TMP_BRD
- INRSM_MCE_MDAC_TMP_PHA
- INRSM_MCE_MDAC_TMP_PHB
MCE PCA Board Temperature 1
- MCE PCA Board Temperature 2
- MCE AIC Board FPGA Temperature
- MCE AIC Board Analog/Digital Converter Temperature
- MCE AIC Board Voltage Regulator Temperature
- MCE MDAC Board FPGA Temperature
- MCE MDAC Board Oscillator Temperature
- MCE MDAC Board Temperature
- MCE MDAC Board Phase A PA10 Temperature
- MCE MDAC Board Phase B PA10 Temperature
MSA TemperaturesINRSM_Q1_TMP_A
- INRSM_Q2_TMP_A
- INRSM_Q3_TMP_A
- INRSM_Q4_TMP_A
- INRSM_MECH_MTR_TMP_A
- INRSM_LL_MTR_TMP_A
- INRSM_MSA_TMP_A
MSA Quad 1 Temperature
- MSA Quad 2 Temperature
- MSA Quad 3 Temperature
- MSA Quad 4 Temperature
- MSA Magnetic Arm Motor Temperature
- MSA Launch Lock Motor Temperature
- MSA Frame Temperature
FPA TemperaturesIGDP_NRSD_ALG_FPA_TEMP
- IGDP_NRSD_ALG_A1_TEMP
- IGDP_NRSD_ALG_A2_TEMP
FPE Temperature
- FPA Temperature
- ASIC 1 Temperature
- ASIC 2 Temperature
Heat Strap Temperatures (Trim Heaters)SI_GZCTS74A
- SI_GZCTS67A
FPA TH-Strap A Temperature from IRSU A
- FPA TH-Strap B Temperature from IRSU A
CAA Lamps / FWA,GWAIGDP_NRSI_C_CAAL1_TEMP
- IGDP_NRSI_C_CAAL2_TEMP
- IGDP_NRSI_C_CAAL3_TEMP
- IGDP_NRSI_C_CAAL4_TEMP
- IGDP_NRSI_C_FWA_TEMP
- IGDP_NRSI_C_GWA_TEMP
CAA Temperature LINE1
- CAA Temperature LINE2
- CAA Temperature LINE3
- CAA Temperature LINE4
- FWA Temperature Sensor Value
- GWA Temperature Sensor Value
- - """, width=1100) - - plot1 = irsu_temp(conn, start, end) - plot2 = fpe_temp(conn, start, end) - plot3 = box_temp(conn, start, end) - plot4 = mce_internal_temp(conn, start, end) - plot5 = msa_temp(conn, start, end) - plot6 = fpa_temp(conn, start, end) - plot7 = heat_strap_temp(conn, start, end) - plot8 = caal_temp(conn, start, end) - - layout = Column(descr, plot1, plot2, plot3, plot4, plot5, plot6, plot7, plot8) - - tab = Panel(child=layout, title="TEMPERATURE") - - return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/voltage_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/voltage_tab.py deleted file mode 100644 index ccb4ae18d..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/voltage_tab.py +++ /dev/null @@ -1,258 +0,0 @@ -#! /usr/bin/env python -"""Prepares plots for Ref. Voltage/Currents tab - - Module prepares plots for mnemonics below. Combines plots in a grid and - returns tab object. - - Plot 1 - Ref Voltages - INRSH_FWA_MOTOR_VREF - INRSH_GWA_MOTOR_VREF - INRSH_OA_VREF - - Plot 2 - ADCMGAIN (Voltages) - INRSH_FWA_ADCMGAIN - INRSH_GWA_ADCMGAIN - INRSH_RMA_ADCMGAIN - - Plot 3 - OFFSET (Voltages) - INRSH_GWA_ADCMOFFSET - INRSH_FWA_ADCMOFFSET - INRSH_OA_VREFOFF - INRSH_RMA_ADCMOFFSET - - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``nirspec_dashboard.py``, e.g.: - - :: - from .plots.voltage_tab import voltage_plots - tab = voltage_plots(conn, start, end) - -Dependencies ------------- - User must provide database "nirpsec_database.db" - -""" -import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf -from bokeh.plotting import figure -from bokeh.models.widgets import Panel, Div -from bokeh.layouts import Column - - -def ref_volt(conn, start, end): - '''Create specific plot and return plot object - - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "Ref Voltages" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "FWA_MOTOR_VREF", "INRSH_FWA_MOTOR_VREF", start, end, conn, color="green") - b = pf.add_to_plot(p, "GWA_MOTOR_VREF", "INRSH_GWA_MOTOR_VREF", start, end, conn, color="blue") - c = pf.add_to_plot(p, "OA_VREF", "INRSH_OA_VREF", start, end, conn, color="red") - - pf.add_hover_tool(p, [a, b, c]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - return p - - -def gain_volt(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "ADCMAIN" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "FWA_ADCMGAIN", "INRSH_FWA_ADCMGAIN", start, end, conn, color="green") - b = pf.add_to_plot(p, "GWA_ADCMGAIN", "INRSH_GWA_ADCMGAIN", start, end, conn, color="blue") - c = pf.add_to_plot(p, "RMA_ADCMGAIN", "INRSH_RMA_ADCMGAIN", start, end, conn, color="red") - - # pf.add_hover_tool(p,[a,b,c]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - return p - - -def offset_volt(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - x_axis_type='datetime', - output_backend="webgl", - x_axis_label='Date', - y_axis_label='Voltage (V)') - - p.grid.visible = True - p.title.text = "OFFSET" - pf.add_basic_layout(p) - - a = pf.add_to_plot(p, "GWA_ADCMOFFSET", "INRSH_GWA_ADCMOFFSET", start, end, conn, color="blue") - b = pf.add_to_plot(p, "FWA_ADCMOFFSET", "INRSH_FWA_ADCMOFFSET", start, end, conn, color="green") - c = pf.add_to_plot(p, "OA_VREFOFF", "INRSH_OA_VREFOFF", start, end, conn, color="orange") - d = pf.add_to_plot(p, "RMA_ADCMOFFSET", "INRSH_RMA_ADCMOFFSET", start, end, conn, color="red") - - pf.add_hover_tool(p, [a, b, c, d]) - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = "horizontal" - - return p - - -def volt_plots(conn, start, end): - '''Combines plots to a tab - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : tab object - used by dashboard.py to set up dashboard - ''' - descr = Div(text= - """ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Plotname
Mnemonic
Description
Ref VoltagesINRSH_FWA_MOTOR_VREF
- INRSH_GWA_MOTOR_VREF
- INRSH_OA_VREF
FWA Motor Reference Voltage for Calibration
- GWA Motor Reference Voltage for Calibration
- OA/RMA Reference Voltage for TM Calibration
-
ADCMGAININRSH_FWA_ADCMGAIN
- INRSH_GWA_ADCMGAIN
- INRSH_RMA_ADCMGAIN
FWA ADC Motor Chain Gain for Calibration
- GWA ADC Motor Chain Gain for Calibration
- RMA ADC Motor Chain Gain for Calibration
-
OFFSETINRSH_FWA_ADCMOFFSET
- INRSH_GWA_ADCMOFFSET
- INRSH_OA_VREFOFF
- INRSH_RMA_ADCMOFFSET
FWA ADC Motor Chain Offset for Calibration
- GWA ADC Motor Chain Offset for Calibration
- CAA Reference Voltage Offset for TM Calibration
- RMA ADC Motor Chain Offset for Calibration
-
- - """, width=1100) - - plot1 = ref_volt(conn, start, end) - plot2 = gain_volt(conn, start, end) - plot3 = offset_volt(conn, start, end) - - layout = Column(descr, plot1, plot2, plot3) - - tab = Panel(child=layout, title="REF VOLTAGES") - - return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/wheel_tab.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/wheel_tab.py deleted file mode 100644 index 577bfb55d..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/plots/wheel_tab.py +++ /dev/null @@ -1,256 +0,0 @@ -#! /usr/bin/env python -"""Prepares plots for WHEEL tab - - Module prepares plots for mnemonics below. Combines plots in a grid and - returns tab object. - - Plot 1 - Filterwheel - INRSI_FWA_MECH_POS - INRSI_C_FWA_POSITION - - Plot 2 - Gratingwheel X - INRSI_GWA_MECH_POS - INRSI_C_GWA_X_POSITION - - Plot 3 - Gratingwheel Y - INRSI_GWA_MECH_POS - INRSI_C_GWA_Y_POSITION - -Authors -------- - - Daniel Kühbacher - -Use ---- - The functions within this module are intended to be imported and - used by ``dashboard.py``, e.g.: - - :: - from .plots.wheel_ratio_tab import wheel_plots - tab = wheel_plots(conn, start, end) - -Dependencies ------------- - User must provide database "miri_database.db" - -""" - -import jwql.instrument_monitors.nirspec_monitors.data_trending.plots.plot_functions as pf -import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as mn -from bokeh.plotting import figure -from bokeh.models.widgets import Panel, Div -from bokeh.layouts import column - - -def fw(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - y_range=[-3, 3], - x_axis_type='datetime', - x_axis_label='Date', - y_axis_label='mV (normalized)') - - p.grid.visible = True - p.title.text = "Filterwheel" - p.title.align = "left" - pf.add_basic_layout(p) - - pf.add_to_plot_normalized(p, "F110W", "INRSI_C_FWA_POSITION_F110W", start, end, conn, mn.fw_nominals['F110W'], color="green") - pf.add_to_plot_normalized(p, "F100LP", "INRSI_C_FWA_POSITION_F100LP", start, end, conn, mn.fw_nominals['F100LP'], color="red") - pf.add_to_plot_normalized(p, "F140X", "INRSI_C_FWA_POSITION_F140X", start, end, conn, mn.fw_nominals['F140X'], color="blue") - pf.add_to_plot_normalized(p, "OPAQUE", "INRSI_C_FWA_POSITION_OPAQUE", start, end, conn, mn.fw_nominals['OPAQUE'], color="orange") - pf.add_to_plot_normalized(p, "F290LP", "INRSI_C_FWA_POSITION_F290LP", start, end, conn, mn.fw_nominals['F290LP'], color="purple") - pf.add_to_plot_normalized(p, "F170LP", "INRSI_C_FWA_POSITION_F170LP", start, end, conn, mn.fw_nominals['F170LP'], color="brown") - pf.add_to_plot_normalized(p, "CLEAR", "INRSI_C_FWA_POSITION_CLEAR", start, end, conn, mn.fw_nominals['CLEAR'], color="chocolate") - pf.add_to_plot_normalized(p, "F070LP", "INRSI_C_FWA_POSITION_F070LP", start, end, conn, mn.fw_nominals['F070LP'], color="darkmagenta") - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = 'horizontal' - return p - - -def gwx(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - y_range=[-4, 4], - x_axis_type='datetime', - x_axis_label='Date', - y_axis_label='mV (normalized)') - - p.grid.visible = True - p.title.text = "Gratingwheel X" - p.title.align = "left" - pf.add_basic_layout(p) - - pf.add_to_plot_normalized(p, "PRISM", "INRSI_C_GWA_X_POSITION_PRISM", start, end, conn, mn.gwx_nominals['PRISM'], color="green") - pf.add_to_plot_normalized(p, "MIRROR", "INRSI_C_GWA_X_POSITION_MIRROR", start, end, conn, mn.gwx_nominals['MIRROR'], color="blue") - pf.add_to_plot_normalized(p, "G140H", "INRSI_C_GWA_X_POSITION_G140H", start, end, conn, mn.gwx_nominals['G140H'], color="red") - pf.add_to_plot_normalized(p, "G235H", "INRSI_C_GWA_X_POSITION_G235H", start, end, conn, mn.gwx_nominals['G235H'], color="purple") - pf.add_to_plot_normalized(p, "G395H", "INRSI_C_GWA_X_POSITION_G395H", start, end, conn, mn.gwx_nominals['G395H'], color="orange") - pf.add_to_plot_normalized(p, "G140M", "INRSI_C_GWA_X_POSITION_G140M", start, end, conn, mn.gwx_nominals['G140M'], color="brown") - pf.add_to_plot_normalized(p, "G235M", "INRSI_C_GWA_X_POSITION_G235M", start, end, conn, mn.gwx_nominals['G235M'], color="darkmagenta") - pf.add_to_plot_normalized(p, "G395M", "INRSI_C_GWA_X_POSITION_G395M", start, end, conn, mn.gwx_nominals['G395M'], color="darkcyan") - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = 'horizontal' - - return p - - -def gwy(conn, start, end): - '''Create specific plot and return plot object - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. 
datetime.now()) - Return - ------ - p : Plot object - Bokeh plot - ''' - - # create a new plot with a title and axis labels - p = figure(tools="pan,wheel_zoom,box_zoom,reset,save", - toolbar_location="above", - plot_width=1120, - plot_height=500, - y_range=[-3, 3], - x_axis_type='datetime', - x_axis_label='Date', - y_axis_label='mV (normalized)') - - p.grid.visible = True - p.title.text = "Gratingwheel Y" - p.title.align = "left" - pf.add_basic_layout(p) - - pf.add_to_plot_normalized(p, "PRISM", "INRSI_C_GWA_Y_POSITION_PRISM", start, end, conn, mn.gwy_nominals['PRISM'], color="green") - pf.add_to_plot_normalized(p, "MIRROR", "INRSI_C_GWA_Y_POSITION_MIRROR", start, end, conn, mn.gwy_nominals['MIRROR'], color="blue") - pf.add_to_plot_normalized(p, "G140H", "INRSI_C_GWA_Y_POSITION_G140H", start, end, conn, mn.gwy_nominals['G140H'], color="red") - pf.add_to_plot_normalized(p, "G235H", "INRSI_C_GWA_Y_POSITION_G235H", start, end, conn, mn.gwy_nominals['G235H'], color="purple") - pf.add_to_plot_normalized(p, "G395H", "INRSI_C_GWA_Y_POSITION_G395H", start, end, conn, mn.gwy_nominals['G395H'], color="orange") - pf.add_to_plot_normalized(p, "G140M", "INRSI_C_GWA_Y_POSITION_G140M", start, end, conn, mn.gwy_nominals['G140M'], color="brown") - pf.add_to_plot_normalized(p, "G235M", "INRSI_C_GWA_Y_POSITION_G235M", start, end, conn, mn.gwy_nominals['G235M'], color="darkmagenta") - pf.add_to_plot_normalized(p, "G395M", "INRSI_C_GWA_Y_POSITION_G395M", start, end, conn, mn.gwy_nominals['G395M'], color="darkcyan") - - p.legend.location = "bottom_right" - p.legend.click_policy = "hide" - p.legend.orientation = 'horizontal' - return p - - -def wheel_pos(conn, start, end): - '''Combine plots to a tab - Parameters - ---------- - conn : DBobject - Connection object that represents database - start : time - Startlimit for x-axis and query (typ. datetime.now()- 4Months) - end : time - Endlimit for x-axis and query (typ. datetime.now()) - Return - ------ - p : tab object - used by dashboard.py to set up dashboard - ''' - descr = Div(text= - """ - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Plotname
Mnemonic
Description
FilterwheelINRSI_FWA_MECH_POS
- INRSI_C_FWA_POSITION
Position Sensor Value
- Current Position
Gratingwheel XINRSI_GWA_MECH_POS
- INRSI_C_GWA_X_POSITION
Position X Sensor Value
- Current Position
Gratingwheel YINRSI_GWA_MECH_POS
- INRSI_C_GWA_Y_POSITION
Position Y Sensor Value
- Current Position
- - """, width=1100) - - plot1 = fw(conn, start, end) - plot2 = gwx(conn, start, end) - plot3 = gwy(conn, start, end) - - layout = column(descr, plot1, plot2, plot3) - tab = Panel(child=layout, title="FILTER/GRATINGWHEEL") - - return tab diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/__init__.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/condition.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/condition.py deleted file mode 100644 index 4a58bc1b5..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/condition.py +++ /dev/null @@ -1,402 +0,0 @@ -#! /usr/bin/env python -"""Module generates conditions over one or more mnemonics - -The modules purpose is to return True/False for any times by reference of -certain conditions. If for instance the condition "x>1" over a defined period -of time is needed, the module looks for all elements where the condition -applies and where it does not apply. This generates two lists, which contain -the "start" and "end" times of the condition. -A futher function combines the start- and endtimes to time-tuples between which -the condition is known as TRUE. A "state" function returns True/False for an -exact time attribute, whereby the condition is represented in binary form. - -Authors -------- - - Daniel Kühbacher - -Use ---- - This module is not prepared for standalone use. - - For use in programm set condition up like below: - - import the module as follow: - >>>import condition as cond - - generate list with required conditions: - >>>con_set = [ cond.equal(m.mnemonic('IMIR_HK_POM_LOOP'),'OFF'), - cond.smaller(m.mnemonic('IMIR_HK_ICE_SEC_VOLT1'),1), - cond.greater(m.mnemonic('SE_ZIMIRICEA'),0.2)] - - generate object of condition with the con_set as attribute: - >>>condition_object=cond.condition(con_set) - - Now the condition_object can return a True/False statement wheather - the time given as attribut meets the conditions: - - >>>if condition.state(float(element['Primary Time'])): - -> True when condition for the given time applies - -> False when condition for the given time is not applicable - -Dependencies ------------- - no external files needed - -References ----------- - -Notes ------ - -""" - - -class condition: - """Class to hold several subconditions""" - - # contains list of representative time pairs for each subcondition - cond_time_pairs = [] - # state of the condition - __state = False - - # initializes condition through condition set - def __init__(self, cond_set): - """Initialize object with set of conditions - Parameters - ---------- - cond_set : list - list contains subconditions objects - """ - self.cond_set = cond_set - - # destructor -> take care that all time_pairs are deleted! 
- def __del__(self): - """Delete object - destructor method""" - del self.cond_time_pairs[:] - - # prints all stored time pairs (for developement only) - def print_times(self): - """Print conditions time pairs on command line (developement)""" - print('Available time pairs:') - for times in self.cond_time_pairs: - print('list: '+str(times)) - - # returns a interval if time is anywhere in between - def get_interval(self, time): - """Returns time interval if availlable, where "time" is in between - Parameters - ---------- - time : float - given time attribute - Return - ------ - time_pair : tuple - pair of start_time and end_time where time is in between - """ - end_time = 10000000 - start_time = 0 - - # do for every condition - for cond in self.cond_time_pairs: - # do for every time pair in condition - for pair in cond: - if (time > pair[0]) and (time < pair[1]): - if (end_time > pair[1]) and (start_time < pair[0]): - start_time = pair[0] - end_time = pair[1] - break - else: - break - - if (end_time != 10000000) and (start_time != 0): - return [start_time, end_time] - else: - return None - - # generates time pairs out of start and end times - def generate_time_pairs(start_times, end_times): - """Forms time pairs out of start times and end times - Parameters - ---------- - start_times : list - contains all times where a condition applies - end_times : list - contains all times where the condition does not apply - Return - ------ - time_pair : list - list of touples with start and end time - """ - # internal use only - time_pair: float = [] - - # when the conditons doesn´t apply anyway - if not start_times: - time_pair.append((0,0)) - - # check if the condition indicates an open time range - elif not end_times: - time_pair.append((start_times[0], 0)) - - # generate time pairs - # for each start time a higher or equal end time is searched for - # these times form am touple which is appended to time_pair : list - else: - time_hook = 0 - last_start_time = 0 - - for start in list(sorted(set(start_times))): - - if(start > time_hook): - for end in list(sorted(set(end_times))): - - if end > start: - - time_pair.append((start, end)) - time_hook = end - break - - if list(sorted(set(start_times)))[-1] > list(sorted(set(end_times)))[-1]: - time_pair.append((list(sorted(set(end_times)))[-1], 0)) - - return(time_pair) - - # returns state of the condition at a given time - # if state(given time)==True -> condition is true - # if state(given time)==False -> condition is false - def state(self, time): - """Checks whether condition is true of false at a given time - Parameters - ---------- - time : float - input time for condition query - Return - ------ - state : bool - True/False statement whether the condition applies or not - """ - # checks condition for every sub condition in condition set (subconditions) - - state = self.__state - - for cond in self.cond_time_pairs: - - if self.__check_subcondition(cond, time): - state = True - else: - state = False - break - - return state - - def __check_subcondition(self, cond, time): - - # if there are no values availlable - if cond[0][0] == 0: - return False - - for time_pair in cond: - # if just a positive time is availlable, return true - if (time_pair[1] == 0) and (time > time_pair[0]): - - return True - - # if given time occurs between a time pair, return true - elif (time_pair[0]) <= time and (time < time_pair[1]): - - return True - - else: - pass - - -class equal(condition): - """Class to hold single "is equal" subcondition""" - - stringval = True - - # add 
attributes to function - start function "cond_time_pairs()" - def __init__(self, mnemonic, value, stringval=True): - """Initializes subconditon - Parameters - ---------- - mnemonic : astropy table - includes mnemomic engineering data and corresponding primary time - value : str - coparison value for equal statement - """ - self.mnemonic = mnemonic - self.value = value - self.stringval = stringval - condition.cond_time_pairs.append((self.cond_true_time())) - - # generates a list of time-touples (start_time, end_time) that mark - # the beginning and end of whether the condition is true or not - def cond_true_time(self): - """Filters all values that are equal to a given comparison value - if equal: Primary time -> temp_start - if not equal: Primary time -> temp_end - Return - ------ - time_p : list - list of touples with start and end time - """ - temp_start = [] - temp_end = [] - - for key in self.mnemonic: - - # find all times whoses Raw values equal the given value - if self.stringval: - if key['value'] == self.value: - temp_start.append(key["time"]) - - # find all end values - else: - temp_end.append(key["time"]) - else: - # just another option to compare float values - if float(key['value']) == self.value: - temp_start.append(key["time"]) - - # find all end values - else: - temp_end.append(key["time"]) - - time_p = condition.generate_time_pairs(temp_start, temp_end) - return time_p - - -class unequal(condition): - """Class to hold single "is unequal" subcondition""" - - # add attributes to function - start function "cond_time_pairs()" - def __init__(self, mnemonic, value): - """Initializes subconditon - Parameters - ---------- - mnemonic : astropy table - includes mnemomic engineering data and corresponding primary time - value : str - coparison value for equal statement - """ - self.mnemonic = mnemonic - self.value = value - condition.cond_time_pairs.append((self.cond_true_time())) - - # generates a list of time-touples (start_time, end_time) that mark - # the beginning and end of whether the condition is true or not - def cond_true_time(self): - """Filters all values that are equal to a given comparison value - if equal: Primary time -> temp_start - if not equal: Primary time -> temp_end - Return - ------ - time_p : list - list of touples with start and end time - """ - temp_start = [] - temp_end = [] - - for key in self.mnemonic: - - # find all times whoses Raw values equal the given value - if key['value'] != self.value: - temp_start.append(key["time"]) - - # find all end values - else: - temp_end.append(key["time"]) - - time_p = condition.generate_time_pairs(temp_start, temp_end) - return time_p - - -class greater(condition): - """Class to hold single "greater than" subcondition""" - - # add attributes to function - start function "cond_time_pairs()" - def __init__(self, mnemonic, value): - """Initializes subconditon - Parameters - ---------- - mnemonic : astropy table - includes mnemomic engineering data and corresponding primary time - value : str - coparison value for equal statement - """ - self.mnemonic= mnemonic - self.value=value - condition.cond_time_pairs.append((self.cond_true_time())) - - def cond_true_time(self): - """Filters all values that are greater than a given comparison value - if equal: Primary time -> temp_start - if not equal: Primary time -> temp_end - Return - ------ - time_p : list - list of touples with start and end time - """ - temp_start: float = [] - temp_end: float = [] - - for key in self.mnemonic: - - # find all times whose Raw values are grater than 
the given value - if float(key['value']) > self.value: - temp_start.append(key["time"]) - - # find all end values - else: - temp_end.append(key["time"]) - - time_p = condition.generate_time_pairs(temp_start, temp_end) - return time_p - - -class smaller(condition): - """Class to hold single "greater than" subcondition""" - - # add attributes to function - start function "cond_time_pairs()" - def __init__(self, mnemonic, value): - """Initializes subconditon - Parameters - ---------- - mnemonic : astropy table - includes mnemomic engineering data and corresponding primary time - value : str - coparison value for equal statement - """ - self.mnemonic=mnemonic - self.value=value - condition.cond_time_pairs.append((self.cond_true_time())) - - def cond_true_time(self): - """Filters all values that are greater than a given comparison value - if equal: Primary time -> temp_start - if not equal: Primary time -> temp_end - Return - ------ - time_p : list - list of touples with start and end time - """ - temp_start: float = [] - temp_end: float = [] - - for key in self.mnemonic: - - # find all times whose Raw values are grater than the given value - if float(key['value']) < self.value: - temp_start.append(key["time"]) - - # find all end values - else: - temp_end.append(key["time"]) - - time_p = condition.generate_time_pairs(temp_start, temp_end) - return time_p - - -if __name__ == '__main__': - pass diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/csv_to_AstropyTable.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/csv_to_AstropyTable.py deleted file mode 100644 index 99c97252e..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/csv_to_AstropyTable.py +++ /dev/null @@ -1,149 +0,0 @@ -#! /usr/bin/env python -"""Module for importing and sorting mnemonics - -This module imports a whole set of mnemonics from a .CSV sheet and converts it -to an astropy table. In a second step the table is sorted by its mnemoncis -and for each mnemmonic another astropy table with reduced content is created. -The last step is to append the data (time and engineering value) with its -mnemonic identifier as key to a dictionary. 
- -Authors -------- - - Daniel Kühbacher - -Use ---- - - -Dependencies ------------- - mnemonics.py -> includes a list of mnemonics to be evaluated - -References ----------- - -Notes ------ - -""" -from astropy.table import Table -from astropy.time import Time -import warnings -import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as mn - - -class mnemonics: - """class to hold a set of mnemonics""" - - __mnemonic_dict = {} - - def __init__(self, import_path): - """main function of this class - Parameters - ---------- - import_path : str - defines file to import (csv sheet) - """ - imported_data = self.import_CSV(import_path) - length = len(imported_data) - - print('{} was imported - {} lines'.format(import_path, length)) - - # look for every mnmonic given in mnemonicy.py - for mnemonic_name in mn.mnemonic_set_query: - temp = self.sort_mnemonic(mnemonic_name, imported_data) - # append temp to dict with related mnemonic - if temp is not None: - self.__mnemonic_dict.update({mnemonic_name: temp}) - else: - warnings.warn("fatal error") - - def import_CSV(self, path): - """imports csv sheet and converts it to AstropyTable - Parameters - ---------- - path : str - defines path to file to import - Return - ------ - imported_data : AstropyTable - container for imported data - """ - # read data from given *CSV file - imported_data = Table.read(path, format='ascii.basic', delimiter=',') - return imported_data - - # returns table of single mnemonic - def mnemonic(self, name): - """Returns table of one single mnemonic - Parameters - ---------- - name : str - name of mnemonic - Return - ------ - __mnemonic_dict[name] : AstropyTable - corresponding table to mnemonic name - """ - try: - return self.__mnemonic_dict[name] - except KeyError: - print('{} not in list'.format(name)) - - # looks for given mnemonic in given table - # returns list containing astropy tables with sorted mnemonics and - # engineering values adds useful meta data to Table - def sort_mnemonic(self, mnemonic, table): - """Looks for all values in table with identifier "mnemonic" - Converts time string to mjd format - Parameters - ---------- - mnemonic : str - identifies which mnemonic to look for - table : AstropyTable - table that stores mnemonics and data - Return - ------ - mnemonic_table : AstropyTable - stores all data associated with identifier "mnemonic" - """ - - temp1: float = [] - temp2 = [] - - # appends present mnemonic data to temp arrays temp1 and temp2 - for item in table: - try: - if item['Telemetry Mnemonic'] == mnemonic: - # convert time string to mjd format - temp = item['Secondary Time'].replace('/', '-').replace(' ', 'T') - t = Time(temp, format='isot') - - temp1.append(t.mjd) - temp2.append(item['EU Value']) - except KeyError: - warnings.warn("{} is not in mnemonic table".format(mnemonic)) - - description = ('time','value') - data = [temp1, temp2] - - # add some meta data - if len(temp1) > 0: - date_start = temp1[0] - date_end = temp1[len(temp1)-1] - info = {'start': date_start, 'end': date_end} - else: - info = {"n": "n"} - - # add name of mnemonic to meta data of list - info['mnemonic'] = mnemonic - info['len'] = len(temp1) - - # table to return - mnemonic_table = Table(data, names=description, - dtype=('f8', 'str'), meta=info) - return mnemonic_table - - -if __name__ == '__main__': - pass diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/mnemonics.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/mnemonics.py deleted file mode 100644 index 
cf9515dc0..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/mnemonics.py +++ /dev/null @@ -1,541 +0,0 @@ -"""Module lists all neccessary mnemonics for NIRSpec data trending - -The module includes several lists to import to NIRSpec data trending monitor program. -The lists are used for data aquisation and to set up the initial database. - -Authors -------- - - Daniel Kühbacher - -Use ---- - import mnemoncis as mn - -References ----------- - JWQL_NIRSpec_INputs_V4[2414].xlsx - -Notes ------ - -""" - -# mnemonics underlaying certain conditions 15min -# INRSD_EXP_STAT != STARTED -mnemonic_cond_1 = [ -"SE_ZINRSFPEA", -"SE_ZINRSFPEB"] - -# mnemonics underlaying condition 15min -# INRSH_LAMP_SEL = NO_LAMP -mnemonic_cond_2 = [ -"SE_ZINRSICEA", -"SE_ZINRSICEB"] - -mnemonic_cond_3 = [ -"SE_ZINRSMCEA", -"SE_ZINRSMCEB"] - -# menmonics applicable when CAA is powered -# INRSH_CAA_PWRF_ST = ON -mnemonic_caa = [ -"IGDP_NRSI_C_CAAL1_TEMP", -"IGDP_NRSI_C_CAAL2_TEMP", -"IGDP_NRSI_C_CAAL3_TEMP", -"IGDP_NRSI_C_CAAL4_TEMP"] - -# only applicable when Filter table 10 is set -mnemonic_ft10 = [ -"INRSH_OA_VREFOFF", -"INRSH_OA_VREF", - -"INRSH_CAA_VREFOFF", -"INRSH_CAA_VREF", - -"INRSH_FWA_ADCMGAIN", -"INRSH_FWA_ADCMOFFSET", -"INRSH_FWA_MOTOR_VREF", - -"INRSH_GWA_ADCMGAIN", -"INRSH_GWA_ADCMOFFSET", -"INRSH_GWA_MOTOR_VREF", - -"INRSH_RMA_ADCMGAIN", -"INRSH_RMA_ADCMOFFSET"] - -# all mnemonics used for conditions -mnemonic_for_conditions = [ -"INRSM_MOVE_STAT", -"INRSH_WHEEL_MOT_SVREF", -"INRSI_CAA_ON_FLAG", -"INRSH_LAMP_SEL", -"INRSD_EXP_STAT", - -"INRSH_CAA_PWRF_ST", - -"INRSI_FWA_MOVE_ST", -"INRSI_FWA_MECH_POS", -"INRSI_GWA_MOVE_ST", -"INRSI_GWA_MECH_POS", - -"INRSI_C_FWA_POSITION", -"INRSI_C_GWA_X_POSITION", -"INRSI_C_GWA_Y_POSITION", - -"ICTM_RT_FILTER"] - -# these mnemonic are used by the day routine -mnemSet_day = [ -"INRSM_MSA_Q1_365VDD", -"INRSM_MSA_Q1_365VPP", -"INRSM_MSA_Q1_171VPP", -"IGDPM_MSA_Q1_365IDD", -"IGDPM_MSA_Q1_365IPP", -"IGDPM_MSA_Q1_171RTN", - -"INRSM_MSA_Q2_365VDD", -"INRSM_MSA_Q2_365VPP", -"INRSM_MSA_Q2_171VPP", -"IGDPM_MSA_Q2_365IDD", -"IGDPM_MSA_Q2_365IPP", -"IGDPM_MSA_Q2_171RTN", - -"INRSM_MSA_Q3_365VDD", -"INRSM_MSA_Q3_365VPP", -"INRSM_MSA_Q3_171VPP", -"IGDPM_MSA_Q3_365IDD", -"IGDPM_MSA_Q3_365IPP", -"IGDPM_MSA_Q3_171RTN", - -"INRSM_MSA_Q4_365VDD", -"INRSM_MSA_Q4_365VPP", -"INRSM_MSA_Q4_171VPP", -"IGDPM_MSA_Q4_365IDD", -"IGDPM_MSA_Q4_365IPP", -"IGDPM_MSA_Q4_171RTN", - -"IGDP_NRSD_ALG_FPA_TEMP", -"IGDP_NRSD_ALG_A1_TEMP", -"IGDP_NRSD_ALG_A2_TEMP", -"IGDP_NRSI_C_FWA_TEMP", -"IGDP_NRSI_C_GWA_TEMP", - -"SI_GZCTS74A", -"SI_GZCTS74B", -"SI_GZCTS67A", -"SI_GZCTS67B"] - -# these mnemonic are used by the 15min routine -mnemSet_15min = [ -"IGDP_NRSD_ALG_TEMP", - -"INRSD_ALG_ACC_P12C", -"INRSD_ALG_ACC_N12C", -"INRSD_ALG_ACC_3D3_1D5_C", -"INRSD_ALG_CHASSIS", - -"IGDP_NRSD_ALG_A1_VDD_C", -"IGDP_NRSD_ALG_A1_VDDA", -"IGDP_NRSD_ALG_A1VDAP12C", -"IGDP_NRSD_ALG_A1VDAN12C", -"IGDP_NRSD_ALG_A1GND4VDA", -"IGDP_NRSD_ALG_A1GND5VRF", -"INRSD_ALG_A1_VDD3P3", -"INRSD_ALG_A1_VDD", -"INRSD_ALG_A1_REF", -"INRSD_A1_DSUB_V", -"INRSD_A1_VRESET_V", -"INRSD_A1_CELLDRN_V", -"INRSD_A1_DRAIN_V", -"INRSD_A1_VBIASGATE_V", -"INRSD_A1_VBIASPWR_V", -"INRSD_A1_VDDA_I", - -"IGDP_NRSD_ALG_A2_VDD_C", -"IGDP_NRSD_ALG_A2_VDDA", -"IGDP_NRSD_ALG_A2VDAP12C", -"IGDP_NRSD_ALG_A2VDAN12C", -"IGDP_NRSD_ALG_A2GND4VDA", -"IGDP_NRSD_ALG_A2GND5VRF", -"INRSD_ALG_A2_VDD3P3", -"INRSD_ALG_A2_VDD", -"INRSD_ALG_A2_REF", -"INRSD_A2_DSUB_V", -"INRSD_A2_VRESET_V", -"INRSD_A2_CELLDRN_V", -"INRSD_A2_DRAIN_V", -"INRSD_A2_VBIASGATE_V", 
-"INRSD_A2_VBIASPWR_V", -"INRSD_A2_VDDA_I", - -"INRSH_HK_TEMP1", -"INRSH_HK_TEMP2", - -"INRSH_HK_P15V", -"INRSH_HK_N15V", -"INRSH_HK_VMOTOR", -"INRSH_HK_P5V", -"INRSH_HK_2P5V", -"INRSH_HK_ADCTGAIN", -"INRSH_HK_ADCTOFFSET", - -"IGDP_NRSI_C_CAM_TEMP", -"IGDP_NRSI_C_COL_TEMP", -"IGDP_NRSI_C_COM1_TEMP", -"IGDP_NRSI_C_FOR_TEMP", -"IGDP_NRSI_C_IFU_TEMP", -"IGDP_NRSI_C_BP1_TEMP", -"IGDP_NRSI_C_BP2_TEMP", -"IGDP_NRSI_C_BP3_TEMP", -"IGDP_NRSI_C_BP4_TEMP", -"IGDP_NRSI_C_RMA_TEMP", - -"SI_GZCTS75A", -"SI_GZCTS68A", -"SI_GZCTS81A", -"SI_GZCTS80A", -"SI_GZCTS70A", -"SI_GZCTS76A", -"SI_GZCTS79A", -"SI_GZCTS77A", -"SI_GZCTS78A", -"SI_GZCTS69A", - -"INRSM_MCE_AIC_1R5_V", -"INRSM_MCE_AIC_3R3_V", -"INRSM_MCE_AIC_5_V", -"INRSM_MCE_AIC_P12_V", -"INRSM_MCE_AIC_N12_V", -"INRSM_MCE_AIC_3R3_I", -"INRSM_MCE_AIC_5_I", -"INRSM_MCE_AIC_P12_I", -"INRSM_MCE_AIC_N12_I", - -"INRSM_MCE_MDAC_1R5_V", -"INRSM_MCE_MDAC_3R3_V", -"INRSM_MCE_MDAC_5_V", -"INRSM_MCE_MDAC_P12_V", -"INRSM_MCE_MDAC_N12_V", -"INRSM_MCE_MDAC_3R3_I", -"INRSM_MCE_MDAC_5_I", -"INRSM_MCE_MDAC_P12_I", -"INRSM_MCE_MDAC_N12_I", - -"INRSM_MCE_PCA_TMP1", -"INRSM_MCE_PCA_TMP2", -"INRSM_MCE_AIC_TMP_FPGA", -"INRSM_MCE_AIC_TMP_ADC", -"INRSM_MCE_AIC_TMP_VREG", -"INRSM_MCE_MDAC_TMP_FPGA", -"INRSM_MCE_MDAC_TMP_OSC", -"INRSM_MCE_MDAC_TMP_BRD", -"INRSM_MCE_MDAC_TMP_PHA", -"INRSM_MCE_MDAC_TMP_PHB", - -"INRSM_Q1_TMP_A", -"INRSM_Q2_TMP_A", -"INRSM_Q3_TMP_A", -"INRSM_Q4_TMP_A", -"INRSM_MECH_MTR_TMP_A", -"INRSM_LL_MTR_TMP_A", -"INRSM_MSA_TMP_A"] - -# mnemonic set for setting up database -mnemonic_set_database = [ -"GP_ZPSVOLT", -"SE_ZINRSFPEA", -"SE_ZINRSFPEB", - -"IGDP_NRSD_ALG_TEMP", - -"IGDP_NRSD_ALG_FPA_TEMP", -"IGDP_NRSD_ALG_A1_TEMP", -"IGDP_NRSD_ALG_A2_TEMP", -"SI_GZCTS74A", -"SI_GZCTS74B", -"SI_GZCTS67A", -"SI_GZCTS67B", - -"INRSD_ALG_ACC_P12C", -"INRSD_ALG_ACC_N12C", -"INRSD_ALG_ACC_3D3_1D5_C", -"INRSD_ALG_CHASSIS", - -"IGDP_NRSD_ALG_A1_VDD_C", -"IGDP_NRSD_ALG_A1_VDDA", -"IGDP_NRSD_ALG_A1VDAP12C", -"IGDP_NRSD_ALG_A1VDAN12C", -"IGDP_NRSD_ALG_A1GND4VDA", -"IGDP_NRSD_ALG_A1GND5VRF", -"INRSD_ALG_A1_VDD3P3", -"INRSD_ALG_A1_VDD", -"INRSD_ALG_A1_REF", -"INRSD_A1_DSUB_V", -"INRSD_A1_VRESET_V", -"INRSD_A1_CELLDRN_V", -"INRSD_A1_DRAIN_V", -"INRSD_A1_VBIASGATE_V", -"INRSD_A1_VBIASPWR_V", -"INRSD_A1_VDDA_I", - -"IGDP_NRSD_ALG_A2_VDD_C", -"IGDP_NRSD_ALG_A2_VDDA", -"IGDP_NRSD_ALG_A2VDAP12C", -"IGDP_NRSD_ALG_A2VDAN12C", -"IGDP_NRSD_ALG_A2GND4VDA", -"IGDP_NRSD_ALG_A2GND5VRF", -"INRSD_ALG_A2_VDD3P3", -"INRSD_ALG_A2_VDD", -"INRSD_ALG_A2_REF", -"INRSD_A2_DSUB_V", -"INRSD_A2_VRESET_V", -"INRSD_A2_CELLDRN_V", -"INRSD_A2_DRAIN_V", -"INRSD_A2_VBIASGATE_V", -"INRSD_A2_VBIASPWR_V", -"INRSD_A2_VDDA_I", - -"SE_ZINRSICEA", -"SE_ZINRSICEB", - -"INRSH_HK_TEMP1", -"INRSH_HK_TEMP2", - -"INRSH_HK_P15V", -"INRSH_HK_N15V", -"INRSH_HK_VMOTOR", -"INRSH_HK_P5V", -"INRSH_HK_2P5V", -"INRSH_HK_ADCTGAIN", -"INRSH_HK_ADCTOFFSET", - -"INRSH_OA_VREFOFF", -"INRSH_OA_VREF", - -"IGDP_NRSI_C_CAM_TEMP", -"IGDP_NRSI_C_COL_TEMP", -"IGDP_NRSI_C_COM1_TEMP", -"IGDP_NRSI_C_FOR_TEMP", -"IGDP_NRSI_C_IFU_TEMP", -"IGDP_NRSI_C_BP1_TEMP", -"IGDP_NRSI_C_BP2_TEMP", -"IGDP_NRSI_C_BP3_TEMP", -"IGDP_NRSI_C_BP4_TEMP", -"IGDP_NRSI_C_RMA_TEMP", - -"INRSH_CAA_VREFOFF", -"INRSH_CAA_VREF", - -"INRSH_LAMP_SEL", -"INRSI_C_CAA_CURRENT", -"INRSI_C_CAA_VOLTAGE", - -"IGDP_NRSI_C_CAAL1_TEMP", -"IGDP_NRSI_C_CAAL2_TEMP", -"IGDP_NRSI_C_CAAL3_TEMP", -"IGDP_NRSI_C_CAAL4_TEMP", - -"INRSH_FWA_ADCMGAIN", -"INRSH_FWA_ADCMOFFSET", -"INRSH_FWA_MOTOR_VREF", - -"IGDP_NRSI_C_FWA_TEMP", - -"INRSH_GWA_ADCMGAIN", -"INRSH_GWA_ADCMOFFSET", -"INRSH_GWA_MOTOR_VREF", 
- -"IGDP_NRSI_C_GWA_TEMP", - -"INRSH_RMA_ADCMGAIN", -"INRSH_RMA_ADCMOFFSET", - -"SI_GZCTS75A", -"SI_GZCTS68A", -"SI_GZCTS81A", -"SI_GZCTS80A", -"SI_GZCTS70A", -"SI_GZCTS76A", -"SI_GZCTS79A", -"SI_GZCTS77A", -"SI_GZCTS78A", -"SI_GZCTS69A", -"SI_GZCTS75B", -"SI_GZCTS68B", -"SI_GZCTS81B", -"SI_GZCTS80B", -"SI_GZCTS70B", -"SI_GZCTS76B", -"SI_GZCTS79B", -"SI_GZCTS77B", -"SI_GZCTS78B", -"SI_GZCTS69B", - -"SE_ZINRSMCEA", -"SE_ZINRSMCEB", - -"INRSM_MCE_AIC_1R5_V", -"INRSM_MCE_AIC_3R3_V", -"INRSM_MCE_AIC_5_V", -"INRSM_MCE_AIC_P12_V", -"INRSM_MCE_AIC_N12_V", -"INRSM_MCE_AIC_3R3_I", -"INRSM_MCE_AIC_5_I", -"INRSM_MCE_AIC_P12_I", -"INRSM_MCE_AIC_N12_I", - -"INRSM_MCE_MDAC_1R5_V", -"INRSM_MCE_MDAC_3R3_V", -"INRSM_MCE_MDAC_5_V", -"INRSM_MCE_MDAC_P12_V", -"INRSM_MCE_MDAC_N12_V", -"INRSM_MCE_MDAC_3R3_I", -"INRSM_MCE_MDAC_5_I", -"INRSM_MCE_MDAC_P12_I", -"INRSM_MCE_MDAC_N12_I", - -"INRSM_MCE_PCA_TMP1", -"INRSM_MCE_PCA_TMP2", -"INRSM_MCE_AIC_TMP_FPGA", -"INRSM_MCE_AIC_TMP_ADC", -"INRSM_MCE_AIC_TMP_VREG", -"INRSM_MCE_MDAC_TMP_FPGA", -"INRSM_MCE_MDAC_TMP_OSC", -"INRSM_MCE_MDAC_TMP_BRD", -"INRSM_MCE_MDAC_TMP_PHA", -"INRSM_MCE_MDAC_TMP_PHB", - -"INRSM_Q1_TMP_A", -"INRSM_Q2_TMP_A", -"INRSM_Q3_TMP_A", -"INRSM_Q4_TMP_A", -"INRSM_MECH_MTR_TMP_A", -"INRSM_LL_MTR_TMP_A", -"INRSM_MSA_TMP_A", - -"INRSM_Q1_TMP_B", -"INRSM_Q2_TMP_B", -"INRSM_Q3_TMP_B", -"INRSM_Q4_TMP_B", -"INRSM_MECH_MTR_TMP_B", -"INRSM_LL_MTR_TMP_B", -"INRSM_MSA_TMP_B", - -"INRSM_MSA_Q1_365VDD", -"INRSM_MSA_Q1_365VPP", -"INRSM_MSA_Q1_171VPP", -"IGDPM_MSA_Q1_365IDD", -"IGDPM_MSA_Q1_365IPP", -"IGDPM_MSA_Q1_171RTN", - -"INRSM_MSA_Q2_365VDD", -"INRSM_MSA_Q2_365VPP", -"INRSM_MSA_Q2_171VPP", -"IGDPM_MSA_Q2_365IDD", -"IGDPM_MSA_Q2_365IPP", -"IGDPM_MSA_Q2_171RTN", - -"INRSM_MSA_Q3_365VDD", -"INRSM_MSA_Q3_365VPP", -"INRSM_MSA_Q3_171VPP", -"IGDPM_MSA_Q3_365IDD", -"IGDPM_MSA_Q3_365IPP", -"IGDPM_MSA_Q3_171RTN", - -"INRSM_MSA_Q4_365VDD", -"INRSM_MSA_Q4_365VPP", -"INRSM_MSA_Q4_171VPP", -"IGDPM_MSA_Q4_365IDD", -"IGDPM_MSA_Q4_365IPP", -"IGDPM_MSA_Q4_171RTN", - -"LAMP_FLAT1_CURR", -"LAMP_FLAT2_CURR", -"LAMP_FLAT3_CURR", -"LAMP_FLAT4_CURR", -"LAMP_FLAT5_CURR", -"LAMP_LINE1_CURR", -"LAMP_LINE2_CURR", -"LAMP_LINE3_CURR", -"LAMP_LINE4_CURR", -"LAMP_REF_CURR", -"LAMP_TEST_CURR", - -"LAMP_FLAT1_VOLT", -"LAMP_FLAT2_VOLT", -"LAMP_FLAT3_VOLT", -"LAMP_FLAT4_VOLT", -"LAMP_FLAT5_VOLT", -"LAMP_LINE1_VOLT", -"LAMP_LINE2_VOLT", -"LAMP_LINE3_VOLT", -"LAMP_LINE4_VOLT", -"LAMP_REF_VOLT", -"LAMP_TEST_VOLT"] - -mnemonic_wheelpositions = [ -"INRSI_C_FWA_POSITION_F110W", -"INRSI_C_FWA_POSITION_F100LP", -"INRSI_C_FWA_POSITION_F140X", -"INRSI_C_FWA_POSITION_OPAQUE", -"INRSI_C_FWA_POSITION_F290LP", -"INRSI_C_FWA_POSITION_F170LP", -"INRSI_C_FWA_POSITION_CLEAR", -"INRSI_C_FWA_POSITION_F070LP", - -"INRSI_C_GWA_X_POSITION_PRISM", -"INRSI_C_GWA_Y_POSITION_PRISM", - -"INRSI_C_GWA_X_POSITION_MIRROR", -"INRSI_C_GWA_Y_POSITION_MIRROR", - -"INRSI_C_GWA_X_POSITION_G140H", -"INRSI_C_GWA_Y_POSITION_G140H", - -"INRSI_C_GWA_X_POSITION_G235H", -"INRSI_C_GWA_Y_POSITION_G235H", - -"INRSI_C_GWA_X_POSITION_G395H", -"INRSI_C_GWA_Y_POSITION_G395H", - -"INRSI_C_GWA_X_POSITION_G140M", -"INRSI_C_GWA_Y_POSITION_G140M", - -"INRSI_C_GWA_X_POSITION_G235M", -"INRSI_C_GWA_Y_POSITION_G235M", - -"INRSI_C_GWA_X_POSITION_G395M", -"INRSI_C_GWA_Y_POSITION_G395M" ] - -fw_nominals = { -'F110W': -123.99, -'F100LP' : -10.32, -'CLEAR' : -56.44, -'F070LP' : 43.45, -'F140X' : -78.37, -'OPAQUE' : 21.58, -'F290LP' : -95.78, -'F170LP' : 8.95} - -gwx_nominals = { -'PRISM' : 169.01, -'MIRROR' : 171.11, -'G140H' : 180.25, -'G235H' : 176.66, 
-'G395H' : 159.96, -'G140M' : 164.31, -'G235M' : 159.24, -'G395M' : 141.69} - -gwy_nominals = { -'PRISM' : 17.08, -'MIRROR' : 98.72, -'G140H' : 67.47, -'G235H' : 70.00, -'G395H' : 73.29, -'G140M' : 63.18, -'G235M' : 69.81, -'G395M' : 89.57} - -# use this list for query -mnemonic_set_query = mnemonic_set_database + mnemonic_for_conditions diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/process_data.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/process_data.py deleted file mode 100644 index f265c33f7..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/process_data.py +++ /dev/null @@ -1,344 +0,0 @@ -"""This module holds functions for miri data trending - -All functions in this module are tailored for the miri datatrending application. -Detailed descriptions are given for every function individually. - -------- - - Daniel Kühbacher - -Use ---- - -Dependencies ------------- -MIRI_trend_requestsDRAFT1900201.docx - -References ----------- - -Notes ------ - -""" - -import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as mn -import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.condition as cond -import statistics -from collections import defaultdict - - -def extract_data(condition, mnemonic): - '''Function extracts data from given mnemmonic at a given condition - Parameters - ---------- - condition : object - conditon object that holds one or more subconditions - mnemonic : AstropyTable - holds single table with mnemonic data - Return - ------ - temp : list or None - holds data that applies to given condition - ''' - temp = [] - - # look for all values that fit to the given conditions - for element in mnemonic: - if condition.state(float(element['time'])): - temp.append(float(element['value'])) - - # return temp is one ore more values fit to the condition - # return None if no applicable data was found - if len(temp) > 0: - return temp - else: - return None - - -def lamp_distinction(caa_flag, lamp_sel, lamp_curr, lamp_volt): - """Distincts over all calibration lamps and returns representative current means - each - Parameters - ---------- - """ - - # initilize empty dict - lamp_values = defaultdict(list) - - for index, flag in enumerate(caa_flag): - - if flag['value'] == 'ON': - - # initialize lamp value to default - current_lamp = "default" - - # find current lamp value - for lamp in lamp_sel: - if lamp['time'] <= flag['time']: - current_lamp = lamp['value'] - - # go to next Value if dummy lamps are activated - if (current_lamp == 'NO_LAMP') or (current_lamp == 'DUMMY'): - continue - - # define on_time of current lamp - try: - start_time = flag['time'] - - i = 1 - if caa_flag[index+i]['value'] == 'OFF': - end_time = caa_flag[index+1]['time'] - else: - i += 1 - - except IndexError: - break - - # append and evaluate current and voltage values - temp_curr = [] - temp_volt = [] - - # append current values to list - for curr in lamp_curr: - if curr['time'] >= start_time: - if curr['time'] < end_time: - temp_curr.append(float(curr['value'])) - else: - break - # append voltage values to list - for volt in lamp_volt: - if volt['time'] >= start_time: - if volt['time'] < end_time: - temp_volt.append(float(volt['value'])) - else: - break - - lamp_data = [] - # append current values - lamp_data.append(start_time) - lamp_data.append(end_time) - lamp_data.append(len(temp_curr)) - lamp_data.append(statistics.mean(temp_curr)) - lamp_data.append(statistics.stdev(temp_curr)) - # append voltage 
values - lamp_data.append(len(temp_volt)) - lamp_data.append(statistics.mean(temp_volt)) - lamp_data.append(statistics.stdev(temp_volt)) - lamp_values[current_lamp].append((lamp_data)) - - return lamp_values - - -def extract_filterpos(move_stat, wheel_pos, wheel_val): - '''Extracts ratio values which correspond to given position values and their - proposed nominals - Parameters - ---------- - condition : object - conditon object that holds one or more subconditions - nominals : dict - holds nominal values for all wheel positions - ratio_mem : AstropyTable - holds ratio values of one specific mnemonic - pos_mem : AstropyTable - holds pos values of one specific mnemonic - Return - ------ - pos_values : dict - holds ratio values and times with corresponding positionlabel as key - ''' - - # initilize empty dict for assigned ratio values - pos_values = defaultdict(list) - - for index, stat in enumerate(move_stat): - - # raise warning if position is UNKNOWN - if stat['value'] == "SUCCESS": - - # initialize lamp value to default - current_pos = "default" - pos_val = 0 - pos_time = 0 - - # Evaluate current position - for pos in wheel_pos: - if pos['time'] <= stat['time']: - current_pos = pos['value'] - if pos['time'] > stat['time']: - break - - # Evaluate corresponding value - for val in wheel_val: - if val['time'] <= stat['time']: - pos_val = val['value'] - pos_time = val['time'] - if val['time'] > stat['time']: - break - - print (current_pos, pos_val, pos_time) - - if current_pos != 'default': - pos_values[current_pos].append((pos_time, pos_val)) - else: - continue - - return pos_values - - -def once_a_day_routine(mnemonic_data): - '''Routine for processing a 15min data file once a day - Parameters - ---------- - mnemonic_data : dict - dict holds time and value in an astropy table - with correspining identifier as key - Return - ------ - return_data : dict - Holds extracted data with applied conditions - ''' - - # abbreviate attribute - m = mnemonic_data - return_data = dict() - - ########################################################################### - con_set_1 = [cond.unequal(m.mnemonic('INRSD_EXP_STAT'), 'STARTED')] - # setup condition - condition_1 = cond.condition(con_set_1) - - for identifier in mn.mnemonic_cond_1: - data = extract_data(condition_1, m.mnemonic(identifier)) - if data is not None: - return_data.update({identifier: data}) - else: - print("no data for {}".format(identifier)) - del condition_1 - - ########################################################################### - con_set_2 = [cond.equal(m.mnemonic('INRSH_LAMP_SEL'), 'NO_LAMP')] - # setup condition - condition_2 = cond.condition(con_set_2) - - for identifier in mn.mnemonic_cond_2: - data = extract_data(condition_2, m.mnemonic(identifier)) - if data is not None: - return_data.update({identifier: data}) - else: - print("no data for {}".format(identifier)) - del condition_2 - - ########################################################################### - con_set_3 = [cond.unequal(m.mnemonic('INRSM_MOVE_STAT'), 'STARTED')] - # setup condition - condition_3 = cond.condition(con_set_3) - - for identifier in mn.mnemonic_cond_3: - data = extract_data(condition_3, m.mnemonic(identifier)) - if data is not None: - return_data.update({identifier: data}) - else: - print("no data for {}".format(identifier)) - del condition_3 - - return return_data - - -def whole_day_routine(mnemonic_data): - '''Proposed routine for processing a 15min data file once a day - - Parameters - ---------- - mnemonic_data : dict - dict holds time and 
value in an astropy table - with correspining identifier as key - - Return - ------ - data_cond_1 : dict - holds extracted data with condition 1 applied - data_cond_1 : dict - holds extracted data with condition 2 applied - ''' - - # abbreviate attribute - m = mnemonic_data - return_data = dict() - - ########################################################################### - con_set_ft_10 = [cond.equal(m.mnemonic('ICTM_RT_FILTER'), 10, stringval=False)] - # setup condition - condition_ft_10 = cond.condition(con_set_ft_10) - - for identifier in mn.mnemonic_ft10: - data = extract_data(condition_ft_10, m.mnemonic(identifier)) - if data is not None: - return_data.update({identifier: data}) - else: - print("no data for {}".format(identifier)) - del condition_ft_10 - - ########################################################################## - con_set_caa = [cond.equal(m.mnemonic('INRSH_CAA_PWRF_ST'), 'ON')] - # setup condition - condition_caa = cond.condition(con_set_caa) - - for identifier in mn.mnemonic_caa: - data = extract_data(condition_caa, m.mnemonic(identifier)) - - if data is not None: - return_data.update({identifier: data}) - else: - print("no data for {}".format(identifier)) - - del condition_caa - - ########################################################################### - data_lamps = lamp_distinction(m.mnemonic('INRSI_CAA_ON_FLAG'), - m.mnemonic('INRSH_LAMP_SEL'), - m.mnemonic('INRSI_C_CAA_CURRENT'), - m.mnemonic('INRSI_C_CAA_VOLTAGE')) - - return return_data, data_lamps - - -def wheelpos_routine(mnemonic_data): - '''Proposed routine for positionsensors each day - Parameters - ---------- - mnemonic_data : dict - dict holds time and value in an astropy table - with correspining identifier as key - Return - ------ - FW : dict - holds FW ratio values and times with corresponding positionlabel as key - GW14 : dict - holds GW14 ratio values and times with corresponding positionlabel as key - GW23 : dict - holds GW23 ratio values and times with corresponding positionlabel as key - CCC : dict - holds CCC ratio values and times with corresponding positionlabel as key - ''' - - # abbreviate attribute - m = mnemonic_data - - FW = extract_filterpos(m.mnemonic('INRSI_FWA_MOVE_ST'), - m.mnemonic('INRSI_FWA_MECH_POS'), - m.mnemonic('INRSI_C_FWA_POSITION')) - - GWX = extract_filterpos(m.mnemonic('INRSI_GWA_MOVE_ST'), - m.mnemonic('INRSI_GWA_MECH_POS'), - m.mnemonic('INRSI_C_GWA_X_POSITION')) - - GWY = extract_filterpos(m.mnemonic('INRSI_GWA_MOVE_ST'), - m.mnemonic('INRSI_GWA_MECH_POS'), - m.mnemonic('INRSI_C_GWA_Y_POSITION')) - - return FW, GWX, GWY - - -if __name__ == '__main__': - pass diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/sql_interface.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/sql_interface.py deleted file mode 100644 index daa871b45..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/utils/sql_interface.py +++ /dev/null @@ -1,162 +0,0 @@ -"""Module holds functions to generate and access sqlite databases - -The module is tailored for use in miri data trending. It holds functions to -create and close connections to a sqlite database. Calling the module itself -creates a sqlite database with specific tables used at miri data trending. 
- -Authors -------- - - Daniel Kühbacher - -Use ---- - -Dependencies ------------- - import mnemonics as m - -References ----------- - -Notes ------ - -""" -import os -import sqlite3 -from sqlite3 import Error - -import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as m -from jwql.utils.utils import get_config - - -def create_connection(db_file): - '''Sets up a connection or builds database - Parameters - ---------- - db_file : string - represents filename of database - Return - ------ - conn : DBobject or None - Connection object or None - ''' - try: - conn = sqlite3.connect(db_file) - print('Connected to database "{}"'.format(db_file)) - return conn - except Error as e: - print(e) - return None - - -def close_connection(conn): - '''Closes connection to database - Parameters - ---------- - conn : DBobject - Connection object to be closed - ''' - conn.close() - print('Connection closed') - - -def add_data(conn, mnemonic, data): - '''Add data of a specific mnemonic to database if it not exists - Parameters - ---------- - conn : DBobject - connection object to access database - mnemonic : string - identifies the table - data : list - specifies the data - ''' - - c = conn.cursor() - - # check if data already exists (start_time as identifier) - c.execute('SELECT id from {} WHERE start_time= {}'.format(mnemonic, data[0])) - temp = c.fetchall() - - if len(temp) == 0: - c.execute('INSERT INTO {} (start_time,end_time,data_points,average,deviation) \ - VALUES (?,?,?,?,?)'.format(mnemonic), data) - conn.commit() - else: - print('data for {} already exists'.format(mnemonic)) - - -def add_wheel_data(conn, mnemonic, data): - '''Add data of a specific wheel position to database if it not exists - Parameters - ---------- - conn : DBobject - connection object to access database - mnemonic : string - identifies the table - data : list - specifies the data - ''' - - c = conn.cursor() - - # check if data already exists (start_time) - c.execute('SELECT id from {} WHERE timestamp = {}'.format(mnemonic, data[0])) - temp = c.fetchall() - - if len(temp) == 0: - c.execute('INSERT INTO {} (timestamp, value) \ - VALUES (?,?)'.format(mnemonic), data) - conn.commit() - else: - print('data already exists') - - -def main(): - ''' Creates SQLite database with tables proposed in mnemonics.py''' - - __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) - - # generate paths - DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') - DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'nirspec_database.db') - - conn = create_connection(DATABASE_FILE) - - c = conn.cursor() - - for mnemonic in m.mnemonic_set_database: - try: - c.execute('CREATE TABLE IF NOT EXISTS {} ( \ - id INTEGER, \ - start_time REAL, \ - end_time REAL, \ - data_points INTEGER, \ - average REAL, \ - deviation REAL, \ - performed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\ - PRIMARY KEY (id));'.format(mnemonic)) - except Error as e: - print('e') - - for mnemonic in m.mnemonic_wheelpositions: - try: - c.execute('CREATE TABLE IF NOT EXISTS {} ( \ - id INTEGER, \ - timestamp REAL, \ - value REAL, \ - performed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\ - PRIMARY KEY (id));'.format(mnemonic)) - except Error as e: - print('e') - - print("Database initial setup complete") - conn.commit() - close_connection(conn) - - -# sets up database if called as main -if __name__ == "__main__": - main() - print("sql_interface.py done") diff --git 
a/jwql/instrument_monitors/nirspec_monitors/data_trending/wheel_to_db.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/wheel_to_db.py deleted file mode 100644 index 247160618..000000000 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/wheel_to_db.py +++ /dev/null @@ -1,62 +0,0 @@ -import os -import glob -import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql -import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.csv_to_AstropyTable as apt -from jwql.utils.utils import get_config - -from jwql.instrument_monitors.nirspec_monitors.data_trending.utils.process_data import wheelpos_routine - -__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) - -# point to the directory where your files are located! -directory = os.path.join(get_config()['outputs'], 'nirspec_data_trending', 'nirspec_wheels', '*.CSV') - -# here some some files contain the same data but they are all incomplete -# in order to generate a full database we have to import all of them -filenames = glob.glob(directory) - - -def process_file(conn, path): - - # import mnemonic data and append dict to variable below - m_raw_data = apt.mnemonics(path) - - # process raw data with once a day routine - FW, GWX, GWY = wheelpos_routine(m_raw_data) - - for key, values in FW.items(): - for data in values: - sql.add_wheel_data(conn, 'INRSI_C_FWA_POSITION_{}'.format(key), data) - - for key, values in GWX.items(): - for data in values: - sql.add_wheel_data(conn, 'INRSI_C_GWA_X_POSITION_{}'.format(key), data) - - for key, values in GWY.items(): - for data in values: - sql.add_wheel_data(conn, 'INRSI_C_GWA_Y_POSITION_{}'.format(key), data) - - -def main(): - # generate paths - DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database') - DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'nirspec_database.db') - - # connect to temporary database - conn = sql.create_connection(DATABASE_FILE) - - ''' - path = directory + test - process_file(conn, path) - ''' - # do for every file in list above - for path in filenames: - process_file(conn, path) - - # close connection - sql.close_connection(conn) - print("done") - - -if __name__ == "__main__": - main() diff --git a/jwql/instrument_monitors/nirspec_monitors/ta_monitors/msata_monitor.py b/jwql/instrument_monitors/nirspec_monitors/ta_monitors/msata_monitor.py new file mode 100755 index 000000000..55abcf95e --- /dev/null +++ b/jwql/instrument_monitors/nirspec_monitors/ta_monitors/msata_monitor.py @@ -0,0 +1,1402 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + + +# HISTORY +# Feb 2022 - Vr. 1.0: Completed initial version +# Jul 2022 - Vr. 1.1: Changed keywords to final flight values +# Aug 2022 - Vr. 1.2: Modified plots according to NIRSpec team input +# Sep 2022 - Vr. 1.3: Modified ColumnDataSource so that data could be recovered +# from an html file of a previous run of the monitor and +# included the code to read and format the data from the html file + + +""" +This module contains the code for the NIRSpec Multi Shutter Array Target +Acquisition (MSATA) monitor, which monitors the TA offsets, including +the roll for MSATA. + +This monitor displays details of individual MSATA stars and details of +fitting and rejection procedure (least square fit). + +This monitor also displays V2, V3, and roll offsets over time. 
+ +Author +______ + - Maria Pena-Guerrero + - Melanie Clarke + +Use +--- + This module can be used from the command line as follows: + python msata_monitor.py + +""" + + +# general imports +import json +import os +import logging +import shutil +from datetime import datetime, timezone, timedelta +from random import randint + +import numpy as np +import pandas as pd +from astropy.time import Time +from astropy.io import fits +from bokeh.embed import components +from bokeh.layouts import gridplot, layout +from bokeh.models import ( + ColumnDataSource, Range1d, CustomJS, CustomJSFilter, CDSView, + Span, Label, DateRangeSlider) +from bokeh.models.tools import HoverTool, BoxSelectTool +from bokeh.plotting import figure, save, output_file +from bs4 import BeautifulSoup +from sqlalchemy.sql.expression import and_ + +# jwql imports +from jwql.database.database_interface import session, engine +from jwql.database.database_interface import NIRSpecTAQueryHistory, NIRSpecTAStats +from jwql.utils import monitor_utils +from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE +from jwql.utils.logging_functions import log_info, log_fail +from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config + + +class MSATA(): + """ Class for executing the NIRSpec MSATA monitor. + + This class will search for new MSATA current files in the file systems + for NIRSpec and will run the monitor on these files. The monitor will + extract the TA information from the fits file headers and perform all + statistical measurements. Results will be saved to the MSATA database. + + Attributes + ---------- + output_dir : str + Path into which outputs will be placed. + + data_dir : str + Path into which new dark files will be copied to be worked on. + + aperture : str + Name of the aperture used for the dark current (i.e. + "NRS_FULL_MSA", "NRS_S1600A1_SLIT") + + query_start : float + MJD start date to use for querying MAST. + + query_end : float + MJD end date to use for querying MAST. 
+ + """ + + def __init__(self): + """ Initialize an instance of the MSATA class """ + # Very beginning of intake of images: Jan 28, 2022 == First JWST images (MIRI) + self.query_very_beginning = 59607.0 + + # dictionary to define required keywords to extract MSATA data and where it lives + self.keywds2extract = {'FILENAME': {'loc': 'main_hdr', 'alt_key': None, 'name': 'filename', 'type': str}, + 'DATE-BEG': {'loc': 'main_hdr', 'alt_key': None, 'name': 'date_obs', 'type': str}, + 'OBS_ID': {'loc': 'main_hdr', 'alt_key': None, 'name': 'visit_id', 'type': str}, + 'FILTER': {'loc': 'main_hdr', 'alt_key': 'FWA_POS', 'name': 'tafilter', 'type': str}, + 'DETECTOR': {'loc': 'main_hdr', 'alt_key': None, 'name': 'detector', 'type': str}, + 'READOUT': {'loc': 'main_hdr', 'alt_key': 'READPATT', 'name': 'readout', 'type': str}, + 'SUBARRAY': {'loc': 'main_hdr', 'alt_key': None, 'name': 'subarray', 'type': str}, + 'NUMREFST': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'num_refstars', 'type': int}, + 'TASTATUS': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'ta_status', 'type': str}, + 'STAT_RSN': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'status_rsn', 'type': str}, + 'V2HFOFFS': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'v2halffacet', 'type': float}, + 'V3HFOFFS': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'v3halffacet', 'type': float}, + 'V2MSACTR': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'v2msactr', 'type': float}, + 'V3MSACTR': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'v3msactr', 'type': float}, + 'FITXOFFS': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'lsv2offset', 'type': float}, + 'FITYOFFS': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'lsv3offset', 'type': float}, + 'OFFSTMAG': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'lsoffsetmag', 'type': float}, + 'FITROFFS': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'lsrolloffset', 'type': float}, + 'FITXSIGM': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'lsv2sigma', 'type': float}, + 'FITYSIGM': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'lsv3sigma', 'type': float}, + 'ITERATNS': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'lsiterations', 'type': int}, + 'GUIDERID': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'guidestarid', 'type': str}, + 'IDEAL_X': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'guidestarx', 'type': float}, + 'IDEAL_Y': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'guidestary', 'type': float}, + 'IDL_ROLL': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'guidestarroll', 'type': float}, + 'SAM_X': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'samx', 'type': float}, + 'SAM_Y': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'samy', 'type': float}, + 'SAM_ROLL': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'samroll', 'type': float}, + 'box_peak_value': {'loc': 'ta_table', 'alt_key': None, 'name': 'box_peak_value', 'type': float}, + 'reference_star_mag': {'loc': 'ta_table', 'alt_key': None, 'name': 'reference_star_mag', 'type': float}, + 'convergence_status': {'loc': 'ta_table', 'alt_key': None, 'name': 'convergence_status', 'type': str}, + 'reference_star_number': {'loc': 'ta_table', 'alt_key': None, 'name': 'reference_star_number', 'type': int}, + 'lsf_removed_status': {'loc': 'ta_table', 'alt_key': None, 'name': 'lsf_removed_status', 'type': str}, + 'lsf_removed_reason': {'loc': 'ta_table', 'alt_key': None, 'name': 'lsf_removed_reason', 'type': str}, + 'lsf_removed_x': {'loc': 'ta_table', 'alt_key': None, 'name': 'lsf_removed_x', 'type': float}, + 'lsf_removed_y': {'loc': 'ta_table', 'alt_key': None, 'name': 'lsf_removed_y', 'type': float}, + 'planned_v2': 
{'loc': 'ta_table', 'alt_key': None, 'name': 'planned_v2', 'type': float}, + 'planned_v3': {'loc': 'ta_table', 'alt_key': None, 'name': 'planned_v3', 'type': float}, + 'FITTARGS': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'stars_in_fit', 'type': int}} + + # initialize attributes to be set later + self.source = None + self.share_tools = [] + self.date_range = None + self.date_filter = None + self.date_view = None + + def get_tainfo_from_fits(self, fits_file): + """ Get the TA information from the fits file + Parameters + ---------- + fits_file: str + This is the fits file for a specific MSATA + + Returns + ------- + msata_info: list, contains main header, and TA extension header and data + """ + msata = False + with fits.open(fits_file) as ff: + # make sure this is a MSATA file + for hdu in ff: + if 'MSA_TARG_ACQ' in hdu.name: + msata = True + break + if not msata: + return None + main_hdr = ff[0].header + try: + ta_hdr = ff['MSA_TARG_ACQ'].header + ta_table = ff['MSA_TARG_ACQ'].data + except KeyError: + no_ta_ext_msg = 'No TARG_ACQ extension in file '+fits_file + return no_ta_ext_msg + msata_info = [main_hdr, ta_hdr, ta_table] + return msata_info + + def get_msata_data(self, new_filenames): + """ Get the TA information from the MSATA text table + Parameters + ---------- + new_filenames: list + List of MSATA file names to consider + + Returns + ------- + msata_df: data frame object + Pandas data frame containing all MSATA data + """ + # fill out the dictionary to create the dataframe + msata_dict, no_ta_ext_msgs = {}, [] + for fits_file in new_filenames: + msata_info = self.get_tainfo_from_fits(fits_file) + if isinstance(msata_info, str): + no_ta_ext_msgs.append(msata_info) + continue + if msata_info is None: + continue + main_hdr, ta_hdr, ta_table = msata_info + file_data_dict, file_errs = {}, [] + for key, key_dict in self.keywds2extract.items(): + key_name = key_dict['name'] + if key_name not in file_data_dict: + file_data_dict[key_name] = [] + ext = main_hdr + if key_dict['loc'] == 'ta_hdr': + ext = ta_hdr + if key_dict['loc'] == 'ta_table': + ext = ta_table + try: + val = ext[key] + if key == 'filename': + val = fits_file + except KeyError: + if key_dict['alt_key'] is not None: + try: + val = ext[key_dict['alt_key']] + except (NameError, TypeError) as error: + msg = error+' in file '+fits_file + file_errs.append(msg) + break + else: + msg = 'Keyword '+key+' not found. 
Skipping file '+fits_file + file_errs.append(msg) + break + """ UNCOMMENT THIS BLOCK IN CASE WE DO WANT TO GET RID OF the 999.0 values + # remove the 999 values for arrays + if isinstance(val, np.ndarray): + if val.dtype.char == 'd' or val.dtype.char == 'f': + val = np.where(abs(val) != 999.0, val, 0.0) + # remove the 999 from single values + elif not isinstance(val, str): + if float(abs(val)) == 999.0: + val = 0.0 + """ + file_data_dict[key_name].append(val) + # only update the data dictionary if all the keywords were found + if len(file_errs) == 0: + # if starting from scratch, simply update + if len(msata_dict) == 0: + msata_dict.update(file_data_dict) + # if msata_dict is not empty then extend the lists + else: + for msata_dict_key in msata_dict: + msata_dict[msata_dict_key].extend(file_data_dict[msata_dict_key]) + else: + no_ta_ext_msgs.extend(file_errs) + # create the pandas dataframe + msata_df = pd.DataFrame(msata_dict) + return msata_df, no_ta_ext_msgs + + def add_time_column(self): + """Add time column to data source, to be used by all plots.""" + date_obs = self.source.data['date_obs'] + if 'time_arr' not in self.source.data: + time_arr = [] + for do_str in date_obs: + # convert time string into an array of time (this is in UT) + t = datetime.fromisoformat(do_str) + time_arr.append(t) + + # add to the bokeh data structure + self.source.data["time_arr"] = time_arr + + def plt_status(self): + """ Plot the MSATA status versus time. + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + ta_status = self.source.data['ta_status'] + # check if this column exists in the data already (the other 2 will exist too), else create it + if 'bool_status' not in self.source.data: + # bokeh does not like to plot strings, turn into numbers + number_status, status_colors = [], [] + for tas in ta_status: + if tas.lower() == 'unsuccessful': + number_status.append(0.0) + status_colors.append('red') + elif 'progress' in tas.lower(): + number_status.append(0.5) + status_colors.append('gray') + else: + number_status.append(1.0) + status_colors.append('blue') + + # add these to the bokeh data structure + self.source.data["number_status"] = number_status + self.source.data["status_colors"] = status_colors + + # create a new bokeh plot + plot = figure(title="MSATA Status [Success=1, In Progress=0.5, Fail=0]", x_axis_label='Time', + y_axis_label='MSATA Status', x_axis_type='datetime',) + plot.y_range = Range1d(-0.5, 1.5) + plot.circle(x='time_arr', y='number_status', source=self.source, + color='status_colors', size=7, fill_alpha=0.3, view=self.date_view) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('TA status', '@ta_status'), + ('Detector', '@detector'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Subarray', '@subarray'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_residual_offsets(self): + """ Plot the residual Least Squares V2 and V3 offsets + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + # create a new bokeh plot + plot = figure(title="MSATA Least Squares Residual V2-V3 Offsets", + x_axis_label='Least Squares Residual V2 Offset', + y_axis_label='Least Squares Residual V3 Offset') + plot.circle(x='lsv2offset', y='lsv3offset', source=self.source, + color="blue", size=7, fill_alpha=0.3, 
view=self.date_view) + + v2halffacet, v3halffacet = self.source.data['v2halffacet'], self.source.data['v3halffacet'] + xstart, ystart, ray_length = -1 * v2halffacet[0], -1 * v3halffacet[0], 0.05 + plot.ray(x=xstart - ray_length / 2.0, y=ystart, length=ray_length, angle_units="deg", + angle=0, line_color='purple', line_width=3) + plot.ray(x=xstart, y=ystart - ray_length / 2.0, length=ray_length, angle_units="deg", + angle=90, line_color='purple', line_width=3) + hflabel = Label(x=xstart / 3.0, y=ystart, y_units='data', text='-V2, -V3 half-facets values') + plot.add_layout(hflabel) + plot.x_range = Range1d(-0.5, 0.5) + plot.y_range = Range1d(-0.5, 0.5) + + # mark origin lines + vline = Span(location=0, dimension='height', line_color='black', line_width=0.7) + hline = Span(location=0, dimension='width', line_color='black', line_width=0.7) + plot.renderers.extend([vline, hline]) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('Detector', '@detector'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Subarray', '@subarray'), + ('LS roll offset', '@lsrolloffset'), + ('LS V2 offset', '@lsv2offset'), + ('LS V3 offset', '@lsv3offset'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_v2offset_time(self): + """ Plot the residual V2 versus time + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + # create a new bokeh plot + plot = figure(title="MSATA Least Squares V2 Offset vs Time", x_axis_label='Time', + y_axis_label='Least Squares Residual V2 Offset', x_axis_type='datetime') + plot.circle(x='time_arr', y='lsv2offset', source=self.source, + color="blue", size=7, fill_alpha=0.3, view=self.date_view) + plot.y_range = Range1d(-0.5, 0.5) + + # mark origin line + hline = Span(location=0, dimension='width', line_color='black', line_width=0.7) + time_arr, v2halffacet = self.source.data['time_arr'], self.source.data['v2halffacet'] + hfline = Span(location=-1 * v2halffacet[0], dimension='width', line_color='green', line_width=3) + plot.renderers.extend([hline, hfline]) + hflabel = Label(x=time_arr[-1], y=-1 * v2halffacet[0], y_units='data', text='-V2 half-facet value') + plot.add_layout(hflabel) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('Detector', '@detector'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Subarray', '@subarray'), + ('LS roll offset', '@lsrolloffset'), + ('LS V2 offset', '@lsv2offset'), + ('LS V3 offset', '@lsv3offset'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_v3offset_time(self): + """ Plot the residual V3 versus time + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + # create a new bokeh plot + plot = figure(title="MSATA Least Squares V3 Offset vs Time", x_axis_label='Time', + y_axis_label='Least Squares Residual V3 Offset', x_axis_type='datetime') + plot.circle(x='time_arr', y='lsv3offset', source=self.source, + color="blue", size=7, fill_alpha=0.3, view=self.date_view) + plot.y_range = Range1d(-0.5, 0.5) + + # mark origin line + hline = Span(location=0, dimension='width', line_color='black', line_width=0.7) + time_arr, v3halffacet = 
self.source.data['time_arr'], self.source.data['v3halffacet'] + hfline = Span(location=-1 * v3halffacet[0], dimension='width', line_color='green', line_width=3) + plot.renderers.extend([hline, hfline]) + hflabel = Label(x=time_arr[-1], y=-1 * v3halffacet[0], y_units='data', text='-V3 half-facet value') + plot.add_layout(hflabel) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('Detector', '@detector'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Subarray', '@subarray'), + ('LS roll offset', '@lsrolloffset'), + ('LS V2 offset', '@lsv2offset'), + ('LS V3 offset', '@lsv3offset'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_lsv2v3offsetsigma(self): + """ Plot the residual Least Squares V2 and V3 sigma offsets + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + # create a new bokeh plot + plot = figure(title="MSATA Least Squares Residual V2-V3 Sigma Offsets", + x_axis_label='Least Squares Residual V2 Sigma Offset', + y_axis_label='Least Squares Residual V3 Sigma Offset') + plot.circle(x='lsv2sigma', y='lsv3sigma', source=self.source, + color="blue", size=7, fill_alpha=0.3, view=self.date_view) + plot.x_range = Range1d(-0.1, 0.1) + plot.y_range = Range1d(-0.1, 0.1) + + # mark origin lines + vline = Span(location=0, dimension='height', line_color='black', line_width=0.7) + hline = Span(location=0, dimension='width', line_color='black', line_width=0.7) + plot.renderers.extend([vline, hline]) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('Detector', '@detector'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Subarray', '@subarray'), + ('LS roll offset', '@lsrolloffset'), + ('LS V2 offset', '@lsv2offset'), + ('LS V2 sigma', '@lsv2sigma'), + ('LS V3 offset', '@lsv3offset'), + ('LS V3 sigma', '@lsv3sigma'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_res_offsets_corrected(self): + """ Plot the residual Least Squares V2 and V3 offsets corrected by the half-facet + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + lsv2offset, lsv3offset = self.source.data['lsv2offset'], self.source.data['lsv3offset'] + v2halffacet, v3halffacet = self.source.data['v2halffacet'], self.source.data['v3halffacet'] + + # check if this column exists in the data already, else create it + if 'v2_half_fac_corr' not in self.source.data: + v2_half_fac_corr, v3_half_fac_corr = [], [] + for idx, v2hf in enumerate(v2halffacet): + v3hf = v3halffacet[idx] + v2_half_fac_corr.append(lsv2offset[idx] + v2hf) + v3_half_fac_corr.append(lsv3offset[idx] + v3hf) + + # add these to the bokeh data structure + self.source.data["v2_half_fac_corr"] = v2_half_fac_corr + self.source.data["v3_half_fac_corr"] = v3_half_fac_corr + + # create a new bokeh plot + plot = figure(title="MSATA Least Squares Residual V2-V3 Offsets Half-facet corrected", + x_axis_label='Least Squares Residual V2 Offset + half-facet', + y_axis_label='Least Squares Residual V3 Offset + half-facet') + plot.circle(x='v2_half_fac_corr', y='v3_half_fac_corr', source=self.source, + color="blue", size=7, fill_alpha=0.3, 
view=self.date_view) + plot.x_range = Range1d(-0.5, 0.5) + plot.y_range = Range1d(-0.5, 0.5) + + # mark origin lines + vline = Span(location=0, dimension='height', line_color='black', line_width=0.7) + hline = Span(location=0, dimension='width', line_color='black', line_width=0.7) + plot.renderers.extend([vline, hline]) + xstart, ystart, ray_length = -1 * v2halffacet[0], -1 * v3halffacet[0], 0.05 + plot.ray(x=xstart - ray_length / 2.0, y=ystart, length=ray_length, angle_units="deg", + angle=0, line_color='purple', line_width=3) + plot.ray(x=xstart, y=ystart - ray_length / 2.0, length=ray_length, angle_units="deg", + angle=90, line_color='purple', line_width=3) + hflabel = Label(x=xstart / 3.0, y=ystart, y_units='data', text='-V2, -V3 half-facets values') + plot.add_layout(hflabel) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('Detector', '@detector'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Subarray', '@subarray'), + ('LS roll offset', '@lsrolloffset'), + ('LS V2 offset', '@lsv2offset'), + ('LS V3 offset', '@lsv3offset'), + ('V2 half-facet', '@v2halffacet'), + ('V3 half-facet', '@v3halffacet'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_v2offsigma_time(self): + """Plot the residual Least Squares V2 sigma Offset versus time + + Returns + ------- + plot: bokeh plot object + """ + # create a new bokeh plot + plot = figure(title="MSATA Least Squares V2 Sigma Offset vs Time", x_axis_label='Time', + y_axis_label='Least Squares Residual V2 Sigma Offset', x_axis_type='datetime') + plot.circle(x='time_arr', y='lsv2sigma', source=self.source, + color="blue", size=7, fill_alpha=0.3, view=self.date_view) + plot.y_range = Range1d(-0.1, 0.1) + + # mark origin line + hline = Span(location=0, dimension='width', line_color='black', line_width=0.7) + plot.renderers.extend([hline]) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('Detector', '@detector'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Subarray', '@subarray'), + ('LS V2 offset', '@lsv2offset'), + ('LS V2 sigma', '@lsv2sigma'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_v3offsigma_time(self): + """Plot the residual Least Squares V3 Offset versus time + + Returns + ------- + p: bokeh plot object + """ + # create a new bokeh plot + plot = figure(title="MSATA Least Squares V3 Sigma Offset vs Time", + x_axis_label='Time', + y_axis_label='Least Squares Residual V3 Sigma Offset', + x_axis_type='datetime') + plot.circle(x='time_arr', y='lsv3sigma', source=self.source, + color="blue", size=7, fill_alpha=0.3, view=self.date_view) + plot.y_range = Range1d(-0.1, 0.1) + + # mark origin line + hline = Span(location=0, dimension='width', line_color='black', line_width=0.7) + plot.renderers.extend([hline]) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('Detector', '@detector'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Subarray', '@subarray'), + ('LS roll offset', '@lsrolloffset'), + ('LS V3 offset', '@lsv3offset'), + ('LS V3 sigma', '@lsv3sigma'), + 
('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_roll_offset(self): + """ Plot the residual Least Squares roll Offset versus time + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + # create a new bokeh plot + plot = figure(title="MSATA Least Squares Roll Offset vs Time", x_axis_label='Time', + y_axis_label='Least Squares Residual Roll Offset', x_axis_type='datetime') + plot.circle(x='time_arr', y='lsrolloffset', source=self.source, + color="blue", size=7, fill_alpha=0.3, view=self.date_view) + plot.y_range = Range1d(-600.0, 600.0) + + # mark origin line + hline = Span(location=0, dimension='width', line_color='black', line_width=0.7) + + # Maximum accepted roll line and label + time_arr = self.source.data['time_arr'] + arlinepos = Span(location=120, dimension='width', line_color='green', line_width=3) + arlineneg = Span(location=-120, dimension='width', line_color='green', line_width=3) + arlabel = Label(x=time_arr[-1], y=125, y_units='data', text='Max accepted roll') + plot.add_layout(arlabel) + plot.renderers.extend([hline, arlinepos, arlineneg]) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('Detector', '@detector'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Subarray', '@subarray'), + ('LS roll offset', '@lsrolloffset'), + ('LS V2 offset', '@lsv2offset'), + ('LS V3 offset', '@lsv3offset'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_lsoffsetmag(self): + """ Plot the residual Least Squares Total Slew Magnitude Offset versus time + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + # create a new bokeh plot + plot = figure(title="MSATA Least Squares Total Magnitude of the Linear V2, V3 Offset Slew vs Time", x_axis_label='Time', + y_axis_label='sqrt((V2_off)**2 + (V3_off)**2)', x_axis_type='datetime') + plot.circle(x='time_arr', y='lsoffsetmag', source=self.source, + color="blue", size=7, fill_alpha=0.3, view=self.date_view) + plot.y_range = Range1d(-0.5, 0.5) + + # mark origin line + hline = Span(location=0, dimension='width', line_color='black', line_width=0.7) + plot.renderers.extend([hline]) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('Detector', '@detector'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Subarray', '@subarray'), + ('LS roll offset', '@lsrolloffset'), + ('LS slew mag offset', '@lsoffsetmag'), + ('LS V2 offset', '@lsv2offset'), + ('LS V3 offset', '@lsv3offset'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_tot_number_of_stars(self): + """ Plot the total number of stars used versus time + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + # get the number of stars per array + visit_id = self.source.data['visit_id'] + reference_star_number = self.source.data['reference_star_number'] + + # check if this column exists in the data already, else create it + if 'tot_number_of_stars' not in self.source.data: + # create the list of color per visit and 
tot_number_of_stars + colors_list, tot_number_of_stars = [], [] + color_dict = {} + for i, vid in enumerate(visit_id): + tot_stars = len(reference_star_number[i]) + tot_number_of_stars.append(tot_stars) + ci = '#%06X' % randint(0, 0xFFFFFF) + if vid not in color_dict: + color_dict[vid] = ci + colors_list.append(color_dict[vid]) + + # add these to the bokeh data structure + self.source.data["tot_number_of_stars"] = tot_number_of_stars + self.source.data["colors_list"] = colors_list + + # create a new bokeh plot + plot = figure(title="Total Number of Measurements vs Time", x_axis_label='Time', + y_axis_label='Total number of measurements', x_axis_type='datetime') + plot.circle(x='time_arr', y='tot_number_of_stars', source=self.source, + color='colors_list', size=7, fill_alpha=0.3, view=self.date_view) + plot.triangle(x='time_arr', y='stars_in_fit', source=self.source, + color='black', size=7, fill_alpha=0.3, view=self.date_view) + plot.y_range = Range1d(0.0, 40.0) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('Detector', '@detector'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Subarray', '@subarray'), + ('Stars in fit', '@stars_in_fit'), + ('LS roll offset', '@lsrolloffset'), + ('LS slew mag offset', '@lsoffsetmag'), + ('LS V2 offset', '@lsv2offset'), + ('LS V3 offset', '@lsv3offset'), + ('--------', '----------------')] + plot.add_tools(hover) + + return plot + + def plt_mags_time(self): + """ Plot the star magnitudes versus time + Parameters + ---------- + source: bokeh data object for plotting + Returns + ------- + plot: bokeh plot object + """ + visit_id = self.source.data['visit_id'] + lsf_removed_status = self.source.data['lsf_removed_status'] + lsf_removed_reason = self.source.data['lsf_removed_reason'] + lsf_removed_x = self.source.data['lsf_removed_x'] + lsf_removed_y = self.source.data['lsf_removed_y'] + planned_v2 = self.source.data['planned_v2'] + planned_v3 = self.source.data['planned_v3'] + reference_star_number = self.source.data['reference_star_number'] + box_peak_value = self.source.data['box_peak_value'] + date_obs, time_arr = self.source.data['date_obs'], self.source.data['time_arr'] + colors_list = self.source.data['colors_list'] + detector_list = self.source.data['detector'] + filename = self.source.data['filename'] + + # create the structure matching the number of visits and reference stars + new_colors_list, vid, dobs, tarr, star_no, status = [], [], [], [], [], [] + peaks, stars_v2, stars_v3, det, fnames = [], [], [], [], [] + for i, _ in enumerate(visit_id): + v, d, t, c, s, x, y, dt, fn = [], [], [], [], [], [], [], [], [] + for j in range(len(reference_star_number[i])): + v.append(visit_id[i]) + d.append(date_obs[i]) + t.append(time_arr[i]) + c.append(colors_list[i]) + dt.append(detector_list[i]) + fn.append(filename[i]) + if 'not_removed' in lsf_removed_status[i][j]: + s.append('SUCCESS') + x.append(planned_v2[i][j]) + y.append(planned_v3[i][j]) + else: + s.append(lsf_removed_reason[i][j]) + x.append(lsf_removed_x[i][j]) + y.append(lsf_removed_y[i][j]) + vid.extend(v) + dobs.extend(d) + tarr.extend(t) + star_no.extend(reference_star_number[i]) + status.extend(s) + new_colors_list.extend(c) + stars_v2.extend(x) + stars_v3.extend(y) + peaks.extend(box_peak_value[i]) + det.extend(dt) + fnames.extend(fn) + + # now create the mini ColumnDataSource for this particular plot + mini_source = {'vid': vid, 'star_no': star_no, 'status': status, + 'dobs': 
dobs, 'time_arr': tarr, 'det': det, 'fname': fnames, + 'peaks': peaks, 'colors_list': new_colors_list, + 'stars_v2': stars_v2, 'stars_v3': stars_v3} + mini_source = ColumnDataSource(data=mini_source) + + # hook up the date range slider to this source as well + callback = CustomJS(args=dict(s=mini_source), code=""" + s.change.emit(); + """) + self.date_range.js_on_change('value', callback) + mini_view = CDSView(source=mini_source, filters=[self.date_filter]) + + # create the bokeh plot + plot = figure(title="MSATA Counts vs Time", x_axis_label='Time', + y_axis_label='box_peak [Counts]', + x_axis_type='datetime') + plot.circle(x='time_arr', y='peaks', source=mini_source, + color='colors_list', size=7, fill_alpha=0.3, view=mini_view) + + # add count saturation warning lines + loc1, loc2, loc3 = 45000.0, 50000.0, 60000.0 + hline1 = Span(location=loc1, dimension='width', line_color='green', line_width=3) + hline2 = Span(location=loc2, dimension='width', line_color='yellow', line_width=3) + hline3 = Span(location=loc3, dimension='width', line_color='red', line_width=3) + plot.renderers.extend([hline1, hline2, hline3]) + label1 = Label(x=time_arr[-1], y=loc1, y_units='data', text='45000 counts') + label2 = Label(x=time_arr[-1], y=loc2, y_units='data', text='50000 counts') + label3 = Label(x=time_arr[-1], y=loc3, y_units='data', text='60000 counts') + plot.add_layout(label1) + plot.add_layout(label2) + plot.add_layout(label3) + plot.y_range = Range1d(-1000.0, 62000.0) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@fname'), + ('Visit ID', '@vid'), + ('Detector', '@det'), + ('Star No.', '@star_no'), + ('LS Status', '@status'), + ('Date-Obs', '@dobs'), + ('Box peak', '@peaks'), + ('Measured V2', '@stars_v2'), + ('Measured V3', '@stars_v3'), + ('--------', '----------------')] + plot.add_tools(hover) + + return plot + + def setup_date_range(self): + """Set up a date range filter, defaulting to the last week of data.""" + end_date = datetime.now(tz=timezone.utc) + one_week_ago = end_date.date() - timedelta(days=7) + first_data_point = np.min(self.source.data['time_arr']).date() + last_data_point = np.max(self.source.data['time_arr']).date() + if last_data_point < one_week_ago: + # keep at least one point in the plot if there was + # no TA data this week + start_date = last_data_point + else: + start_date = one_week_ago + + # allowed range is from the first ever data point to today + self.date_range = DateRangeSlider( + title="Date range displayed", start=first_data_point, + end=end_date, value=(start_date, end_date), step=1) + + callback = CustomJS(args=dict(s=self.source), code=""" + s.change.emit(); + """) + self.date_range.js_on_change('value', callback) + + self.date_filter = CustomJSFilter(args=dict(slider=self.date_range), code=""" + var indices = []; + var start = slider.value[0]; + var end = slider.value[1]; + + for (var i=0; i < source.get_length(); i++) { + if (source.data['time_arr'][i] >= start + && source.data['time_arr'][i] <= end) { + indices.push(true); + } else { + indices.push(false); + } + } + return indices; + """) + self.date_view = CDSView(source=self.source, filters=[self.date_filter]) + + def mk_plt_layout(self): + """Create the bokeh plot layout""" + self.source = ColumnDataSource(data=self.msata_data) + + # make sure all arrays are lists in order to later be able to read the data + # from the html file + for item in self.source.data: + if not isinstance(self.source.data[item], (str, float, int, list)): + self.source.data[item] = 
self.source.data[item].tolist() + + # add a time array to the data source + self.add_time_column() + + # set up selection tools to share + self.share_tools = [BoxSelectTool()] + + # set up a date range filter widget + self.setup_date_range() + + # set the output html file name and create the plot grid + output_file(self.output_file_name) + p1 = self.plt_status() + p2 = self.plt_residual_offsets() + p3 = self.plt_res_offsets_corrected() + p4 = self.plt_v2offset_time() + p5 = self.plt_v3offset_time() + p6 = self.plt_lsv2v3offsetsigma() + p7 = self.plt_v2offsigma_time() + p8 = self.plt_v3offsigma_time() + p9 = self.plt_roll_offset() + p10 = self.plt_lsoffsetmag() + p12 = self.plt_tot_number_of_stars() + p11 = self.plt_mags_time() + + # make grid + grid = gridplot([p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12], + ncols=2, merge_tools=False) + box_layout = layout(children=[self.date_range, grid]) + save(box_layout) + + # return the needed components for embedding the results in the MSATA html template + script, div = components(box_layout) + return script, div + + def identify_tables(self): + """Determine which database tables to use for a run of the TA monitor.""" + mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument] + self.query_table = eval('{}TAQueryHistory'.format(mixed_case_name)) + self.stats_table = eval('{}TAStats'.format(mixed_case_name)) + + def most_recent_search(self): + """Query the query history database and return the information + on the most recent query for the given 'aperture_name' where + the msata monitor was executed. + + Returns + ------- + query_result : float + Date (in MJD) of the ending range of the previous MAST query + where the msata monitor was run. + """ + query = session.query(self.query_table).filter(and_(self.query_table.aperture == self.aperture, + self.query_table.run_monitor == True)).order_by(self.query_table.end_time_mjd).all() + + dates = np.zeros(0) + for instance in query: + dates = np.append(dates, instance.end_time_mjd) + + query_count = len(dates) + if query_count == 0: + query_result = self.query_very_beginning + logging.info(('\tNo query history for {}. Beginning search date will be set to {}.' + .format(self.aperture, self.query_very_beginning))) + else: + query_result = np.max(dates) + + return query_result + + def get_data_from_html(self, html_file): + """ + This function gets the data from the Bokeh html file created with + the NIRSpec TA monitor script. + Parameters + ---------- + html_file: str + File created by the monitor script + Returns + ------- + prev_data_dict: dict + Dictionary containing all data used in the plots + """ + + # open the html file and get the contents + htmlFileToBeOpened = open(html_file, "r") + contents = htmlFileToBeOpened.read() + soup = BeautifulSoup(contents, 'html.parser') + + # now read as python dictionary and search for the data + prev_data_dict = {} + html_data = json.loads(soup.find('script', type='application/json').string) + for key, val in html_data.items(): + if 'roots' in val: # this is a dictionary + if 'references' in val['roots']: + for item in val['roots']['references']: # this is a list + # each item of the list is a dictionary + for item_key, item_val in item.items(): + if 'data' in item_val: + # finally the data dictionary! 
+ for data_key, data_val in item_val['data'].items(): + prev_data_dict[data_key] = data_val + # set to None if dictionary is empty + if not bool(prev_data_dict): + prev_data_dict = None + return prev_data_dict + + def construct_expected_data(self, keywd_dict, tot_number_of_stars): + """This function creates the list to append to the dictionary key in the expected format. + Parameters + ---------- + keywd_dict: dictionary + Dictionary corresponding to the file keyword + tot_number_of_stars: integer + Number of stars in the observation + Returns + ------- + list4dict: list + List to be appended to the data structure. Has the right length but no real values + """ + # set the value to add + val = -999 + list4dict = [] + # create either the list or return the right type of value + if keywd_dict['loc'] != 'ta_table': # these cases should be single values per observation + if keywd_dict['type'] == float: + val = float(val) + if keywd_dict['type'] == str: + val = str(val) + list4dict = val + else: + for tns in tot_number_of_stars: # elements the list of lists should have + list2append = [] + for _ in range(tns): # elements each sublist should have + if keywd_dict['type'] == float: + val = float(val) + if keywd_dict['type'] == str: + val = str(val) + list2append.append(val) + list4dict.append(list2append) + return list4dict + + def prev_data2expected_format(self, prev_data_dict): + """Add all the necessary columns to match expected format to combine previous + and new data. + Parameters + ---------- + prev_data_dict: dictionary + Dictionary containing all data used in the Bokeh html file plots + Returns + ------- + prev_data: pandas dataframe + Contains all expected columns to be combined with the new data + latest_prev_obs: str + Date of the latest observation in the previously plotted data + """ + # remember that the time array created is in milliseconds, removing to get time object + time_in_millis = max(prev_data_dict['time_arr']) + latest_prev_obs = Time(time_in_millis / 1000., format='unix') + latest_prev_obs = latest_prev_obs.mjd + prev_data_expected_cols = {} + tot_number_of_stars = prev_data_dict['tot_number_of_stars'] + for file_keywd, keywd_dict in self.keywds2extract.items(): + key = keywd_dict['name'] + if key in prev_data_dict: + # case when all the info of all visits and ref stars is in the same list + if len(prev_data_dict[key]) > len(tot_number_of_stars): + correct_arrangement = [] + correct_start_idx, correct_end_idx = 0, tot_number_of_stars[0] + for idx, tns in enumerate(tot_number_of_stars): + list2append = prev_data_dict[key][correct_start_idx: correct_end_idx] + correct_arrangement.append(list2append) + correct_start_idx = correct_end_idx + correct_end_idx += tns + prev_data_expected_cols[key] = correct_arrangement + # case when the html stored thing is just an object but does not have data + elif len(prev_data_dict[key]) < len(tot_number_of_stars): + list4dict = self.construct_expected_data(keywd_dict, tot_number_of_stars) + prev_data_expected_cols[key] = list4dict + # case when nothing special to do + else: + prev_data_expected_cols[key] = prev_data_dict[key] + else: + list4dict = self.construct_expected_data(keywd_dict, tot_number_of_stars) + prev_data_expected_cols[key] = list4dict + # now convert to a pandas dataframe to be combined with the new data + prev_data = pd.DataFrame(prev_data_expected_cols) + return prev_data, latest_prev_obs + + def pull_filenames(self, file_info): + """Extract filenames from the list of file information returned from + query_mast. 
+ + Parameters + ---------- + file_info : dict + Dictionary of file information returned by ``query_mast`` + + Returns + ------- + files : list + List of filenames (without paths) extracted from ``file_info`` + """ + files = [] + for list_element in file_info: + if 'filename' in list_element: + files.append(list_element['filename']) + elif 'root_name' in list_element: + files.append(list_element['root_name']) + return files + + def get_uncal_names(self, file_list): + """Replace the last suffix for _uncal and return list. + Parameters + ---------- + file_list : list + List of fits files + Returns + ------- + good_files : list + Filtered list of uncal file names + """ + good_files = [] + for filename in file_list: + if filename.endswith('.fits'): + # MAST names look like: jw01133003001_02101_00001_nrs2_cal.fits + suffix2replace = filename.split('_')[-1] + filename = filename.replace(suffix2replace, 'uncal.fits') + else: + # rootnames look like: jw01133003001_02101_00001_nrs2 + filename += '_uncal.fits' + if filename not in good_files: + good_files.append(filename) + return good_files + + def update_ta_success_txtfile(self): + """Create a text file with all the failed and successful MSATA. + Parameters + ---------- + None + Returns + ------- + Nothing + """ + output_success_ta_txtfile = os.path.join(self.output_dir, "msata_success.txt") + # check if previous file exsists and read the data from it + if os.path.isfile(output_success_ta_txtfile): + # now rename the previous file, for backup + os.rename(output_success_ta_txtfile, os.path.join(self.output_dir, "prev_msata_success.txt")) + # get the new data + ta_success, ta_inprogress, ta_failure = [], [], [] + filenames, ta_status = self.msata_data.loc[:,'filename'], self.msata_data.loc[:,'ta_status'] + for fname, ta_stat in zip(filenames, ta_status): + # select the appropriate list to append to + if ta_stat == 'SUCCESSFUL': + ta_success.append(fname) + elif ta_stat == 'IN_PROGRESS': + ta_inprogress.append(fname) + else: + ta_failure.append(fname) + # find which one is the longest list (to make sure the other lists have the same length) + successes, inprogress, failures = len(ta_success), len(ta_inprogress), len(ta_failure) + longest_list = None + if successes >= inprogress: + longest_list = successes + else: + longest_list = inprogress + if longest_list < failures: + longest_list = failures + # match length of the lists + for ta_list in [ta_success, ta_inprogress, ta_failure]: + remaining_items = longest_list - len(ta_list) + if remaining_items > 0: + for _ in range(remaining_items): + ta_list.append("") + # write the new output file + with open(output_success_ta_txtfile, 'w+') as txt: + txt.write("# MSATA successes and failure file names \n") + filehdr1 = "# {} Total successful and {} total failed MSATA ".format(successes, failures) + filehdr2 = "# {:<50} {:<50} {:<50}".format("Successes", "In_Progress", "Failures") + txt.write(filehdr1 + "\n") + txt.write(filehdr2 + "\n") + for idx, suc in enumerate(ta_success): + line = "{:<50} {:<50} {:<50}".format(suc, ta_inprogress[idx], ta_failure[idx]) + txt.write(line + "\n") + + def read_existing_html(self): + """ + This function gets the data from the Bokeh html file created with + the NIRSpec TA monitor script. 
+ """ + self.output_dir = os.path.join(get_config()['outputs'], 'msata_monitor') + ensure_dir_exists(self.output_dir) + + self.output_file_name = os.path.join(self.output_dir, "msata_layout.html") + if not os.path.isfile(self.output_file_name): + return 'No MSATA data available', '', '' + + # open the html file and get the contents + with open(self.output_file_name, "r") as html_file: + contents = html_file.read() + + soup = BeautifulSoup(contents, 'html.parser').body + + # find the script elements + script1 = str(soup.find('script', type='text/javascript')) + script2 = str(soup.find('script', type='application/json')) + + # find the div element + div = str(soup.find('div', class_='bk-root')) + return div, script1, script2 + + @log_fail + @log_info + def run(self): + """The main method. See module docstrings for further details.""" + + logging.info('Begin logging for msata_monitor') + + # define MSATA variables + self.instrument = "nirspec" + self.aperture = "NRS_FULL_MSA" + + # Identify which database tables to use + self.identify_tables() + + # Get the output directory and setup a directory to store the data + self.output_dir = os.path.join(get_config()['outputs'], 'msata_monitor') + ensure_dir_exists(self.output_dir) + # Set up directory to store the data + ensure_dir_exists(os.path.join(self.output_dir, 'data')) + self.data_dir = os.path.join(self.output_dir, + 'data/{}_{}'.format(self.instrument.lower(), + self.aperture.lower())) + ensure_dir_exists(self.data_dir) + + # Locate the record of most recent MAST search; use this time + self.query_start = self.most_recent_search() + # get the data of the plots previously created and set the query start date + self.prev_data = None + self.output_file_name = os.path.join(self.output_dir, "msata_layout.html") + logging.info('\tNew output plot file will be written as: {}'.format(self.output_file_name)) + if os.path.isfile(self.output_file_name): + prev_data_dict = self.get_data_from_html(self.output_file_name) + self.prev_data, self.query_start = self.prev_data2expected_format(prev_data_dict) + logging.info('\tPrevious data read from html file: {}'.format(self.output_file_name)) + # move this plot to a previous version + shutil.copyfile(self.output_file_name, os.path.join(self.output_dir, "prev_msata_layout.html")) + # fail save - start from the beginning if there is no html file + else: + self.query_start = self.query_very_beginning + logging.info('\tPrevious output html file not found. 
Starting MAST query from Jan 28, 2022 == First JWST images (MIRI)') + + # Use the current time as the end time for MAST query + self.query_end = Time.now().mjd + logging.info('\tQuery times: {} {}'.format(self.query_start, self.query_end)) + + # Query for data using the aperture and the time of the + # most recent previous search as the starting time + + # via MAST: + # new_entries = monitor_utils.mast_query_ta( + # self.instrument, self.aperture, self.query_start, self.query_end) + + # via django model: + new_entries = monitor_utils.model_query_ta( + self.instrument, self.aperture, self.query_start, self.query_end) + msata_entries = len(new_entries) + logging.info('\tQuery has returned {} MSATA files for {}, {}.'.format(msata_entries, self.instrument, self.aperture)) + + # Filter new entries to only keep uncal files + new_entries = self.pull_filenames(new_entries) + new_entries = self.get_uncal_names(new_entries) + msata_entries = len(new_entries) + logging.info('\tThere are {} uncal TA files to run the MSATA monitor.'.format(msata_entries)) + + # Get full paths to the files + new_filenames = [] + for filename_of_interest in new_entries: + if (self.prev_data is not None + and filename_of_interest in self.prev_data['filename'].values): + logging.warning('\t\tFile {} already in previous data. Skipping.'.format(filename_of_interest)) + continue + try: + new_filenames.append(filesystem_path(filename_of_interest)) + logging.warning('\tFile {} included for processing.'.format(filename_of_interest)) + except FileNotFoundError: + logging.warning('\t\tUnable to locate {} in filesystem. Not including in processing.'.format(filename_of_interest)) + + if len(new_filenames) == 0: + logging.warning('\t\t ** Unable to locate any file in filesystem. Nothing to process. ** ') + + # Run the monitor on any new files + logging.info('\tMSATA monitor found {} new uncal files.'.format(len(new_filenames))) + self.script, self.div, self.msata_data = None, None, None + monitor_run = False + if len(new_filenames) > 0: # new data was found + # get the data + self.new_msata_data, no_ta_ext_msgs = self.get_msata_data(new_filenames) + if len(no_ta_ext_msgs) >= 1: + for item in no_ta_ext_msgs: + logging.info(item) + if self.new_msata_data is not None: + # concatenate with previous data + if self.prev_data is not None: + self.msata_data = pd.concat([self.prev_data, self.new_msata_data]) + logging.info('\tData from previous html output file and new data concatenated.') + else: + self.msata_data = self.new_msata_data + logging.info('\tOnly new data was found - no previous html file.') + else: + logging.info('\tMSATA monitor skipped. No MSATA data found.') + # make sure to return the old data if no new data is found + elif self.prev_data is not None: + self.msata_data = self.prev_data + logging.info('\tNo new data found. 
Using data from previous html output file.') + # make the plots if there is data + if self.msata_data is not None: + self.script, self.div = self.mk_plt_layout() + monitor_run = True + logging.info('\tOutput html plot file created: {}'.format(self.output_file_name)) + msata_files_used4plots = len(self.msata_data['visit_id']) + logging.info('\t{} MSATA files were used to make plots.'.format(msata_files_used4plots)) + # update the list of successful and failed TAs + self.update_ta_success_txtfile() + logging.info('\tMSATA status file was updated') + else: + logging.info('\tMSATA monitor skipped.') + + # Update the query history + new_entry = {'instrument': 'nirspec', + 'aperture': self.aperture, + 'start_time_mjd': self.query_start, + 'end_time_mjd': self.query_end, + 'entries_found': msata_entries, + 'files_found': len(new_filenames), + 'run_monitor': monitor_run, + 'entry_date': datetime.now()} + with engine.begin() as connection: + connection.execute(self.query_table.__table__.insert(), new_entry) + logging.info('\tUpdated the query history table') + + logging.info('MSATA Monitor completed successfully.') + + +if __name__ == '__main__': + + module = os.path.basename(__file__).strip('.py') + start_time, log_file = monitor_utils.initialize_instrument_monitor(module) + + monitor = MSATA() + monitor.run() + + monitor_utils.update_monitor_table(module, start_time, log_file) diff --git a/jwql/instrument_monitors/nirspec_monitors/ta_monitors/wata_monitor.py b/jwql/instrument_monitors/nirspec_monitors/ta_monitors/wata_monitor.py new file mode 100755 index 000000000..f248a1d09 --- /dev/null +++ b/jwql/instrument_monitors/nirspec_monitors/ta_monitors/wata_monitor.py @@ -0,0 +1,1035 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + + +# HISTORY +# Feb 2022 - Vr. 1.0: Completed initial version +# Sep 2022 - Vr. 1.1: Modified ColumnDataSource so that data could be recovered +# from an html file of a previous run of the monitor and +# included the code to read and format the data from the html file + + +""" +This module contains the code for the NIRSpec Wide Aperture Target +Acquisition (WATA) monitor, which monitors the TA offsets. + +This monitor displays the comparison of desired versus measured TA. + +This monitor also displays V2, V3 offsets over time. 
+ +Author +______ + - Maria Pena-Guerrero + - Melanie Clarke + +Use +--- + This module can be used from the command line as follows: + python wata_monitor.py + +""" + + +# general imports +import json +import os +import logging +import shutil +from datetime import datetime, timezone, timedelta + +import numpy as np +import pandas as pd +from astropy.time import Time +from astropy.io import fits +from bokeh.embed import components +from bokeh.io import output_file +from bokeh.layouts import gridplot, layout +from bokeh.models import ( + ColumnDataSource, Range1d, CustomJS, CustomJSFilter, CDSView, + Span, Label, DateRangeSlider) +from bokeh.models.tools import HoverTool, BoxSelectTool +from bokeh.plotting import figure, save +from bs4 import BeautifulSoup +from sqlalchemy.sql.expression import and_ + +# jwql imports +from jwql.utils.logging_functions import log_info, log_fail +from jwql.utils import monitor_utils +from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE +from jwql.database.database_interface import session, engine +from jwql.database.database_interface import NIRSpecTAQueryHistory, NIRSpecTAStats +from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config, filename_parser + + +class WATA(): + """ Class for executing the NIRSpec WATA monitor. + + This class will search for new WATA current files in the file systems + for NIRSpec and will run the monitor on these files. The monitor will + extract the TA information from the file headers and perform all + statistical measurements. Results will be saved to the WATA database. + + Attributes + ---------- + output_dir : str + Path into which outputs will be placed. + + data_dir : str + Path into which new dark files will be copied to be worked on. + + query_start : float + MJD start date to use for querying MAST. + + query_end : float + MJD end date to use for querying MAST. + + aperture : str + Name of the aperture used for the dark current (e.g. + "NRS_FULL_MSA", "NRS_S1600A1_SLIT"). 
+ """ + + def __init__(self): + """ Initialize an instance of the WATA class """ + # Very beginning of intake of images: Jan 28, 2022 == First JWST images (MIRI) + self.query_very_beginning = 59607.0 + + # structure to define required keywords to extract and where they live + self.keywds2extract = {'FILENAME': {'loc': 'main_hdr', 'alt_key': None, 'name': 'filename', 'type': str}, + 'DATE-BEG': {'loc': 'main_hdr', 'alt_key': None, 'name': 'date_obs'}, + 'OBS_ID': {'loc': 'main_hdr', 'alt_key': 'OBSID', 'name': 'visit_id'}, + 'FILTER': {'loc': 'main_hdr', 'alt_key': 'FWA_POS', 'name': 'tafilter'}, + 'READOUT': {'loc': 'main_hdr', 'alt_key': 'READPATT', 'name': 'readout'}, + 'TASTATUS': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'ta_status'}, + 'STAT_RSN': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'status_reason'}, + 'REFSTNAM': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'star_name'}, + 'REFSTRA': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'star_ra'}, + 'REFSTDEC': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'star_dec'}, + 'REFSTMAG': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'star_mag'}, + 'REFSTCAT': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'star_catalog'}, + 'V2_PLAND': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'planned_v2'}, + 'V3_PLAND': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'planned_v3'}, + 'EXTCOLST': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'stamp_start_col'}, + 'EXTROWST': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'stamp_start_row'}, + 'TA_DTCTR': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'star_detector'}, + 'BOXPKVAL': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'max_val_box'}, + 'BOXPKCOL': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'max_val_box_col'}, + 'BOXPKROW': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'max_val_box_row'}, + 'TA_ITERS': {'loc': 'ta_hdr', 'alt_key': 'CENITERS', 'name': 'iterations'}, + 'CORR_COL': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'corr_col'}, + 'CORR_ROW': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'corr_row'}, + 'IMCENCOL': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'stamp_final_col'}, + 'IMCENROW': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'stamp_final_row'}, + 'DTCENCOL': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'detector_final_col'}, + 'DTCENROW': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'detector_final_row'}, + 'SCIXCNTR': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'final_sci_x'}, + 'SCIYCNTR': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'final_sci_y'}, + 'TARGETV2': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'measured_v2'}, + 'TARGETV3': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'measured_v3'}, + 'V2_REF': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'ref_v2'}, + 'V3_REF': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'ref_v3'}, + 'V2_RESID': {'loc': 'ta_hdr', 'alt_key': 'V2_OFFST', 'name': 'v2_offset'}, + 'V3_RESID': {'loc': 'ta_hdr', 'alt_key': 'V3_OFFST', 'name': 'v3_offset'}, + 'SAM_X': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'sam_x'}, + 'SAM_Y': {'loc': 'ta_hdr', 'alt_key': None, 'name': 'sam_y'}} + + # initialize attributes to be set later + self.source = None + self.share_tools = [] + self.date_range = None + self.date_view = None + + def get_tainfo_from_fits(self, fits_file): + """ Get the TA information from the fits file + Parameters + ---------- + fits_file: str + This is the fits file for a specific WATA + + Returns + ------- + hdr: dictionary + Dictionary of the primary extension + """ + wata = False + with fits.open(fits_file) as ff: + # make sure this is a WATA file + for hdu in ff: + if 'TARG_ACQ' in hdu.name: + 
wata = True + break + if not wata: + return None + main_hdr = ff[0].header + try: + ta_hdr = ff['TARG_ACQ'].header + except KeyError: + no_ta_ext_msg = 'No TARG_ACQ extension in file '+fits_file + return no_ta_ext_msg + wata_info = [main_hdr, ta_hdr] + return wata_info + + def get_wata_data(self, new_filenames): + """ Create the data array for the WATA input files + Parameters + ---------- + new_filenames: list + List of WATA file names to consider + + Returns + ------- + wata_df: data frame object + Pandas data frame containing all WATA data + """ + # fill out the dictionary to create the dataframe + wata_dict, no_ta_ext_msgs = {}, [] + for fits_file in new_filenames: + wata_info = self.get_tainfo_from_fits(fits_file) + if isinstance(wata_info, str): + no_ta_ext_msgs.append(wata_info) + continue + if wata_info is None: + continue + main_hdr, ta_hdr = wata_info + for key, key_dict in self.keywds2extract.items(): + key_name = key_dict['name'] + if key_name not in wata_dict: + wata_dict[key_name] = [] + ext = main_hdr + if key_dict['loc'] == 'ta_hdr': + ext = ta_hdr + try: + val = ext[key] + if key == 'filename': + val = fits_file + except KeyError: + val = ext[key_dict['alt_key']] + wata_dict[key_name].append(val) + # create the pandas dataframe + wata_df = pd.DataFrame(wata_dict) + return wata_df, no_ta_ext_msgs + + def add_time_column(self): + """Add time column to data source, to be used by all plots.""" + date_obs = self.source.data['date_obs'] + if 'time_arr' not in self.source.data: + time_arr = [] + for do_str in date_obs: + # convert time string into an array of time (this is in UT) + t = datetime.fromisoformat(do_str) + time_arr.append(t) + + # add to the bokeh data structure + self.source.data["time_arr"] = time_arr + + def plt_status(self): + """ Plot the WATA status (passed = 0 or failed = 1). 
+ Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + ta_status = self.source.data['ta_status'] + + # check if this column exists in the data already, else create it + if 'bool_status' not in self.source.data: + # bokeh does not like to plot strings, turn into binary type + bool_status, status_colors = [], [] + for tas in ta_status: + if 'unsuccessful' not in tas.lower(): + bool_status.append(1) + status_colors.append('blue') + else: + bool_status.append(0) + status_colors.append('red') + + # add these to the bokeh data structure + self.source.data["ta_status_bool"] = bool_status + self.source.data["status_colors"] = status_colors + + # create a new bokeh plot + plot = figure(title="WATA Status [Success=1, Fail=0]", x_axis_label='Time', + y_axis_label='WATA Status', x_axis_type='datetime',) + plot.y_range = Range1d(-0.5, 1.5) + plot.circle(x='time_arr', y='ta_status_bool', source=self.source, + color='status_colors', size=7, fill_alpha=0.3, view=self.date_view) + + # make tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('TA status', '@ta_status'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Magnitude', '@star_mag'), + ('--------', '----------------')] + + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_residual_offsets(self): + """ Plot the residual V2 and V3 offsets + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + # create a new bokeh plot + plot = figure(title="WATA Residual V2-V3 Offsets", x_axis_label='Residual V2 Offset', + y_axis_label='Residual V3 Offset') + plot.circle(x='v2_offset', y='v3_offset', source=self.source, + color="blue", size=7, fill_alpha=0.3, view=self.date_view) + plot.x_range = Range1d(-0.5, 0.5) + plot.y_range = Range1d(-0.5, 0.5) + + # mark origin lines + vline = Span(location=0, dimension='height', line_color='black', line_width=0.7) + hline = Span(location=0, dimension='width', line_color='black', line_width=0.7) + plot.renderers.extend([vline, hline]) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('TA status', '@ta_status'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Magnitude', '@star_mag'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_v2offset_time(self): + """ Plot the residual V2 versus time + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + # create a new bokeh plot + plot = figure(title="WATA V2 Offset vs Time", x_axis_label='Time', + y_axis_label='Residual V2 Offset', x_axis_type='datetime') + plot.circle(x='time_arr', y='v2_offset', source=self.source, + color="blue", size=7, fill_alpha=0.3, view=self.date_view) + plot.y_range = Range1d(-0.5, 0.5) + + # mark origin line + hline = Span(location=0, dimension='width', line_color='black', line_width=0.7) + plot.renderers.extend([hline]) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('TA status', '@ta_status'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Magnitude', '@star_mag'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared 
selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_v3offset_time(self): + """ Plot the residual V3 versus time + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + # create a new bokeh plot + plot = figure(title="WATA V3 Offset vs Time", x_axis_label='Time', + y_axis_label='Residual V3 Offset', x_axis_type='datetime') + plot.circle(x='time_arr', y='v3_offset', source=self.source, + color="blue", size=7, fill_alpha=0.3, view=self.date_view) + plot.y_range = Range1d(-0.5, 0.5) + + # mark origin line + hline = Span(location=0, dimension='width', line_color='black', line_width=0.7) + plot.renderers.extend([hline]) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('TA status', '@ta_status'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Magnitude', '@star_mag'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def plt_mag_time(self): + """ Plot the star magnitude versus time + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + # calculate the pseudo magnitudes + max_val_box, time_arr = self.source.data['max_val_box'], self.source.data['time_arr'] + + # check if this column exists in the data already, else create it + if "nrsrapid_f140x" not in self.source.data: + # create the arrays per filter and readout pattern + nrsrapid_f140x, nrsrapid_f110w, nrsrapid_clear = [], [], [] + nrsrapidd6_f140x, nrsrapidd6_f110w, nrsrapidd6_clear = [], [], [] + filter_used, readout = self.source.data['tafilter'], self.source.data['readout'] + for i, val in enumerate(max_val_box): + if '140' in filter_used[i]: + if readout[i].lower() == 'nrsrapid': + nrsrapid_f140x.append(val) + nrsrapid_f110w.append(np.NaN) + nrsrapid_clear.append(np.NaN) + nrsrapidd6_f140x.append(np.NaN) + nrsrapidd6_f110w.append(np.NaN) + nrsrapidd6_clear.append(np.NaN) + elif readout[i].lower() == 'nrsrapidd6': + nrsrapid_f140x.append(np.NaN) + nrsrapid_f110w.append(np.NaN) + nrsrapid_clear.append(np.NaN) + nrsrapidd6_f140x.append(val) + nrsrapidd6_f110w.append(np.NaN) + nrsrapidd6_clear.append(np.NaN) + elif '110' in filter_used[i]: + if readout[i].lower() == 'nrsrapid': + nrsrapid_f140x.append(np.NaN) + nrsrapid_f110w.append(val) + nrsrapid_clear.append(np.NaN) + nrsrapidd6_f140x.append(np.NaN) + nrsrapidd6_f110w.append(np.NaN) + nrsrapidd6_clear.append(np.NaN) + elif readout[i].lower() == 'nrsrapidd6': + nrsrapid_f140x.append(np.NaN) + nrsrapid_f110w.append(np.NaN) + nrsrapid_clear.append(np.NaN) + nrsrapidd6_f140x.append(np.NaN) + nrsrapidd6_f110w.append(val) + nrsrapidd6_clear.append(np.NaN) + else: + if readout[i].lower() == 'nrsrapid': + nrsrapid_f140x.append(np.NaN) + nrsrapid_f110w.append(np.NaN) + nrsrapid_clear.append(val) + nrsrapidd6_f140x.append(np.NaN) + nrsrapidd6_f110w.append(np.NaN) + nrsrapidd6_clear.append(np.NaN) + elif readout[i].lower() == 'nrsrapidd6': + nrsrapid_f140x.append(np.NaN) + nrsrapid_f110w.append(np.NaN) + nrsrapid_clear.append(np.NaN) + nrsrapidd6_f140x.append(np.NaN) + nrsrapidd6_f110w.append(np.NaN) + nrsrapidd6_clear.append(val) + + # add to the bokeh data structure + self.source.data["nrsrapid_f140x"] = nrsrapid_f140x + self.source.data["nrsrapid_f110w"] = nrsrapid_f110w + self.source.data["nrsrapid_clear"] = nrsrapid_clear + self.source.data["nrsrapidd6_f140x"] = 
nrsrapidd6_f140x + self.source.data["nrsrapidd6_f110w"] = nrsrapidd6_f110w + self.source.data["nrsrapidd6_clear"] = nrsrapidd6_clear + + # create a new bokeh plot + plot = figure(title="WATA Counts vs Time", x_axis_label='Time', + y_axis_label='box_peak [Counts]', x_axis_type='datetime') + plot.circle(x='time_arr', y='nrsrapid_f140x', source=self.source, + color="purple", size=7, fill_alpha=0.4, view=self.date_view) + plot.circle(x='time_arr', y='nrsrapidd6_f140x', source=self.source, + color="purple", size=12, fill_alpha=0.4, view=self.date_view) + plot.triangle(x='time_arr', y='nrsrapid_f110w', source=self.source, + color="orange", size=8, fill_alpha=0.4, view=self.date_view) + plot.triangle(x='time_arr', y='nrsrapidd6_f110w', source=self.source, + color="orange", size=13, fill_alpha=0.4, view=self.date_view) + plot.square(x='time_arr', y='nrsrapid_clear', source=self.source, + color="gray", size=7, fill_alpha=0.4, view=self.date_view) + plot.square(x='time_arr', y='nrsrapidd6_clear', source=self.source, + color="gray", size=12, fill_alpha=0.4, view=self.date_view) + + # add count saturation warning lines + loc1, loc2, loc3 = 45000.0, 50000.0, 60000.0 + hline1 = Span(location=loc1, dimension='width', line_color='green', line_width=3) + hline2 = Span(location=loc2, dimension='width', line_color='yellow', line_width=3) + hline3 = Span(location=loc3, dimension='width', line_color='red', line_width=3) + plot.renderers.extend([hline1, hline2, hline3]) + + label1 = Label(x=time_arr[-1], y=loc1, y_units='data', text='45000 counts') + label2 = Label(x=time_arr[-1], y=loc2, y_units='data', text='50000 counts') + label3 = Label(x=time_arr[-1], y=loc3, y_units='data', text='60000 counts') + plot.add_layout(label1) + plot.add_layout(label2) + plot.add_layout(label3) + plot.y_range = Range1d(-1000.0, 62000.0) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('TA status', '@ta_status'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Box peak', '@max_val_box'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def get_unsuccessful_ta(self, arr_name): + """ Find unsuccessful TAs in this set (to be plotted in red) + Parameters + ---------- + arr_name: str, name of the array of interest + Returns + ------- + new_list_failed: list, failed TA values from array of interest + new_list_else: list, non-failed TA values from array of interest + """ + bool_status = self.source.data["ta_status_bool"] + new_list_failed, new_list_else = [], [] + for idx, val in enumerate(self.source.data[arr_name]): + if bool_status[idx] == 0.0: + new_list_failed.append(val) + new_list_else.append(np.NaN) + else: + new_list_failed.append(np.NaN) + new_list_else.append(val) + return new_list_failed, new_list_else + + def plt_centroid(self): + """ Plot the WATA centroid + Parameters + ---------- + None + Returns + ------- + plot: bokeh plot object + """ + # get the failed TAs to plot in red + if "corr_col_failed" not in self.source.data: + corr_col_failed, corr_col_not_failed = self.get_unsuccessful_ta('corr_col') + corr_row_failed, corr_row_not_failed = self.get_unsuccessful_ta('corr_row') + + # add these to the bokeh data structure + self.source.data["corr_col_failed"] = corr_col_failed + self.source.data["corr_col_not_failed"] = corr_col_not_failed + self.source.data["corr_row_failed"] = corr_row_failed + 
self.source.data["corr_row_not_failed"] = corr_row_not_failed + + # create a new bokeh plot + plot = figure(title="WATA Centroid", x_axis_label='Column', + y_axis_label='Row') + limits = [10, 25] + plot.x_range = Range1d(limits[0], limits[1]) + plot.y_range = Range1d(limits[0], limits[1]) + plot.circle(x='corr_col_not_failed', y='corr_row_not_failed', source=self.source, + color="blue", size=7, fill_alpha=0.5, view=self.date_view) + plot.circle(x='corr_col_failed', y='corr_row_failed', source=self.source, + color="red", size=7, fill_alpha=0.5, view=self.date_view) + plot.x_range = Range1d(0.0, 32.0) + plot.y_range = Range1d(0.0, 32.0) + + # add tooltips + hover = HoverTool() + hover.tooltips = [('File name', '@filename'), + ('Visit ID', '@visit_id'), + ('TA status', '@ta_status'), + ('Filter', '@tafilter'), + ('Readout', '@readout'), + ('Date-Obs', '@date_obs'), + ('Magnitude', '@star_mag'), + ('Box Centr Col', '@corr_col'), + ('Box Centr Row', '@corr_row'), + ('Det Centr Col', '@detector_final_col'), + ('Det Centr Row', '@detector_final_row'), + ('--------', '----------------')] + plot.add_tools(hover) + + # add shared selection tools + for tool in self.share_tools: + plot.add_tools(tool) + return plot + + def setup_date_range(self): + """Set up a date range filter, defaulting to the last week of data.""" + end_date = datetime.now(tz=timezone.utc) + one_week_ago = end_date.date() - timedelta(days=7) + first_data_point = np.min(self.source.data['time_arr']).date() + last_data_point = np.max(self.source.data['time_arr']).date() + if last_data_point < one_week_ago: + # keep at least one point in the plot if there was + # no TA data this week + start_date = last_data_point + else: + start_date = one_week_ago + + # allowed range is from the first ever data point to today + self.date_range = DateRangeSlider( + title="Date range displayed", start=first_data_point, + end=end_date, value=(start_date, end_date), step=1) + + callback = CustomJS(args=dict(s=self.source), code=""" + s.change.emit(); + """) + self.date_range.js_on_change('value', callback) + + filt = CustomJSFilter(args=dict(slider=self.date_range), code=""" + var indices = []; + var start = slider.value[0]; + var end = slider.value[1]; + + for (var i=0; i < source.get_length(); i++) { + if (source.data['time_arr'][i] >= start + && source.data['time_arr'][i] <= end) { + indices.push(true); + } else { + indices.push(false); + } + } + return indices; + """) + self.date_view = CDSView(source=self.source, filters=[filt]) + + def mk_plt_layout(self): + """Create the bokeh plot layout""" + self.source = ColumnDataSource(data=self.wata_data) + + # make sure all arrays are lists in order to later be able to read the data + # from the html file + for item in self.source.data: + if not isinstance(self.source.data[item], (str, float, int, list)): + self.source.data[item] = self.source.data[item].tolist() + + # add a time array to the data source + self.add_time_column() + + # set up selection tools to share + self.share_tools = [BoxSelectTool()] + + # set up a date range filter widget + self.setup_date_range() + + # set the output html file name and create the plot grid + output_file(self.output_file_name) + p1 = self.plt_status() + p2 = self.plt_residual_offsets() + p3 = self.plt_v2offset_time() + p4 = self.plt_v3offset_time() + p5 = self.plt_centroid() + p6 = self.plt_mag_time() + + # make grid + grid = gridplot([p1, p2, p3, p4, p5, p6], ncols=2, merge_tools=False) + box_layout = layout(children=[self.date_range, grid]) + save(box_layout) + + # 
return the needed components for embedding the results in the WATA html template + script, div = components(box_layout) + return script, div + + def identify_tables(self): + """Determine which database tables to use for a run of the TA monitor.""" + mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument] + self.query_table = eval('{}TAQueryHistory'.format(mixed_case_name)) + self.stats_table = eval('{}TAStats'.format(mixed_case_name)) + + def most_recent_search(self): + """Query the query history database and return the information + on the most recent query for the given 'aperture_name' where + the wata monitor was executed. + + Returns + ------- + query_result : float + Date (in MJD) of the ending range of the previous MAST query + where the wata monitor was run. + """ + query = session.query(self.query_table).filter(and_(self.query_table.aperture == self.aperture, + self.query_table.run_monitor == True)).order_by(self.query_table.end_time_mjd).all() + + dates = np.zeros(0) + for instance in query: + dates = np.append(dates, instance.end_time_mjd) + + query_count = len(dates) + if query_count == 0: + query_result = self.query_very_beginning + logging.info(('\tNo query history for {}. Beginning search date will be set to {}.'.format(self.aperture, self.query_very_beginning))) + else: + query_result = np.max(dates) + + return query_result + + def get_expected_data(self, keywd_dict, tot_number_of_stars): + """This function gets the value to append to the dictionary key in the expected format. + Parameters + ---------- + keywd_dict: dictionary + Dictionary corresponding to the file keyword + tot_number_of_stars: integer + Number of stars in the observation + Returns + ------- + val4dict: value + Value appended to the data structure; either string, float or integer + """ + # set the value to add + val = -999 + # return the right type of value + if keywd_dict['type'] == float: + val = float(val) + if keywd_dict['type'] == str: + val = str(val) + val4dict = val + return val4dict + + def get_data_from_html(self, html_file): + """ + This function gets the data from the Bokeh html file created with + the NIRSpec TA monitor script. + Parameters + ---------- + html_file: str + File created by the monitor script + Returns + ------- + prev_data: pandas dataframe + Contains all expected columns to be combined with the new data + latest_prev_obs: str + Date of the latest observation in the previously plotted data + """ + + # open the html file and get the contents + htmlFileToBeOpened = open(html_file, "r") + contents = htmlFileToBeOpened.read() + soup = BeautifulSoup(contents, 'html.parser') + + # now read as python dictionary and search for the data + prev_data_dict = {} + html_data = json.loads(soup.find('script', type='application/json').string) + for key, val in html_data.items(): + if 'roots' in val: # this is a dictionary + if 'references' in val['roots']: + for item in val['roots']['references']: # this is a list + # each item of the list is a dictionary + for item_key, item_val in item.items(): + if 'data' in item_val: + # finally the data dictionary! 
+ for data_key, data_val in item_val['data'].items(): + prev_data_dict[data_key] = data_val + # find the latest observation date + time_in_millis = max(prev_data_dict['time_arr']) + latest_prev_obs = Time(time_in_millis / 1000., format='unix') + latest_prev_obs = latest_prev_obs.mjd + # put data in expected format + prev_data_expected_cols = {} + visit_ids = prev_data_dict['visit_id'] + for file_keywd, keywd_dict in self.keywds2extract.items(): + key = keywd_dict['name'] + if key in prev_data_dict: + # case when the html stored thing is just an object but does not have data + if len(prev_data_dict[key]) < len(visit_ids): + list4dict = self.get_expected_data(keywd_dict, visit_ids) + prev_data_expected_cols[key] = list4dict + # case when nothing special to do + else: + prev_data_expected_cols[key] = prev_data_dict[key] + else: + list4dict = self.get_expected_data(keywd_dict, visit_ids) + prev_data_expected_cols[key] = list4dict + # now convert to a panda dataframe to be combined with the new data + prev_data = pd.DataFrame(prev_data_expected_cols) + return prev_data, latest_prev_obs + + def pull_filenames(self, file_info): + """Extract filenames from the list of file information returned from + query_mast. + + Parameters + ---------- + file_info : dict + Dictionary of file information returned by ``query_mast`` + + Returns + ------- + files : list + List of filenames (without paths) extracted from ``file_info`` + """ + files = [] + for list_element in file_info: + if 'filename' in list_element: + files.append(list_element['filename']) + elif 'root_name' in list_element: + files.append(list_element['root_name']) + return files + + def get_uncal_names(self, file_list): + """Replace the last suffix for _uncal and return list. + Parameters + ---------- + file_list : list + List of fits files + Returns + ------- + good_files : list + Filtered list of uncal file names + """ + good_files = [] + for filename in file_list: + if filename.endswith('.fits'): + # MAST names look like: jw01133003001_02101_00001_nrs2_cal.fits + suffix2replace = filename.split('_')[-1] + filename = filename.replace(suffix2replace, 'uncal.fits') + else: + # rootnames look like: jw01133003001_02101_00001_nrs2 + filename += '_uncal.fits' + if filename not in good_files: + good_files.append(filename) + return good_files + + def update_ta_success_txtfile(self): + """Create a text file with all the failed and successful WATA. 
+ Parameters + ---------- + None + Returns + ------- + Nothing + """ + output_success_ta_txtfile = os.path.join(self.output_dir, "wata_success.txt") + # check if previous file exists and read the data from it + if os.path.isfile(output_success_ta_txtfile): + # now rename the previous file, for backup + os.rename(output_success_ta_txtfile, os.path.join(self.output_dir, "prev_wata_success.txt")) + # get the new data + ta_success, ta_failure = [], [] + filenames, ta_status = self.wata_data.loc[:,'filename'], self.wata_data.loc[:,'ta_status'] + for fname, ta_stat in zip(filenames, ta_status): + # select the appropriate list to append to + if ta_stat == 'SUCCESSFUL': + ta_success.append(fname) + else: + ta_failure.append(fname) + # find which one is the longest list (to make sure the other lists have the same length) + successes, failures = len(ta_success), len(ta_failure) + longest_list = None + if successes >= failures: + longest_list = successes + else: + longest_list = failures + # match length of the lists + for ta_list in [ta_success, ta_failure]: + remaining_items = longest_list - len(ta_list) + if remaining_items != 0: + for _ in range(remaining_items): + ta_list.append("") + # write the new output file + with open(output_success_ta_txtfile, 'w+') as txt: + txt.write("# WATA successes and failure file names \n") + filehdr1 = "# {} Total successful and {} total failed WATA ".format(successes, failures) + filehdr2 = "# {:<50} {:<50}".format("Successes", "Failures") + txt.write(filehdr1 + "\n") + txt.write(filehdr2 + "\n") + for idx, suc in enumerate(ta_success): + line = "{:<50} {:<50}".format(suc, ta_failure[idx]) + txt.write(line + "\n") + + def read_existing_html(self): + """ + This function gets the data from the Bokeh html file created with + the NIRSpec TA monitor script. + """ + self.output_dir = os.path.join(get_config()['outputs'], 'wata_monitor') + ensure_dir_exists(self.output_dir) + + self.output_file_name = os.path.join(self.output_dir, "wata_layout.html") + if not os.path.isfile(self.output_file_name): + return 'No WATA data available', '', '' + + # open the html file and get the contents + with open(self.output_file_name, "r") as html_file: + contents = html_file.read() + + soup = BeautifulSoup(contents, 'html.parser').body + + # find the script elements + script1 = str(soup.find('script', type='text/javascript')) + script2 = str(soup.find('script', type='application/json')) + + # find the div element + div = str(soup.find('div', class_='bk-root')) + return div, script1, script2 + + @log_fail + @log_info + def run(self): + """The main method. 
See module docstrings for further details.""" + + logging.info('Begin logging for wata_monitor') + + # define WATA variables + self.instrument = "nirspec" + self.aperture = "NRS_S1600A1_SLIT" + + # Identify which database tables to use + self.identify_tables() + + # Get the output directory and setup a directory to store the data + self.output_dir = os.path.join(get_config()['outputs'], 'wata_monitor') + ensure_dir_exists(self.output_dir) + # Set up directories for the copied data + ensure_dir_exists(os.path.join(self.output_dir, 'data')) + self.data_dir = os.path.join(self.output_dir, + 'data/{}_{}'.format(self.instrument.lower(), + self.aperture.lower())) + ensure_dir_exists(self.data_dir) + + # Locate the record of most recent MAST search; use this time + self.query_start = self.most_recent_search() + # get the data of the plots previously created and set the query start date + self.prev_data = None + self.output_file_name = os.path.join(self.output_dir, "wata_layout.html") + logging.info('\tNew output plot file will be written as: {}'.format(self.output_file_name)) + if os.path.isfile(self.output_file_name): + self.prev_data, self.query_start = self.get_data_from_html(self.output_file_name) + logging.info('\tPrevious data read from html file: {}'.format(self.output_file_name)) + # copy this plot to a previous version + shutil.copyfile(self.output_file_name, os.path.join(self.output_dir, "prev_wata_layout.html")) + # fail save - start from the beginning if there is no html file + else: + self.query_start = self.query_very_beginning + logging.info('\tPrevious output html file not found. Starting MAST query from Jan 28, 2022 == First JWST images (MIRI)') + + # Use the current time as the end time for MAST query + self.query_end = Time.now().mjd + logging.info('\tQuery times: {} {}'.format(self.query_start, self.query_end)) + + # Query for data using the aperture and the time of the + # most recent previous search as the starting time + + # via MAST: + # new_entries = monitor_utils.mast_query_ta( + # self.instrument, self.aperture, self.query_start, self.query_end) + + # via django model: + new_entries = monitor_utils.model_query_ta( + self.instrument, self.aperture, self.query_start, self.query_end) + wata_entries = len(new_entries) + logging.info('\tQuery has returned {} WATA files for {}, {}.'.format(wata_entries, self.instrument, self.aperture)) + + # Filter new entries to only keep uncal files + new_entries = self.pull_filenames(new_entries) + new_entries = self.get_uncal_names(new_entries) + wata_entries = len(new_entries) + logging.info('\tThere are {} uncal TA files to run the WATA monitor.'.format(wata_entries)) + + # Get full paths to the files + new_filenames = [] + for filename_of_interest in new_entries: + if (self.prev_data is not None + and filename_of_interest in self.prev_data['filename'].values): + logging.warning('\t\tFile {} already in previous data. Skipping.'.format(filename_of_interest)) + continue + try: + new_filenames.append(filesystem_path(filename_of_interest)) + logging.warning('\tFile {} included for processing.'.format(filename_of_interest)) + except FileNotFoundError: + logging.warning('\t\tUnable to locate {} in filesystem. Not including in processing.'.format(filename_of_interest)) + + if len(new_filenames) == 0: + logging.warning('\t\t ** Unable to locate any file in filesystem. Nothing to process. 
** ') + + # Run the monitor on any new files + self.script, self.div, self.wata_data = None, None, None + monitor_run = False + if len(new_filenames) > 0: # new data was found + # get the data + self.new_wata_data, no_ta_ext_msgs = self.get_wata_data(new_filenames) + if len(no_ta_ext_msgs) >= 1: + for item in no_ta_ext_msgs: + logging.info(item) + if self.new_wata_data is not None: + # concatenate with previous data + if self.prev_data is not None: + self.wata_data = pd.concat([self.prev_data, self.new_wata_data]) + logging.info('\tData from previous html output file and new data concatenated.') + else: + self.wata_data = self.new_wata_data + logging.info('\tOnly new data was found - no previous html file.') + else: + logging.info('\tWATA monitor skipped. No WATA data found.') + # make sure to return the old data if no new data is found + elif self.prev_data is not None: + self.wata_data = self.prev_data + logging.info('\tNo new data found. Using data from previous html output file.') + # do the plots if there is any data + if self.wata_data is not None: + self.script, self.div = self.mk_plt_layout() + monitor_run = True + logging.info('\tOutput html plot file created: {}'.format(self.output_file_name)) + wata_files_used4plots = len(self.wata_data['visit_id']) + logging.info('\t{} WATA files were used to make plots.'.format(wata_files_used4plots)) + # update the list of successful and failed TAs + self.update_ta_success_txtfile() + logging.info('\tWATA status file was updated') + else: + logging.info('\tWATA monitor skipped.') + + # Update the query history + new_entry = {'instrument': self.instrument, + 'aperture': self.aperture, + 'start_time_mjd': self.query_start, + 'end_time_mjd': self.query_end, + 'entries_found': wata_entries, + 'files_found': len(new_filenames), + 'run_monitor': monitor_run, + 'entry_date': datetime.now()} + + with engine.begin() as connection: + connection.execute(self.query_table.__table__.insert(), new_entry) + logging.info('\tUpdated the query history table') + + logging.info('WATA Monitor completed successfully.') + + +if __name__ == '__main__': + + module = os.path.basename(__file__).strip('.py') + start_time, log_file = monitor_utils.initialize_instrument_monitor(module) + + monitor = WATA() + monitor.run() + + monitor_utils.update_monitor_table(module, start_time, log_file) diff --git a/jwql/instrument_monitors/pipeline_tools.py b/jwql/instrument_monitors/pipeline_tools.py index e9754bd17..c39e3bda4 100644 --- a/jwql/instrument_monitors/pipeline_tools.py +++ b/jwql/instrument_monitors/pipeline_tools.py @@ -25,6 +25,7 @@ from jwst.dq_init import DQInitStep from jwst.dark_current import DarkCurrentStep from jwst.firstframe import FirstFrameStep +from jwst.gain_scale import GainScaleStep from jwst.group_scale import GroupScaleStep from jwst.ipc import IPCStep from jwst.jump import JumpStep @@ -34,6 +35,7 @@ from jwst.pipeline.calwebb_detector1 import Detector1Pipeline from jwst.ramp_fitting import RampFitStep from jwst.refpix import RefPixStep +from jwst.reset import ResetStep from jwst.rscd import RscdStep from jwst.saturation import SaturationStep from jwst.superbias import SuperBiasStep @@ -43,16 +45,17 @@ # Define the fits header keyword that accompanies each step PIPE_KEYWORDS = {'S_GRPSCL': 'group_scale', 'S_DQINIT': 'dq_init', 'S_SATURA': 'saturation', - 'S_REFPIX': 'refpix', 'S_SUPERB': 'superbias', + 'S_REFPIX': 'refpix', 'S_SUPERB': 'superbias', 'S_RESET': 'reset', 'S_PERSIS': 'persistence', 'S_DARK': 'dark_current', 'S_LINEAR': 'linearity', 
'S_FRSTFR': 'firstframe', 'S_LASTFR': 'lastframe', 'S_RSCD': 'rscd', - 'S_JUMP': 'jump', 'S_RAMP': 'rate'} + 'S_JUMP': 'jump', 'S_RAMP': 'rate', 'S_GANSCL': 'gain_scale', 'S_IPC': 'ipc'} PIPELINE_STEP_MAPPING = {'dq_init': DQInitStep, 'dark_current': DarkCurrentStep, - 'firstframe': FirstFrameStep, 'group_scale': GroupScaleStep, - 'ipc': IPCStep, 'jump': JumpStep, 'lastframe': LastFrameStep, - 'linearity': LinearityStep, 'persistence': PersistenceStep, - 'rate': RampFitStep, 'refpix': RefPixStep, 'rscd': RscdStep, + 'firstframe': FirstFrameStep, 'gain_scale': GainScaleStep, + 'group_scale': GroupScaleStep, 'ipc': IPCStep, 'jump': JumpStep, + 'lastframe': LastFrameStep, 'linearity': LinearityStep, + 'persistence': PersistenceStep, 'rate': RampFitStep, + 'refpix': RefPixStep, 'reset': ResetStep, 'rscd': RscdStep, 'saturation': SaturationStep, 'superbias': SuperBiasStep} # Readout patterns that have nframes != a power of 2. These readout patterns @@ -165,12 +168,8 @@ def get_pipeline_steps(instrument): # Order is important in 'steps' lists below!! if instrument == 'MIRI': - steps = ['group_scale', 'dq_init', 'saturation', 'ipc', 'firstframe', 'lastframe', - 'linearity', 'rscd', 'dark_current', 'refpix', 'persistence', 'jump', 'rate'] - # No persistence correction for MIRI - steps.remove('persistence') - # MIRI is limited to one frame per group - steps.remove('group_scale') + steps = ['group_scale', 'dq_init', 'saturation', 'ipc', 'firstframe', 'lastframe', 'reset', + 'linearity', 'rscd', 'dark_current', 'refpix', 'jump', 'rate', 'gain_scale'] else: steps = ['group_scale', 'dq_init', 'saturation', 'ipc', 'superbias', 'refpix', 'linearity', 'persistence', 'dark_current', 'jump', 'rate'] @@ -186,13 +185,10 @@ def get_pipeline_steps(instrument): # IPC correction currently not done for any instrument steps.remove('ipc') - # Initialize using PIPE_KEYWORDS so the steps will be in the right order + # Initialize using OrderedDict so the steps will be in the right order required_steps = OrderedDict({}) for key in steps: required_steps[key] = True - for key in PIPE_KEYWORDS.values(): - if key not in required_steps.keys(): - required_steps[key] = False return required_steps diff --git a/jwql/jwql_monitors/create_initial_preview_and_thumbnail_listfiles.py b/jwql/jwql_monitors/create_initial_preview_and_thumbnail_listfiles.py index c64b27487..46cf7cdb5 100644 --- a/jwql/jwql_monitors/create_initial_preview_and_thumbnail_listfiles.py +++ b/jwql/jwql_monitors/create_initial_preview_and_thumbnail_listfiles.py @@ -9,11 +9,13 @@ """ from glob import glob import os -import re +from jwql.utils.protect_module import lock_module from jwql.utils.utils import get_config +# Lock module makes create_files() protected code, ensures only one instance of module will run +@lock_module def create_files(): """Create a new set of listfiles""" inst_strings = ['guider', 'nrc', 'miri', 'nis', 'nrs'] diff --git a/jwql/jwql_monitors/generate_preview_images.py b/jwql/jwql_monitors/generate_preview_images.py index aa854bf6a..544683457 100755 --- a/jwql/jwql_monitors/generate_preview_images.py +++ b/jwql/jwql_monitors/generate_preview_images.py @@ -26,6 +26,7 @@ python generate_preview_images.py """ +import argparse import glob import logging import multiprocessing @@ -37,7 +38,8 @@ from jwql.utils import permissions from jwql.utils.constants import IGNORED_SUFFIXES, JWST_INSTRUMENT_NAMES_SHORTHAND, NIRCAM_LONGWAVE_DETECTORS, \ NIRCAM_SHORTWAVE_DETECTORS, PREVIEW_IMAGE_LISTFILE, THUMBNAIL_LISTFILE -from jwql.utils.logging_functions 
import configure_logging, log_info, log_fail +from jwql.utils.logging_functions import log_info, log_fail +from jwql.utils.protect_module import lock_module from jwql.utils.preview_image import PreviewImage from jwql.utils.utils import get_config, filename_parser from jwql.utils.monitor_utils import update_monitor_table, initialize_instrument_monitor @@ -429,6 +431,16 @@ def create_dq_array(xd, yd, mosaic, module): return dq +def define_options(parser=None, usage=None, conflict_handler='resolve'): + """ + """ + if parser is None: + parser = argparse.ArgumentParser(usage=usage, conflict_handler=conflict_handler) + + parser.add_argument('--overwrite', action='store_true', default=None, help='If set, existing preview images will be re-created and overwritten.') + return parser + + def detector_check(detector_list, search_string): """Search a given list of detector names for the provided regular expression sting. @@ -528,16 +540,23 @@ def get_base_output_name(filename_dict): @log_fail @log_info -def generate_preview_images(): +def generate_preview_images(overwrite): """The main function of the ``generate_preview_image`` module. - See module docstring for further details.""" + See module docstring for further details. + + Parameters + ---------- + overwrite : bool + If True, any existing preview images and thumbnails are overwritten + """ # Process programs in parallel program_list = [os.path.basename(item) for item in glob.glob(os.path.join(SETTINGS['filesystem'], 'public', 'jw*'))] program_list.extend([os.path.basename(item) for item in glob.glob(os.path.join(SETTINGS['filesystem'], 'proprietary', 'jw*'))]) program_list = list(set(program_list)) pool = multiprocessing.Pool(processes=int(SETTINGS['cores'])) - results = pool.map(process_program, program_list) + program_list = [(element, overwrite) for element in program_list] + results = pool.starmap(process_program, program_list) pool.close() pool.join() @@ -652,32 +671,52 @@ def group_filenames(filenames): return grouped -def process_program(program): +def process_program(program, overwrite): """Generate preview images and thumbnails for the given program. Parameters ---------- program : str The program identifier (e.g. ``88600``) + overwrite : bool + If False, skip over preview images/thumbnails that already exist. + Only create images that do not currenlty exist. If True, create + preview_images and thumbnails for all input files, regardless of + whether the images already exist. 
+ + Returns + ------- + preview_image_files : list + List of preview image filenames + thumbnail_files : list + List of thumbnail image filenames """ logging.info('') logging.info('Processing {}'.format(program)) # Gather files to process - filenames = glob.glob(os.path.join(SETTINGS['filesystem'], 'public', program, '*/*.fits')) - filenames.extend(glob.glob(os.path.join(SETTINGS['filesystem'], 'proprietary', program, '*/*.fits'))) + filenames = glob.glob(os.path.join(SETTINGS['filesystem'], 'public', program, 'jw*/*.fits')) + filenames.extend(glob.glob(os.path.join(SETTINGS['filesystem'], 'proprietary', program, 'jw*/*.fits'))) filenames = list(set(filenames)) - # Ignore "original" files - filenames = [filename for filename in filenames if os.path.splitext(filename.split('_')[-1]) not in IGNORED_SUFFIXES] + # remove specific "ignored" suffix files (currently "original" and "stream") + filenames = [filename for filename in filenames if os.path.splitext(filename.split('_')[-1])[0] not in IGNORED_SUFFIXES] + + # Remove guiding files, as these are not currently visible in JWQL anyway + filenames = [filename for filename in filenames if 'guider_mode' not in filename_parser(filename)] + logging.info('Found {} filenames'.format(len(filenames))) logging.info('') + new_preview_counter = 0 + existing_preview_counter = 0 thumbnail_files = [] preview_image_files = [] for filename in filenames: + logging.debug(f'Working on {filename}') + # Determine the save location try: identifier = 'jw{}'.format(filename_parser(filename)['program_id']) @@ -687,10 +726,17 @@ def process_program(program): thumbnail_output_directory = os.path.join(SETTINGS['thumbnail_filesystem'], identifier) # Check to see if the preview images already exist and skip if they do - file_exists = check_existence([filename], preview_output_directory) - if file_exists: - logging.info("\tJPG already exists for {}, skipping.".format(filename)) - continue + if not overwrite: + # If overwrite is False, we create preview images only for files that + # don't have them yet. + file_exists = check_existence([filename], preview_output_directory) + if file_exists: + logging.debug("\tJPG already exists for {}, skipping.".format(filename)) + existing_preview_counter += 1 + continue + else: + # If overwrite is set to True, then we always create a new image + file_exists = False # Create the output directories if necessary if not os.path.exists(preview_output_directory): @@ -711,13 +757,27 @@ def process_program(program): im.output_format = 'jpg' im.preview_output_directory = preview_output_directory im.thumbnail_output_directory = thumbnail_output_directory - im.make_image(max_img_size=8) - thumbnail_files.extend(im.thumbnail_images) + + # Create a thumbnail for rate or dark files only. 
Create preview + # images for all filetypes + if 'rate.fits' in filename or 'dark.fits' in filename: + im.make_image(max_img_size=8, create_thumbnail=True) + new_preview_counter += 1 + thumbnail_files.extend(im.thumbnail_images) + logging.debug('\tCreated preview image and thumbnail for: {}'.format(filename)) + else: + im.make_image(max_img_size=8, create_thumbnail=False) + new_preview_counter += 1 + logging.debug('\tCreated preview image for: {}'.format(filename)) + preview_image_files.extend(im.preview_images) - logging.info('\tCreated preview image and thumbnail for: {}'.format(filename)) + except (ValueError, AttributeError) as error: logging.warning(error) + logging.info(f"Created {new_preview_counter} new preview images.") + logging.info(f"Skipped {existing_preview_counter} previously-existing preview images.") + return preview_image_files, thumbnail_files @@ -758,10 +818,23 @@ def update_listfile(filename, file_list, filetype): logging.info(f"{filetype} image listfile {filename} updated with new entries.") -if __name__ == '__main__': +@lock_module +def protected_code(overwrite): + """Protected code ensures only 1 instance of module will run at any given time + Parameters + ---------- + overwrite : bool + If True, any existing preview images and thumbnails are overwritten + """ module = os.path.basename(__file__).strip('.py') start_time, log_file = initialize_instrument_monitor(module) - generate_preview_images() + generate_preview_images(overwrite) update_monitor_table(module, start_time, log_file) + + +if __name__ == '__main__': + parser = define_options() + args = parser.parse_args() + protected_code(args.overwrite) diff --git a/jwql/jwql_monitors/generate_proposal_thumbnails.py b/jwql/jwql_monitors/generate_proposal_thumbnails.py index d9b5d0a94..bcbf2164f 100755 --- a/jwql/jwql_monitors/generate_proposal_thumbnails.py +++ b/jwql/jwql_monitors/generate_proposal_thumbnails.py @@ -30,9 +30,10 @@ import os import shutil -from jwql.utils.logging_functions import configure_logging, log_info, log_fail +from jwql.utils.logging_functions import log_info, log_fail from jwql.utils.utils import get_config from jwql.utils.monitor_utils import initialize_instrument_monitor, update_monitor_table +from jwql.utils.protect_module import lock_module SETTINGS = get_config() @@ -43,18 +44,21 @@ def generate_proposal_thumbnails(): """The main function of the ``generate_proposal_thumbnails`` module. See module docstring for further details.""" - proposal_dirs = glob.glob(os.path.join(SETTINGS['thumbnail_filesystem'], '*')) + proposal_dirs = glob.glob(os.path.join(SETTINGS['thumbnail_filesystem'], 'jw*')) for proposal_dir in proposal_dirs: rate_thumbnails = glob.glob(os.path.join(proposal_dir, '*rate*.thumb')) + dark_thumbnails = glob.glob(os.path.join(proposal_dir, '*dark*.thumb')) uncal_thumbnails = glob.glob(os.path.join(proposal_dir, '*uncal*.thumb')) if rate_thumbnails: thumbnail = rate_thumbnails[0] + elif dark_thumbnails: + thumbnail = dark_thumbnails[0] elif uncal_thumbnails: thumbnail = uncal_thumbnails[0] else: thumbnail = None - logging.info('No uncal or rate files found for {}. No thumbnail generated.'.format(proposal_dir)) + logging.info('No uncal, dark, or rate files found for {}. 
No thumbnail generated.'.format(proposal_dir)) if thumbnail: proposal = os.path.basename(thumbnail)[0:7] @@ -63,10 +67,15 @@ def generate_proposal_thumbnails(): logging.info('Copied {} to {}'.format(thumbnail, outfile)) -if __name__ == '__main__': - +@lock_module +def protected_code(): + """Protected code ensures only 1 instance of module will run at any given time""" module = os.path.basename(__file__).strip('.py') start_time, log_file = initialize_instrument_monitor(module) generate_proposal_thumbnails() update_monitor_table(module, start_time, log_file) + + +if __name__ == '__main__': + protected_code() diff --git a/jwql/jwql_monitors/monitor_cron_jobs.py b/jwql/jwql_monitors/monitor_cron_jobs.py index 63f5ec560..39339b5ba 100755 --- a/jwql/jwql_monitors/monitor_cron_jobs.py +++ b/jwql/jwql_monitors/monitor_cron_jobs.py @@ -36,10 +36,12 @@ from bokeh.models import ColumnDataSource from bokeh.models.widgets import DataTable, DateFormatter, HTMLTemplateFormatter, TableColumn -from jwql.utils.logging_functions import configure_logging, log_info, log_fail +from jwql.utils.logging_functions import log_info, log_fail from jwql.utils.permissions import set_permissions from jwql.utils.utils import get_config from jwql.utils.monitor_utils import initialize_instrument_monitor, update_monitor_table +from jwql.utils.utils import ensure_dir_exists +from jwql.utils.protect_module import lock_module SETTINGS = get_config() @@ -108,8 +110,12 @@ def create_table(status_dict): output_dir = SETTINGS['outputs'] output_filename = 'cron_status_table' + # verify/create output sub-directory + output_dir = os.path.join(output_dir, 'monitor_cron_jobs') + ensure_dir_exists(output_dir) + # Save full html - html_outfile = os.path.join(output_dir, 'monitor_cron_jobs', '{}.html'.format(output_filename)) + html_outfile = os.path.join(output_dir, '{}.html'.format(output_filename)) output_file(html_outfile) save(data_table) try: @@ -323,10 +329,15 @@ def success_check(filename): return execution -if __name__ == '__main__': - +@lock_module +def protected_code(): + """Protected code ensures only 1 instance of module will run at any given time""" module = os.path.basename(__file__).strip('.py') start_time, log_file = initialize_instrument_monitor(module) status() update_monitor_table(module, start_time, log_file) + + +if __name__ == '__main__': + protected_code() diff --git a/jwql/jwql_monitors/monitor_filesystem.py b/jwql/jwql_monitors/monitor_filesystem.py index 5f012e438..2c1f4d379 100755 --- a/jwql/jwql_monitors/monitor_filesystem.py +++ b/jwql/jwql_monitors/monitor_filesystem.py @@ -11,6 +11,7 @@ - Misty Cracraft - Sara Ogaz - Matthew Bourque + - Bryan Hilbert Use --- @@ -40,22 +41,29 @@ import os import subprocess +from astroquery.mast import Mast, Observations from bokeh.embed import components from bokeh.layouts import gridplot from bokeh.palettes import Category20_20 as palette from bokeh.plotting import figure, output_file, save +import numpy as np +from sqlalchemy.exc import DataError from jwql.database.database_interface import engine from jwql.database.database_interface import session +from jwql.database.database_interface import FilesystemCharacteristics from jwql.database.database_interface import FilesystemGeneral from jwql.database.database_interface import FilesystemInstrument from jwql.database.database_interface import CentralStore -from jwql.utils.logging_functions import configure_logging, log_info, log_fail +from jwql.utils.logging_functions import log_info, log_fail from jwql.utils.permissions 
import set_permissions -from jwql.utils.constants import FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE +from jwql.utils.constants import FILESYSTEM_MONITOR_SUBDIRS, FILE_SUFFIX_TYPES, FILTERS_PER_INSTRUMENT, INSTRUMENT_SERVICE_MATCH +from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE, JWST_INSTRUMENT_NAMES_MIXEDCASE from jwql.utils.utils import filename_parser from jwql.utils.utils import get_config from jwql.utils.monitor_utils import initialize_instrument_monitor, update_monitor_table +from jwql.utils.protect_module import lock_module +from jwql.website.apps.jwql.data_containers import get_instrument_proposals SETTINGS = get_config() FILESYSTEM = SETTINGS['filesystem'] @@ -63,6 +71,37 @@ PUBLIC_FILESYSTEM = os.path.join(FILESYSTEM, 'public') CENTRAL = SETTINGS['jwql_dir'] OUTPUTS = SETTINGS['outputs'] +PREVIEW_IMAGES = SETTINGS['preview_image_filesystem'] +THUMBNAILS = SETTINGS['thumbnail_filesystem'] +LOGS = SETTINGS['log_dir'] + +def files_per_filter(): + """Querying MAST (rather than looping through the filesystem), determine how + many files use each filter for each instrument. Note that thiw function takes + a long time (~minutes per filter) to execute. + + Returns + ------- + n_obs : dict + Dictionary with filter names as keys, and values of the number of Observations that + use that particular filter. + """ + # Generate a list of filter/pupil pairs, to use as keys + from astropy.table import unique, vstack + n_files = {} + for instrument in JWST_INSTRUMENT_NAMES: + n_files[instrument] = {} + for fname in FILTERS_PER_INSTRUMENT[instrument]: # note that this does not include pupil wheel-based filters + obs = Observations.query_criteria(filters=fname, instrument_name=JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument]) + batch_size = 5 + batches = [obs[i:i+batch_size] for i in range(0, len(obs), batch_size)] + + obs_table = [Observations.get_product_list(batch) for batch in batches] + products = unique(vstack(obs_table), keys='productFilename') + filtered_products = Observations.filter_products(products, productType=["SCIENCE"], productSubGroupDescription=['UNCAL'], extension="fits") + + n_files[instrument][fname] = obs + return n_files def gather_statistics(general_results_dict, instrument_results_dict): @@ -92,31 +131,45 @@ def gather_statistics(general_results_dict, instrument_results_dict): for filename in files: file_path = os.path.join(dirpath, filename) - general_results_dict['total_file_size'] += os.path.getsize(file_path) - - if filename.endswith(".fits"): - - # Parse out filename information - try: - filename_dict = filename_parser(filename) - except ValueError: - break - filetype = filename_dict['suffix'] - instrument = filename_dict['instrument'] - - # Populate general stats - general_results_dict['fits_file_count'] += 1 - general_results_dict['fits_file_size'] += os.path.getsize(file_path) - - # Populate instrument specific stats - if instrument not in instrument_results_dict: - instrument_results_dict[instrument] = {} - if filetype not in instrument_results_dict[instrument]: - instrument_results_dict[instrument][filetype] = {} - instrument_results_dict[instrument][filetype]['count'] = 0 - instrument_results_dict[instrument][filetype]['size'] = 0 - instrument_results_dict[instrument][filetype]['count'] += 1 - instrument_results_dict[instrument][filetype]['size'] += os.path.getsize(file_path) / (2**40) + if os.path.isfile(file_path): + general_results_dict['total_file_size'] += os.path.getsize(file_path) + + if 
filename.endswith(".fits"): + + # Parse out filename information + try: + filename_dict = filename_parser(filename) + except ValueError: + break + + # For MSA files, which do not have traditional suffixes, set the + # suffix to "msa" + if 'suffix' not in filename_dict: + if filename_dict['filename_type'] == 'stage_2_msa': + filename_dict['suffix'] = 'msa' + + try: + filetype = filename_dict['suffix'] + instrument = filename_dict['instrument'] + except KeyError: + logging.info(f'File {filename} skipped as it contains either no suffix or no instrument name from the filename parser.') + filetype = None + instrument = None + + # Populate general stats + general_results_dict['fits_file_count'] += 1 + general_results_dict['fits_file_size'] += os.path.getsize(file_path) + + if filetype is not None: + # Populate instrument specific stats + if instrument not in instrument_results_dict: + instrument_results_dict[instrument] = {} + if filetype not in instrument_results_dict[instrument]: + instrument_results_dict[instrument][filetype] = {} + instrument_results_dict[instrument][filetype]['count'] = 0 + instrument_results_dict[instrument][filetype]['size'] = 0 + instrument_results_dict[instrument][filetype]['count'] += 1 + instrument_results_dict[instrument][filetype]['size'] += os.path.getsize(file_path) / (2**40) # Convert file sizes to terabytes general_results_dict['total_file_size'] = general_results_dict['total_file_size'] / (2**40) @@ -171,21 +224,23 @@ def get_area_stats(central_storage_dict): """ logging.info('Gathering stats for central storage area') - arealist = ['logs', 'outputs', 'test', 'preview_images', 'thumbnails', 'all'] + areas = {'outputs': OUTPUTS, + 'logs': LOGS, + 'preview_images': PREVIEW_IMAGES, + 'thumbnails': THUMBNAILS, + 'all': CENTRAL} + counteddirs = [] sums = 0 # to be used to count 'all' - for area in arealist: + for area in areas: used = 0 # initialize area in dictionary if area not in central_storage_dict: central_storage_dict[area] = {} - if area == 'all': - fullpath = CENTRAL - else: - fullpath = os.path.join(CENTRAL, area) + fullpath = areas[area] logging.info('\tSearching directory {}'.format(fullpath)) counteddirs.append(fullpath) @@ -233,6 +288,69 @@ def get_area_stats(central_storage_dict): return central_storage_dict +def get_observation_characteristics(): + """Query MAST and count the number of observations that make use of each + filter/pupil pair for each instrument. + + Returns + ------- + n_obs : dict + Dictionary with instrument names as the top level keys, and lists of 2-tuples + as values. Each tuple contains filter/pupil string and the number of + observations that use that filter/pupil. + """ + n_obs = {} + for instrument in ['nircam', 'niriss', 'nirspec', 'miri']: # Skip FGS here. 
It has no filters + service = INSTRUMENT_SERVICE_MATCH[JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument]] + n_obs[instrument] = {} + + # Get the list of proposal numbers for the given instrument + proposal_list = get_instrument_proposals(instrument) + + # Different instruments hold the optical elements in different fields + if instrument in ['nircam', 'niriss']: + colval = "filter,pupil,observtn" + elif instrument == 'nirspec': + colval = "filter,grating,observtn" + elif instrument == 'miri': + colval = "filter,observtn" + optics = colval.split(',') + + for proposal in proposal_list: + filters = [{'paramName': 'program', "values": [proposal]}] + columns = colval + params = {"columns": columns, "filters": filters} + response = Mast.service_request_async(service, params) + result = response[0].json() + result_array = np.array(result['data']) + + # Get a list of all the observation numbers within the proposal + all_obs_nums = np.array([f'{entry["observtn"]}' for entry in result['data']]) + obs_nums = list(set(all_obs_nums)) + + for obs_num in obs_nums: + # Idenitfy which entries use the given obs_num + match = np.where(all_obs_nums == obs_num)[0] + + # Generate a list of filter/pupil values used in the proposal. For MIRI, + # just keep the filter name. + if instrument != 'miri': + filter_pupils = sorted(list(set([f'{entry[optics[0]]}/{entry[optics[1]]}' for entry in result_array[match]]))) + else: + filter_pupils = sorted(list(set([f'{entry[optics[0]]}' for entry in result_array[match]]))) + + # Increment dictionary values for the existing filter_pupil values + for filter_pupil in filter_pupils: + if filter_pupil in n_obs[instrument]: + n_obs[instrument][filter_pupil] += 1 + else: + n_obs[instrument][filter_pupil] = 1 + + # Sort the filter/pupil list to make future plot more readable + n_obs[instrument] = sorted(n_obs[instrument].items()) + return n_obs + + def initialize_results_dicts(): """Initializes dictionaries that will hold filesystem statistics @@ -284,242 +402,64 @@ def monitor_filesystem(): # Get stats on central storage areas central_storage_dict = get_area_stats(central_storage_dict) + # Get stats on number of observations with particular characteristics + characteristics = get_observation_characteristics() + # Add data to database tables update_database(general_results_dict, instrument_results_dict, central_storage_dict) + update_characteristics_database(characteristics) + update_central_store_database(central_storage_dict) - # Create the plots - plot_filesystem_stats() - -def plot_by_filetype(plot_type, instrument): - """Plot ``count`` or ``size`` by filetype versus date for the given - instrument, or all instruments. +def update_central_store_database(central_storage_dict): + """Updates the ``CentralStore`` database table with info on disk space Parameters ---------- - plot_type : str - Which data to plot. Either ``count`` or ``size``. - instrument : str - The instrument to plot for. Can be a valid JWST instrument or - ``all`` to plot across all instruments. 
- - Returns - ------- - plot : bokeh.plotting.figure.Figure object - ``bokeh`` plot of total file counts versus date - """ - - # Determine plot title - if instrument == 'all': - title = 'Total File {} by Type'.format(plot_type.capitalize()) - else: - instrument_title = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument] - title = '{} Total File {} by Type'.format(instrument_title, plot_type.capitalize()) - - if plot_type == 'count': - ytitle = 'Counts' - else: - ytitle = 'Size (TB)' - - # Initialize plot - plot = figure( - tools='pan,box_zoom,wheel_zoom,reset,save', - x_axis_type='datetime', - title=title, - x_axis_label='Date', - y_axis_label=ytitle) - colors = itertools.cycle(palette) - - for filetype, color in zip(FILE_SUFFIX_TYPES, colors): - - # Query for counts - results = session.query(FilesystemInstrument.date, getattr(FilesystemInstrument, plot_type))\ - .filter(FilesystemInstrument.filetype == filetype) - - if instrument == 'all': - results = results.all() - else: - results = results.filter(FilesystemInstrument.instrument == instrument).all() - - # Group by date - if results: - results_dict = defaultdict(int) - for date, value in results: - results_dict[date] += value - - # Parse results so they can be easily plotted - dates = list(results_dict.keys()) - values = list(results_dict.values()) - - # Plot the results - plot.line(dates, values, legend='{} files'.format(filetype), line_color=color) - plot.circle(dates, values, color=color) - - session.close() - - return plot - - -def plot_filesystem_size(): - """Plot filesystem sizes (size, used, available) versus date - - Returns - ------- - plot : bokeh.plotting.figure.Figure object - ``bokeh`` plot of total file counts versus date + central_storage_dict : dict + A dictionary for the ``central_storage`` database table """ - - # Plot system stats vs. date - results = session.query(FilesystemGeneral.date, FilesystemGeneral.total_file_size, - FilesystemGeneral.used, FilesystemGeneral.available).all() - dates, total_sizes, useds, availables = zip(*results) - plot = figure( - tools='pan,box_zoom,wheel_zoom,reset,save', - x_axis_type='datetime', - title='System stats', - x_axis_label='Date', - y_axis_label='Size TB') - plot.line(dates, total_sizes, legend='Total size', line_color='red') - plot.circle(dates, total_sizes, color='red') - plot.line(dates, useds, legend='Used bytes', line_color='green') - plot.circle(dates, useds, color='green') - plot.line(dates, availables, legend='Free bytes', line_color='blue') - plot.circle(dates, availables, color='blue') - - session.close() - return plot - - -def plot_central_store_dirs(): - """Plot central store sizes (size, used, available) versus date - - Returns - ------- - plot : bokeh.plotting.figure.Figure object - ``bokeh`` plot of total directory size versus date - """ - - # Plot system stats vs. 
date - results = session.query(CentralStore.date, CentralStore.size, CentralStore.available).all() - - arealist = ['logs', 'outputs', 'test', 'preview_images', 'thumbnails', 'all'] - - # Initialize plot - dates, total_sizes, availables = zip(*results) - plot = figure( - tools='pan,box_zoom,wheel_zoom,reset,save', - x_axis_type='datetime', - title='Central Store stats', - x_axis_label='Date', - y_axis_label='Size TB') - colors = itertools.cycle(palette) - - plot.line(dates, total_sizes, legend='Total size', line_color='red') - plot.circle(dates, total_sizes, color='red') - plot.line(dates, availables, legend='Free', line_color='blue') - plot.circle(dates, availables, color='blue') - - # This part of the plot should cycle through areas and plot area used values vs. date - for area, color in zip(arealist, colors): - - # Query for used sizes - results = session.query(CentralStore.date, CentralStore.used).filter(CentralStore.area == area) - - # Group by date - if results: - results_dict = defaultdict(int) - for date, value in results: - results_dict[date] += value - - # Parse results so they can be easily plotted - dates = list(results_dict.keys()) - values = list(results_dict.values()) - - # Plot the results - plot.line(dates, values, legend='{} files'.format(area), line_color=color) - plot.circle(dates, values, color=color) - + for area in FILESYSTEM_MONITOR_SUBDIRS: + new_record = {} + new_record['date'] = central_storage_dict['date'] + new_record['area'] = area + new_record['size'] = central_storage_dict[area]['size'] + new_record['used'] = central_storage_dict[area]['used'] + new_record['available'] = central_storage_dict[area]['available'] + with engine.begin() as connection: + connection.execute(CentralStore.__table__.insert(), new_record) session.close() - return plot - - -def plot_filesystem_stats(): - """ - Plot various filesystem statistics using ``bokeh`` and save them to - the output directory. 
- """ - logging.info('Creating results plots') - - p1 = plot_total_file_counts() - p2 = plot_filesystem_size() - p3 = plot_by_filetype('count', 'all') - p4 = plot_by_filetype('size', 'all') - p5 = plot_central_store_dirs() - plot_list = [p1, p2, p3, p4, p5] - - for instrument in JWST_INSTRUMENT_NAMES: - plot_list.append(plot_by_filetype('count', instrument)) - plot_list.append(plot_by_filetype('size', instrument)) - - # Create a layout with a grid pattern - grid_chunks = [plot_list[i:i + 2] for i in range(0, len(plot_list), 2)] - grid = gridplot(grid_chunks) - - # Save all of the plots in one file - outputs_dir = os.path.join(OUTPUTS, 'monitor_filesystem') - outfile = os.path.join(outputs_dir, 'filesystem_monitor.html') - output_file(outfile) - save(grid) - set_permissions(outfile) - logging.info('\tSaved plot of all statistics to {}'.format(outfile)) - - # Save each plot's components - for plot in plot_list: - plot_name = plot.title.text.lower().replace(' ', '_') - plot.sizing_mode = 'stretch_both' - script, div = components(plot) - - div_outfile = os.path.join(outputs_dir, "{}_component.html".format(plot_name)) - with open(div_outfile, 'w') as f: - f.write(div) - f.close() - set_permissions(div_outfile) - - script_outfile = os.path.join(outputs_dir, "{}_component.js".format(plot_name)) - with open(script_outfile, 'w') as f: - f.write(script) - f.close() - set_permissions(script_outfile) - logging.info('\tSaved components files: {}_component.html and {}_component.js'.format(plot_name, plot_name)) +def update_characteristics_database(char_info): + """Updates the ``filesystem_characteristics`` database table. - -def plot_total_file_counts(): - """Plot total file counts versus date - - Returns - ------- - plot : bokeh.plotting.figure.Figure object - ``bokeh`` plot of total file counts versus date + Parameters + ---------- + char_info : dict + A dictionary of characteristic information. Keys are + instrument names, and values are lists of tuples. Each tuple is + composed of a filter/pupil string and a count for the number of observations + using that filter/pupil. """ + logging.info('\tUpdating the characteristics database') + now = datetime.datetime.now() - # Total file counts vs. 
date - results = session.query(FilesystemGeneral.date, FilesystemGeneral.total_file_count).all() - dates, file_counts = zip(*results) - plot = figure( - tools='pan,box_zoom,reset,wheel_zoom,save', - x_axis_type='datetime', - title="Total File Counts", - x_axis_label='Date', - y_axis_label='Count') - plot.line(dates, file_counts, line_width=2, line_color='blue') - plot.circle(dates, file_counts, color='blue') + # Add data to filesystem_instrument table + for instrument in ['nircam', 'niriss', 'nirspec', 'miri']: + optics = [e[0] for e in char_info[instrument]] + values = [e[1] for e in char_info[instrument]] + new_record = {} + new_record['date'] = now + new_record['instrument'] = instrument + new_record['filter_pupil'] = optics + new_record['obs_per_filter_pupil'] = values + with engine.begin() as connection: + connection.execute( + FilesystemCharacteristics.__table__.insert(), new_record) session.close() - return plot - def update_database(general_results_dict, instrument_results_dict, central_storage_dict): """Updates the ``filesystem_general`` and ``filesystem_instrument`` @@ -531,14 +471,11 @@ def update_database(general_results_dict, instrument_results_dict, central_stora A dictionary for the ``filesystem_general`` database table instrument_results_dict : dict A dictionary for the ``filesystem_instrument`` database table - central_storage_dict : dict - A dictionary for the ``central_storage`` database table - """ logging.info('\tUpdating the database') - engine.execute(FilesystemGeneral.__table__.insert(), general_results_dict) - session.commit() + with engine.begin() as connection: + connection.execute(FilesystemGeneral.__table__.insert(), general_results_dict) # Add data to filesystem_instrument table for instrument in JWST_INSTRUMENT_NAMES: @@ -549,25 +486,21 @@ def update_database(general_results_dict, instrument_results_dict, central_stora new_record['filetype'] = filetype new_record['count'] = instrument_results_dict[instrument][filetype]['count'] new_record['size'] = instrument_results_dict[instrument][filetype]['size'] - engine.execute(FilesystemInstrument.__table__.insert(), new_record) - session.commit() - # Add data to central_storage table - arealist = ['logs', 'outputs', 'test', 'preview_images', 'thumbnails', 'all'] - for area in arealist: - new_record = {} - new_record['date'] = central_storage_dict['date'] - new_record['area'] = area - new_record['size'] = central_storage_dict[area]['size'] - new_record['used'] = central_storage_dict[area]['used'] - new_record['available'] = central_storage_dict[area]['available'] - engine.execute(CentralStore.__table__.insert(), new_record) - session.commit() + # Protect against updated enum options that have not been propagated to + # the table definition + try: + with engine.begin() as connection: + connection.execute(FilesystemInstrument.__table__.insert(), new_record) + except DataError as e: + logging.error(e) session.close() -if __name__ == '__main__': +@lock_module +def protected_code(): + """Protected code ensures only 1 instance of module will run at any given time""" # Configure logging module = os.path.basename(__file__).strip('.py') @@ -575,3 +508,7 @@ def update_database(general_results_dict, instrument_results_dict, central_stora monitor_filesystem() update_monitor_table(module, start_time, log_file) + + +if __name__ == '__main__': + protected_code() diff --git a/jwql/jwql_monitors/monitor_mast.py b/jwql/jwql_monitors/monitor_mast.py deleted file mode 100755 index 846d72444..000000000 --- 
a/jwql/jwql_monitors/monitor_mast.py +++ /dev/null @@ -1,278 +0,0 @@ -#! /usr/bin/env python - -"""This module is home to a suite of MAST queries that gather bulk -properties of available JWST data for JWQL. - -Authors -------- - - Joe Filippazzo - -Use ---- - - To get an inventory of all JWST files do: - :: - - from jwql.jwql_monitors import monitor_mast - inventory, keywords = monitor_mast.jwst_inventory() -""" - -import logging -import os - -from astroquery.mast import Mast -from bokeh.embed import components -from bokeh.io import save, output_file -import pandas as pd - -from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_DATAPRODUCTS -from jwql.utils.logging_functions import configure_logging, log_info, log_fail -from jwql.utils.permissions import set_permissions -from jwql.utils.utils import get_config -from jwql.utils import monitor_utils -from jwql.utils.plotting import bar_chart - - -# Temporary until JWST operations: switch to test string for MAST request URL -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') -if not ON_GITHUB_ACTIONS: - Mast._portal_api_connection.MAST_REQUEST_URL = get_config()['mast_request_url'] - - -def instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS, - add_filters=None, add_requests=None, - caom=False, return_data=False): - """Get the counts for a given instrument and data product - - Parameters - ---------- - instrument: str - The instrument name, i.e. one of ['niriss','nircam','nirspec', - 'miri','fgs'] - dataproduct: sequence, str - The type of data product to search - add_filters: dict - The ('paramName':'values') pairs to include in the 'filters' - argument of the request e.g. add_filters = {'filter':'GR150R'} - add_requests: dict - The ('request':'value') pairs to include in the request - e.g. 
add_requests = {'pagesize':1, 'page':1} - caom: bool - Query CAOM service - return_data: bool - Return the actual data instead of counts only - - Returns - ------- - int, dict - The number of database records that satisfy the search criteria - or a dictionary of the data if `return_data=True` - """ - filters = [] - - # Make sure the dataproduct is a list - if isinstance(dataproduct, str): - dataproduct = [dataproduct] - - # Make sure the instrument is supported - if instrument.lower() not in [ins.lower() for ins in JWST_INSTRUMENT_NAMES]: - raise TypeError('Supported instruments include:', JWST_INSTRUMENT_NAMES) - - # CAOM service - if caom: - - # Declare the service - service = 'Mast.Caom.Filtered' - - # Set the filters - filters += [{'paramName': 'obs_collection', 'values': ['JWST']}, - {'paramName': 'instrument_name', 'values': [instrument]}, - {'paramName': 'dataproduct_type', 'values': dataproduct}] - - # Instruent filtered service - else: - - # Declare the service - service = 'Mast.Jwst.Filtered.{}'.format(instrument.title()) - - # Include additonal filters - if isinstance(add_filters, dict): - filters += [{"paramName": name, "values": [val]} - for name, val in add_filters.items()] - - # Assemble the request - params = {'columns': 'COUNT_BIG(*)', - 'filters': filters, - 'removenullcolumns': True} - - # Just get the counts - if return_data: - params['columns'] = '*' - - # Add requests - if isinstance(add_requests, dict): - params.update(add_requests) - - response = Mast.service_request_async(service, params) - result = response[0].json() - - # Return all the data - if return_data: - return result - - # Or just the counts - else: - return result['data'][0]['Column1'] - - -def instrument_keywords(instrument, caom=False): - """Get the keywords for a given instrument service - - Parameters - ---------- - instrument: str - The instrument name, i.e. one of ['niriss','nircam','nirspec', - 'miri','fgs'] - caom: bool - Query CAOM service - - Returns - ------- - pd.DataFrame - A DataFrame of the keywords - """ - # Retrieve one dataset to get header keywords - sample = instrument_inventory(instrument, return_data=True, caom=caom, - add_requests={'pagesize': 1, 'page': 1}) - data = [[i['name'], i['type']] for i in sample['fields']] - keywords = pd.DataFrame(data, columns=('keyword', 'dtype')) - - return keywords - - -def jwst_inventory(instruments=JWST_INSTRUMENT_NAMES, - dataproducts=['image', 'spectrum', 'cube'], - caom=False, plot=False): - """Gather a full inventory of all JWST data in each instrument - service by instrument/dtype - - Parameters - ---------- - instruments: sequence - The list of instruments to count - dataproducts: sequence - The types of dataproducts to count - caom: bool - Query CAOM service - plot: bool - Return a pie chart of the data - - Returns - ------- - astropy.table.table.Table - The table of record counts for each instrument and mode - """ - logging.info('Searching database...') - # Iterate through instruments - inventory, keywords = [], {} - for instrument in instruments: - ins = [instrument] - for dp in dataproducts: - count = instrument_inventory(instrument, dataproduct=dp, caom=caom) - ins.append(count) - - # Get the total - ins.append(sum(ins[-3:])) - - # Add it to the list - inventory.append(ins) - - # Add the keywords to the dict - keywords[instrument] = instrument_keywords(instrument, caom=caom) - - logging.info('Completed database search for {} instruments and {} data products.'. 
- format(instruments, dataproducts)) - - # Make the table - all_cols = ['instrument'] + dataproducts + ['total'] - table = pd.DataFrame(inventory, columns=all_cols) - - # Plot it - if plot: - # Determine plot location and names - output_dir = get_config()['outputs'] - - if caom: - output_filename = 'database_monitor_caom' - else: - output_filename = 'database_monitor_jwst' - - # Make the plot - plt = bar_chart(table, 'instrument', dataproducts, - title="JWST Inventory") - - # Save the plot as full html - html_filename = output_filename + '.html' - outfile = os.path.join(output_dir, 'monitor_mast', html_filename) - output_file(outfile) - save(plt) - set_permissions(outfile) - - logging.info('Saved Bokeh plots as HTML file: {}'.format(html_filename)) - - # Save the plot as components - plt.sizing_mode = 'stretch_both' - script, div = components(plt) - - div_outfile = os.path.join(output_dir, 'monitor_mast', output_filename + "_component.html") - with open(div_outfile, 'w') as f: - f.write(div) - f.close() - set_permissions(div_outfile) - - script_outfile = os.path.join(output_dir, 'monitor_mast', output_filename + "_component.js") - with open(script_outfile, 'w') as f: - f.write(script) - f.close() - set_permissions(script_outfile) - - logging.info('Saved Bokeh components files: {}_component.html and {}_component.js'.format( - output_filename, output_filename)) - - # Melt the table - table = pd.melt(table, id_vars=['instrument'], - value_vars=dataproducts, - value_name='files', var_name='dataproduct') - - return table, keywords - - -@log_fail -@log_info -def monitor_mast(): - """Tabulates the inventory of all JWST data products in the MAST - archive and generates plots. - """ - logging.info('Beginning database monitoring.') - - # Perform inventory of the JWST service - jwst_inventory(instruments=JWST_INSTRUMENT_NAMES, - dataproducts=['image', 'spectrum', 'cube'], - caom=False, plot=True) - - # Perform inventory of the CAOM service - jwst_inventory(instruments=JWST_INSTRUMENT_NAMES, - dataproducts=['image', 'spectrum', 'cube'], - caom=True, plot=True) - - -if __name__ == '__main__': - - # Configure logging - module = os.path.basename(__file__).strip('.py') - start_time, log_file = monitor_utils.initialize_instrument_monitor(module) - - # Run the monitors - monitor_mast() - monitor_utils.update_monitor_table(module, start_time, log_file) diff --git a/jwql/jwql_monitors/remove_inventory_items.py b/jwql/jwql_monitors/remove_inventory_items.py new file mode 100644 index 000000000..9f6d7e4d0 --- /dev/null +++ b/jwql/jwql_monitors/remove_inventory_items.py @@ -0,0 +1,116 @@ +#! /usr/bin/env python + +"""Remove items from the preview image or thumbnail inventory file based on instrument and/or program number. +This will open the preview image or thumbnail invnetory file for the given instrument, and delete all lines that +contain the given string. The location of the preview or thumbnail inventory file to be examined is determined +using the JWQL config.json file. + +Authors +------- + + - Bryan Hilbert + +Use +--- + + This script is intended to be executed as shown below. In this example, it will remove all entries from + the nircam preview image inventory file that contain the string "jw01022". The -i option is used to + designate the instrument. The -p option is used to designate the preview image inventory file ('p') + or the thumbnail inventory file ('t'). The -s option is used to define the string to search for in + each line. 
+ + :: + + python remove_intentory_items.py -i nircam -p p -s jw01022 + +""" + +import argparse +import os +from jwql.utils.utils import get_config + + +def run(instrument, prev_or_thumb, str_to_exclude): + """The main function. Locates the inventory file using config.json, opens + it, and removes any lines containing ``str_to_exclude``. + + Parameters + ---------- + instrument : str + Name of instrument, all lowercase. e.g. 'nircam' + + prev_or_thumb : str + Either 'p', which specifies to work on the preview image inventory file, + or 't', indicating to work on the thumbnail inventory file. + + str_to_exclude : str + Any lines in the inventory file contianing this string will be removed. + """ + config = get_config() + + if prev_or_thumb == 'p': + basedir = config['preview_image_filesystem'] + filename = f'preview_image_inventory_{instrument}.txt' + elif prev_or_thumb == 't': + basedir = config['thumbnail_filesystem'] + filename = f'thumbnail_inventory_{instrument}.txt' + filename = os.path.join(basedir, filename) + + newlines = [] + fobj = open(filename, 'r') + count = 0 + while True: + count += 1 + + # Get next line from file + line = fobj.readline() + + # if line is empty + # end of file is reached + if not line: + break + + if str_to_exclude not in line: + newlines.append(line) + fobj.close() + + os.remove(filename) + + newfile = open(filename, 'w') + newfile.writelines((newlines)) + newfile.close() + + +def define_options(parser=None, usage=None, conflict_handler='resolve'): + """Add command line options + + Parrameters + ----------- + parser : argparse.parser + Parser object + + usage : str + Usage string + + conflict_handler : str + Conflict handling strategy + + Returns + ------- + parser : argparse.parser + Parser object with added options + """ + if parser is None: + parser = argparse.ArgumentParser(usage=usage, conflict_handler=conflict_handler) + + parser.add_argument('-i', '--instrument', type=str, default=None, choices=['niriss', 'nircam', 'nirspec', 'miri', 'fgs'], help='Instrument. (default=%(default)s)') + parser.add_argument('-p', '--prev_or_thumb', type=str, default=None, choices=['p', 't'], help='Work on preview images (p) or thumbnails (t)') + parser.add_argument('-s', '--str_to_exclude', type=str, help='String controlling which entries are removed.') + return parser + + +if __name__ == '__main__': + parser = define_options() + args = parser.parse_args() + + run(args.instrument, args.prev_or_thumb, args.str_to_exclude) diff --git a/jwql/shared_tasks/celery_cheatsheet.md b/jwql/shared_tasks/celery_cheatsheet.md new file mode 100644 index 000000000..32469403c --- /dev/null +++ b/jwql/shared_tasks/celery_cheatsheet.md @@ -0,0 +1,220 @@ +# Cheatsheet on using the JWQL `celery` and `redis` task server + +## Very quick start-up + +Note that this start-up assumes that *nothing* is currently running anywhere in terms of +redis or celery. Or at least that nothing is *supposed* to be running. + +- **Set up config.json**: Make sure that it has the following entries: + - `"redis_host"`: `"pljwql2.stsci.edu"`, + - `"redis_port"`: `"6379"`, + - `"transfer_dir"`: `"$CENTRAL_STORAGE/transfer/dev"` +- **Start Redis**: + - Log into `pljwql2` and change to the `svc_jwqladm_mon` account + - Activate the celery environment (currently named `jwql_celery_38`) + - Run `ps -e | grep redis-server`. If any process shows up, kill it. 
+ - Run `redis-server --protected-mode no &` +- **Start Celery**: Do this on *each* of `pljwql3`, `pljwql4`, `pljwql5`, and `pljwql6` + - Log in to the server and change to the `svc_jwqladm_mon` account + - Activate the `jwql_celery_38` environment + - Go to the `$REPOSITORY/jwql/shared_tasks` directory + - Run `ps -e | grep python`. If there are any running processes, run `ps auxww | grep 'python' | awk '{print $2}' | xargs kill -9` + - Run `celery -A shared_tasks purge`. Hit "y" at the prompt. **NOTE** You only need to do this on one of the servers. + - Run `celery -A shared_tasks worker -D -E -ldebug -Ofair -c1 --max-tasks-per-child=1 --prefetch-multiplier 1` + +Now, the next time that any of the monitors starts up, it should see the celery servers and +be able to give them tasks. The monitor logs should show this activity, and there should be +logs of the celery servers available in the usual log location under the name "shared_tasks". + +## Introduction + +`celery` is a task server infrastructure, which means that, if you have processing work +that needs to be done, and that you don't want to do in your main process for some reason +(e.g. memory leaks, resource usage, not wanting multiple tasks to run at the same time, +not wanting the same task to be run multiple times at once, etc.) `celery` provides the +ability to + +- set up work queues to send tasks to +- assign workers to listen to queues and pick up tasks from them +- co-ordinate between workers as to which worker will handle which task +- keep track of tasks and their status +- pass input data to workers and pass result data back to the calling process +- execute callback functions on the calling processes (if provided) +- schedule tasks for future execution +- revoke tasks that are no longer wanted or needed + +In order to co-ordinate between multiple independent workers, celery relies on one of +several services to maintain state. For JWQL, `redis` is used as the task storage and +co-ordination method, and to maintain task status. For the task server to work, you must +have: + +- a single `redis` server, with a known location (server and port). Currently `redis` runs + on `pljwql2`, and uses port 6379 +- one or more `celery` workers, running on one or more servers, and co-ordinating through + `redis`. Currently JWQL runs only one worker thread on any given server, but runs threads + on each of `pljwql3`, `pljwql4`, `pljwql5`, and `pljwql6` +- one or more work queues, which the calling processes (monitors) will send to, and which + the workers (celery) will read from. Currently JWQL uses only a single work queue. +- one or more programs that send tasks to the workers. Currently the JWQL monitors are + those programs, and they are all run on `pljwql1` + +## Celery Command Cheatsheet + +- **Starting Celery:** + - Log in to the server on which you want to start celery, *and* make sure that no `python` + processes are already running on that server. Currently `pljwql3..6` are reserved for + `celery` workers. 
+ - Log in as the appropriate service user + - Change to the JWQL `shared_tasks` directory (`$REPOSITORY/jwql/shared_tasks`) + - Start `celery` in detached mode with `celery -A shared_tasks worker -D -E -ldebug -Ofair -c1 --max-tasks-per-child=1 --prefetch-multiplier 1` +- **Shutting Down Celery Workers:** + - Log in to any server on which `celery` is running (`pljwql3..6`) + - Log in as the appropriate service user + - Change to the JWQL `shared_tasks` directory + - Run `celery -A shared_tasks control shutdown` + - Celery worker threads will complete their current task and then exit +- **Killing Celery Workers:** + - Log in to any server on which `celery` is running (`pljwql3..6`) + - Log in as the appropriate service user + - Change to the JWQL `shared_tasks` directory + - Run `ps auxww | grep 'python' | awk '{print $2}' | xargs kill -9` + - Celery worker threads will exit immediately, and may not complete their work, update + their task status, or release locks +- **Clearing Saved Tasks:** + - Make sure that no JWQL monitors are running + - Log in to any server on which `celery` is running (`pljwql3..6`) + - Log in as the appropriate service user + - Change to the JWQL `shared_tasks` directory + - Wait until all `celery` workers have shut down + - Run `celery -A shared_tasks purge` + - This will discard any tasks that have not been completed. If a monitor is currently + running, and is waiting for a task to finish, that task will be purged by this command, + and the monitor will wait forever for the task to return, so if you didn't make sure + that no monitors were running, you will now have to kill any monitor that's currently + waiting for a task. + +## Redis Command Cheatsheet + +- **Running Redis:** + - The JWQL config file has 2 values for redis, `redis_host` and `redis_port` + - To run `redis`, ssh to the server `redis_host`, and change to the appropriate account + for that host (ops, test, dev, etc.) + - `redis_port` tells you which port `redis` should use to listen for connections, and + which port `celery` should use to connect to redis. The default value is 6379. If you + need to run `redis` on a different port, then run `redis-server` with `--port PORT` + - Run redis with `redis-server --protected-mode no &`. If you will be running jwql + monitors **and** `celery` on the same server that `redis` is running on, then you can + omit the `--protected-mode no` argument (having that argument allows redis to accept + connections from servers other than `localhost`). +- **Deleting a Redis lock:** + - *Before you do this, make sure that the process which has the lock has actually crashed or finished without releasing it* + - Find the name of the lock you need to clear + - ssh to `redis_host` and change to the appropriate service account + - run `redis-cli del ` where `` is the name of the lock to be deleted +- **Deleting all Redis file locks:** + - *Before you do this, make sure that the process which has the lock has actually crashed or finished without releasing it* + - ssh to `redis_host` and change to the appropriate service account + - run `redis-cli --scan --pattern 'jw*' | xargs redis-cli del` +- **Stopping Redis:** + - ssh to `redis_host` and change to the appropriate service account + - run `ps -e | grep redis` and mark down the process number of `redis-server` + - run `kill ` where `` is the process number from the previous step + - `redis` will exit gracefully from a `kill` command. Don't use `kill -9` unless a + standard `kill` fails to clear the process. 
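+
+If you need to inspect or clear stale locks from Python rather than `redis-cli`, the
+following is a minimal sketch using the same `redis` package that JWQL already imports;
+the lock name in the final, commented-out line is purely hypothetical:
+
+```
+import redis
+
+from jwql.utils.utils import get_config
+
+config = get_config()
+client = redis.Redis(host=config["redis_host"], port=config["redis_port"])
+
+# List any file locks that are still present (same pattern as the redis-cli example above)
+stale_locks = [key.decode() for key in client.scan_iter(match="jw*")]
+print(stale_locks)
+
+# Delete a single lock by name -- only after confirming that its owner has crashed or exited
+# client.delete("jw00000000000_00000_00000_nrs1")
+```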
+ +## Using Redis for locking + +In addition to acting as a task broker for celery, `redis` also acts as a persistent +key/value store, which allows it to be used for locking code. Locks should be used to +protect segments of code which should only be running once, no matter how many processes +want to potentially run them. Using locks *can* be dangerous, and can lead to unpredictable +and difficult-to-find bugs and issues at run-time. As a (very) abbreviated primer to +locking, you should keep the following principles in mind: + +- If you get a lock, be sure to release it, even in the case of errors (i.e. run the locked + code in a `try/except` block, and put releasing the lock into `finally`) +- If you need multiple locks, **always** acquire and release them in the same order. + Otherwise, if you have code that needs both Lock A and Lock B to run, then Process 1 can + have Lock A (and be waiting for Lock B), and Process 2 can have Lock B (and be waiting + for Lock A), and the deadlock will persist forever. +- If you set a lock to automatically time out, make sure that the timeout is long enough + that by the time it expires, the process that has the lock has either finished or crashed. + Otherwise you could end up with two processes each thinking they have the lock. +- Before you manually delete a lock to let a process run, make sure that whatever process + has the lock has either finished with it (and failed to release it) or crashed (and + failed to release it). +- Lock as little as you can get away with (but no less) + +JWQL has existing ways to lock a single function, or to create a custom lock. + +### Locking a single function + +To do this, you need to import the `only_one` decorator from `jwql.shared_tasks`, and then +add it as a decorator to to the function to be locked. As an example, + +``` +from jwql.shared_tasks.shared_tasks import only_one + +... +... + +@only_one(key='key', timeout=value) +def function(args): + ... + ... +``` + +Note that key strings are global, so if you use the key "my_lock", then only one function +that uses that lock may execute as a time *anywhere*. If you want to lock out a function +so that only one simultaneous instance of that particular function runs (but you don't +care about other functions), choose a unique name for the key. If in doubt as to what would +make a unique name, using the module path (e.g. `key=jwql.shared\_tasks.share\_tasks.function`) +will be guaranteed to be unique within the `jwql` module. `timeout` is the number of +seconds before the lock will be automatically released. If you never want the lock to +time out, don't provide any value for the `timeout` parameter. + +### Using a custom lock + +For a worked example of this, look at the `run_pipeline` function in `jwql/shared_tasks/shared_tasks.py`. + +In order to create a custom lock, you need to import the `REDIS_CLIENT` instance from +`jwql.shared_tasks.shared_tasks`, and then use the `redis` `lock()` and `acquire()` +functions. As an example, + +``` +from jwql.shared_tasks.shared_tasks import REDIS_CLIENT + +... +... + +def some_function(args): + ... + ... + my_lock = REDIS_CLIENT.lock(lock_name, timeout=value) + have_lock = my_lock.acquire(blocking=True) + if have_lock: + try: + ... + ... + finally: + my_lock.release() +``` + +In this case, `lock_name` acts the same as `key` above, and `timeout` works in exactly the +same way as it does above. 
When acquiring the lock, `blocking` describes whether you want +the code to wait until the lock is available, and then acquire it (`blocking=True`), or +whether you want the function to return whether or not you have the lock into the `have_lock` +variable (`blocking=False`). In the case where `blocking=False`, if `have_lock=False` then +the lock is already in use, and you must **not** execute any code that requires the lock. +If your code has no way to proceed without the lock, then you should use `blocking=True`. + +## Testing Celery and Redis + +- Create and activate your test environment +- Make sure that in the ``config.json`` file, + - ``redis_host`` is set to localhost + - ``test_data`` is set appropriately + - ``transfer_dir`` is set to the test or dev directory as appropriate +- Start redis with ``redis-server &`` +- Start celery with ``celery -A shared_tasks worker -D -E -ldebug -Ofair -c1 --max-tasks-per-child=1 --prefetch-multiplier 1`` +- Run ``jwql/tests/test_redis_celery.py`` diff --git a/jwql/shared_tasks/run_pipeline.py b/jwql/shared_tasks/run_pipeline.py new file mode 100755 index 000000000..07191e49a --- /dev/null +++ b/jwql/shared_tasks/run_pipeline.py @@ -0,0 +1,394 @@ +#!/usr/bin/env python + +import argparse +from astropy.io import fits +from collections import OrderedDict +from copy import deepcopy +from glob import glob +import json +import os +import shutil +import sys +import time +import traceback + +from jwst import datamodels +from jwst.dq_init import DQInitStep +from jwst.dark_current import DarkCurrentStep +from jwst.firstframe import FirstFrameStep +from jwst.group_scale import GroupScaleStep +from jwst.ipc import IPCStep +from jwst.jump import JumpStep +from jwst.lastframe import LastFrameStep +from jwst.linearity import LinearityStep +from jwst.persistence import PersistenceStep +from jwst.pipeline.calwebb_detector1 import Detector1Pipeline +from jwst.ramp_fitting import RampFitStep +from jwst.refpix import RefPixStep +from jwst.rscd import RscdStep +from jwst.saturation import SaturationStep +from jwst.superbias import SuperBiasStep + +from jwql.instrument_monitors.pipeline_tools import PIPELINE_STEP_MAPPING, completed_pipeline_steps, get_pipeline_steps +from jwql.utils.logging_functions import configure_logging +from jwql.utils.permissions import set_permissions +from jwql.utils.utils import copy_files, ensure_dir_exists, get_config, filesystem_path + + +def run_pipe(input_file, short_name, work_directory, instrument, outputs, max_cores='all', step_args={}): + """Run the steps of ``calwebb_detector1`` on the input file, saving the result of each + step as a separate output file, then return the name-and-path of the file as reduced + in the reduction directory. 
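+
+    Parameters
+    ----------
+    input_file : str
+        Full path of the file to calibrate; it is copied into ``work_directory`` first
+
+    short_name : str
+        Input file name with no path, calibration tag, or extension
+
+    work_directory : str
+        Directory in which the calibration is run and the output files are written
+
+    instrument : str
+        Instrument that was used to produce the input file
+
+    outputs : list
+        Requested calibrated extensions; the step loop exits once all of them exist
+
+    max_cores : str
+        Value passed as ``maximum_cores`` to the jump and ramp-fitting steps
+
+    step_args : dict
+        Nested dictionary of user-specified step parameters; an entry of ``{'skip': True}``
+        for a step causes that step to be skipped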
+ """ + input_file_basename = os.path.basename(input_file) + start_dir = os.path.dirname(input_file) + status_file_name = short_name + "_status.txt" + status_file = os.path.join(work_directory, status_file_name) + uncal_file = os.path.join(work_directory, input_file_basename) + + with open(status_file, 'a+') as status_f: + status_f.write("Running run_pipe\n") + status_f.write("\t input_file_basename is {} ({})\n".format(input_file_basename, type(input_file_basename))) + status_f.write("\t start_dir is {} ({})\n".format(start_dir, type(start_dir))) + status_f.write("\t uncal_file is {} ({})\n".format(uncal_file, type(uncal_file))) + status_f.write(f"\t outputs is {outputs}\n") + sys.stderr.write("Running run_pipe\n") + sys.stderr.write("\t input_file_basename is {} ({})\n".format(input_file_basename, type(input_file_basename))) + sys.stderr.write("\t start_dir is {} ({})\n".format(start_dir, type(start_dir))) + sys.stderr.write("\t uncal_file is {} ({})\n".format(uncal_file, type(uncal_file))) + sys.stderr.write(f"\t outputs is {outputs}\n") + + try: + sys.stderr.write("Copying file {} to working directory.\n".format(input_file)) + copy_files([input_file], work_directory) + sys.stderr.write("Setting permissions on {}\n".format(uncal_file)) + set_permissions(uncal_file) + + steps = get_pipeline_steps(instrument) + sys.stderr.write("Pipeline steps initialized to {}\n".format(steps)) + + # If the input file is a file other than uncal.fits, then we may only need to run a + # subset of steps. Check the completed steps in the input file. Find the latest step + # that has been completed, and skip that plus all prior steps + if 'uncal' not in input_file: + completed_steps = completed_pipeline_steps(input_file) + sys.stderr.write("Steps {} already completed.\n".format(completed_steps)) + + # Reverse the boolean value, so that now steps answers the question: "Do we need + # to run this step?"" + for step in steps: + steps[step] = not completed_steps[step] + + # Make sure we don't run steps out of order. Find the latest step that has been + # run, and only run subsequent steps. This protects against cases where some early + # step was not run. In that case, we don't want to go back and run it because running + # pipeline steps out of order doesn't work. 
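+            # Illustrative (hypothetical) case: if dq_init and saturation are already marked
+            # complete in the input file but superbias is not, last_run ends up as 'saturation',
+            # every step up to and including it stays skipped, and processing resumes at the
+            # superbias step (using the NIR step ordering).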
+ if instrument in ['miri', 'nirspec']: + last_run = 'group_scale' # initialize to the first step + else: + last_run = 'dq_init' + + for step in steps: + if not steps[step]: + sys.stderr.write("Setting last_run to {}.\n".format(step)) + last_run = deepcopy(step) + + for step in steps: + if step == last_run: + break + if step != last_run: + sys.stderr.write("Setting {} to skip while looking for last_run.\n".format(step)) + steps[step] = False + + # Set any steps the user specifically asks to skip + for step, step_dict in step_args.items(): + if 'skip' in step_dict: + if step_dict['skip']: + sys.stderr.write("Setting step {} to skip by user request.\n".format(step)) + steps[step] = False + + # Run each specified step + first_step_to_be_run = True + for step_name in steps: + kwargs = {} + if step_name in step_args: + kwargs = step_args[step_name] + if step_name in ['jump', 'rate']: + kwargs['maximum_cores'] = max_cores + if steps[step_name]: + sys.stderr.write("Running step {}\n".format(step_name)) + with open(status_file, 'a+') as status_f: + status_f.write("Running step {}\n".format(step_name)) + output_file_name = short_name + "_{}.fits".format(step_name) + output_file = os.path.join(work_directory, output_file_name) + # skip already-done steps + if not os.path.isfile(output_file): + if first_step_to_be_run: + model = PIPELINE_STEP_MAPPING[step_name].call(input_file, **kwargs) + first_step_to_be_run = False + else: + model = PIPELINE_STEP_MAPPING[step_name].call(model, **kwargs) + + if step_name != 'rate': + # Make sure the dither_points metadata entry is at integer (was a + # string prior to jwst v1.2.1, so some input data still have the + # string entry. + # If we don't change that to an integer before saving the new file, + # the jwst package will crash. + try: + model.meta.dither.dither_points = int(model.meta.dither.dither_points) + except TypeError: + # If the dither_points entry is not populated, then ignore this + # change + pass + model.save(output_file) + else: + try: + model[0].meta.dither.dither_points = int(model[0].meta.dither.dither_points) + except TypeError: + # If the dither_points entry is not populated, then ignore this change + pass + model[0].save(output_file) + if 'rateints' in outputs: + outbase = os.path.basename(output_file) + outbase = outbase.replace('rate', 'rateints') + output_file = os.path.join(work_directory, outbase) + model[1].save(output_file) + with open(status_file, 'a+') as status_f: + status_f.write(f"Saved rateints model to {output_file}\n") + done = True + for output in outputs: + output_name = "{}_{}.fits".format(short_name, output) + output_check_file = os.path.join(work_directory, output_name) + if not os.path.isfile(output_check_file): + done = False + if done: + sys.stderr.write("Done pipeline.\n") + break + else: + sys.stderr.write("Skipping step {}\n".format(step_name)) + with open(status_file, 'a+') as status_f: + status_f.write("Skipping step {}\n".format(step_name)) + + except Exception as e: + with open(status_file, "a+") as status_f: + status_f.write("EXCEPTION\n") + status_f.write("{}\n".format(e)) + status_f.write("FAILED\n") + status_f.write(traceback.format_exc()) + sys.exit(1) + + with open(status_file, "a+") as status_f: + status_f.write("SUCCEEDED") + # Done. + + +def run_save_jump(input_file, short_name, work_directory, instrument, ramp_fit=True, save_fitopt=True, max_cores='all', step_args={}): + """Call ``calwebb_detector1`` on the provided file, running all + steps up to the ``ramp_fit`` step, and save the result. 
Optionally + run the ``ramp_fit`` step and save the resulting slope file as well. + """ + input_file_basename = os.path.basename(input_file) + start_dir = os.path.dirname(input_file) + status_file_name = short_name + "_status.txt" + status_file = os.path.join(work_directory, status_file_name) + uncal_file = os.path.join(work_directory, input_file_basename) + + sys.stderr.write("Starting pipeline\n") + with open(status_file, 'a+') as status_f: + status_f.write("Starting pipeline\n") + + try: + copy_files([input_file], work_directory) + set_permissions(uncal_file) + + # Find the instrument used to collect the data + datamodel = datamodels.RampModel(uncal_file) + instrument = datamodel.meta.instrument.name.lower() + + # If the data pre-date jwst version 1.2.1, then they will have + # the NUMDTHPT keyword (with string value of the number of dithers) + # rather than the newer NRIMDTPT keyword (with an integer value of + # the number of dithers). If so, we need to update the file here so + # that it doesn't cause the pipeline to crash later. Both old and + # new keywords are mapped to the model.meta.dither.dither_points + # metadata entry. So we should be able to focus on that. + if isinstance(datamodel.meta.dither.dither_points, str): + # If we have a string, change it to an integer + datamodel.meta.dither.dither_points = int(datamodel.meta.dither.dither_points) + elif datamodel.meta.dither.dither_points is None: + # If the information is missing completely, put in a dummy value + datamodel.meta.dither.dither_points = 1 + + # Switch to calling the pipeline rather than individual steps, + # and use the run() method so that we can set parameters + # progammatically. + model = Detector1Pipeline() + params = {} + + # Always true + if instrument == 'nircam': + params['refpix'] = dict(odd_even_rows=False) + + # Default CR rejection threshold is too low + params['jump'] = {} + params['jump']['rejection_threshold'] = 15 + + # Set up to save jump step output + params['jump']['save_results'] = True + params['jump']['output_dir'] = work_directory + params['jump']['maximum_cores'] = max_cores + jump_output = short_name + '_jump.fits' + + # Check to see if the jump version of the requested file is already + # present + run_jump = not os.path.isfile(jump_output) + + if ramp_fit: + params['ramp_fit'] = dict(save_results=True, maximum_cores=max_cores) + + pipe_output = os.path.join(work_directory, short_name + '_0_ramp_fit.fits') + run_slope = not os.path.isfile(pipe_output) + if save_fitopt: + params['ramp_fit']['save_opt'] = True + fitopt_output = os.path.join(work_directory, short_name + '_fitopt.fits') + run_fitopt = not os.path.isfile(fitopt_output) + else: + params['ramp_fit']['save_opt'] = False + fitopt_output = None + run_fitopt = False + else: + params['ramp_fit']['skip'] = True + pipe_output = None + fitopt_output = None + run_slope = False + run_fitopt = False + + # If the input file is dark.fits rather than uncal.fits, then skip + # all of the pipeline steps that are run prior to dark subtraction + if 'dark.fits' in input_file: + if instrument.lower() == 'miri': + steps_to_skip = ['group_scale', 'dq_init', 'saturation', 'ipc', 'firstframe', + 'lastframe', 'reset', 'linearity', 'rscd'] + else: + steps_to_skip = ['group_scale', 'dq_init', 'saturation', 'ipc', 'superbias', + 'refpix', 'linearity'] + for step in steps_to_skip: + step_dict = dict(skip=True) + if step in params: + params[step] = params[step].update(step_dict) + else: + params[step] = dict(skip=True) + else: + # Turn off IPC step until 
it is put in the right place + params['ipc'] = dict(skip=True) + + # Include any user-specified parameters + for step_name in step_args: + if step_name in params: + params[step_name] = params[step_name].update(step_args[step_name]) + else: + params[step_name] = step_args[step_name] + + if run_jump or (ramp_fit and run_slope) or (save_fitopt and run_fitopt): + model.call(datamodel, output_dir=work_directory, steps=params) + else: + print(("Files with all requested calibration states for {} already present in " + "output directory. Skipping pipeline call.".format(uncal_file))) + except Exception as e: + with open(status_file, "a+") as status_f: + status_f.write("EXCEPTION\n") + status_f.write("{}\n".format(e)) + status_f.write("FAILED\n") + status_f.write(traceback.format_exc()) + sys.exit(1) + + with open(status_file, "a+") as status_f: + status_f.write("{}\n".format(jump_output)) + status_f.write("{}\n".format(pipe_output)) + status_f.write("{}\n".format(pipe_output.replace("0_ramp", "1_ramp"))) + status_f.write("{}\n".format(fitopt_output)) + status_f.write("SUCCEEDED") + # Done. + + +if __name__ == '__main__': + status_dir = os.path.join(get_config()['outputs'], 'calibrated_data') + general_status_file = os.path.join(status_dir, "general_status.txt") + + with open(general_status_file, "w") as status_file: + status_file.write("Started at {}\n".format(time.ctime())) + status_file.write("\targv={}\n".format(sys.argv)) + + file_help = 'Input file to calibrate' + path_help = 'Directory in which to do the calibration' + ins_help = 'Instrument that was used to produce the input file' + pipe_help = 'Pipeline type to run (valid values are "jump" and "cal")' + out_help = 'Comma-separated list of output extensions (for cal only, otherwise just "all")' + name_help = 'Input file name with no path or extensions' + cores_help = 'Maximum cores to use (default "all")' + step_args_help = 'Step-specific parameter value nested dictionary' + parser = argparse.ArgumentParser(description='Run local calibration') + parser.add_argument('pipe', metavar='PIPE', type=str, help=pipe_help) + parser.add_argument('outputs', metavar='OUTPUTS', type=str, help=out_help) + parser.add_argument('working_path', metavar='PATH', type=str, help=path_help) + parser.add_argument('instrument', metavar='INSTRUMENT', type=str, help=ins_help) + parser.add_argument('input_file', metavar='FILE', type=str, help=file_help) + parser.add_argument('short_name', metavar='NAME', type=str, help=name_help) + parser.add_argument('max_cores', metavar='CORES', type=str, help=cores_help) + parser.add_argument('--step_args', metavar='STEP_ARGS', type=json.loads, default='{}', help=step_args_help) + + with open(general_status_file, "a+") as status_file: + status_file.write("Created argument parser at {}\n".format(time.ctime())) + + try: + args = parser.parse_args() + except Exception as e: + with open(general_status_file, "a+") as status_file: + status_file.write("Error parsing arguments.\n") + status_file.write("{}".format(e)) + raise e + + with open(general_status_file, "a+") as status_file: + status_file.write("Finished parsing args at {}\n".format(time.ctime())) + + input_file = args.input_file + instrument = args.instrument + short_name = args.short_name + working_path = args.working_path + pipe_type = args.pipe + outputs = args.outputs + step_args = args.step_args + + status_file = os.path.join(working_path, short_name + "_status.txt") + with open(status_file, 'w') as out_file: + out_file.write("Starting Process\n") + out_file.write("\tpipeline 
is {} ({})\n".format(pipe_type, type(pipe_type))) + out_file.write("\toutputs is {} ({})\n".format(outputs, type(outputs))) + out_file.write("\tworking_path is {} ({})\n".format(working_path, type(working_path))) + out_file.write("\tinstrument is {} ({})\n".format(instrument, type(instrument))) + out_file.write("\tinput_file is {} ({})\n".format(input_file, type(input_file))) + out_file.write("\tshort_name is {} ({})\n".format(short_name, type(short_name))) + out_file.write("\tstep_args is {} ({})\n".format(step_args, type(step_args))) + + if not os.path.isfile(args.input_file): + raise FileNotFoundError("No input file {}".format(args.input_file)) + + if pipe_type not in ['jump', 'cal']: + raise ValueError("Unknown calibration type {}".format(pipe_type)) + + try: + if pipe_type == 'jump': + with open(status_file, 'a+') as out_file: + out_file.write("Running jump pipeline.\n") + run_save_jump(input_file, short_name, working_path, instrument, ramp_fit=True, save_fitopt=True, max_cores=args.max_cores, step_args=args.step_args) + elif pipe_type == 'cal': + with open(status_file, 'a+') as out_file: + out_file.write("Running cal pipeline.\n") + outputs = outputs.split(",") + run_pipe(input_file, short_name, working_path, instrument, outputs, max_cores=args.max_cores, step_args=args.step_args) + except Exception as e: + with open(status_file, 'a+') as out_file: + out_file.write("Exception when starting pipeline.\n") + out_file.write("{}\n".format(e)) + raise e diff --git a/jwql/shared_tasks/shared_tasks.py b/jwql/shared_tasks/shared_tasks.py new file mode 100644 index 000000000..f076e2035 --- /dev/null +++ b/jwql/shared_tasks/shared_tasks.py @@ -0,0 +1,866 @@ +#! /usr/bin/env python + +"""This module contains code for the celery application, which is used for any demanding +work which should be restricted in terms of how many iterations are run simultaneously, or +which should be offloaded to a separate server as allowed. Currently, celery tasks exist +for: + +- Running the JWST pipeline on provided data files + +In general, tasks should be created or used in situations where having multiple monitors +(or parts of the website, etc.) running the same task would be wasteful (or has the +potential for crashes due to system resources being exhausted). Tasks may be useful if +multiple independent monitors might need the same thing (e.g. pipeline processing of the +same data file), and having each of them producing that thing independently would be +wasteful in terms of time and resources. If a task covers *both* cases, then it is +particularly useful. + +Because multiple monitors may be running at the same time, and may need the same task +performed, and because running the same task multiple times would be as wasteful as just +having each monitor run it independently, the celery-singleton module is used to require +task uniqueness. This is transparent to the monitors involved, as a duplicate task will be +given the same AsyncResult object as the existing task asking for the same resource, so +the monitor can simply proceed as if it were the only one requesting the task. + +Author +------ + + - Brian York + +Use +--- + +The basic method of running a celery task is to use the provided ``run_pipeline()`` +convenience function:: + + # This can, of course, be a relative import + from jwql.shared_tasks.shared_tasks import run_pipeline + + def some_function(some_arguments): + # ... do some work ... 
+ + # This returns the calibrated file's name and output location, where the output + # file will be transferred into the same internal location as the input file. It + # will block (i.e. wait) until the calibration has finished before returning. If + # the calibration raises an exception, it will also raise an exception. + output_file_or_files = run_pipeline(input_file, input_extension, requested_extensions, instrument, jump_pipe=False) + + # ... do other work ... + +If you want to queue up multiple instances of the same task, and get the results back as +a list:: + + from jwql.shared_tasks.shared_tasks import run_parallel_pipeline + + # ... + + # This version will take a list of input files, and will take either a single list + # of requested extensions (which will be applied to every input file) *or* a dictionary + # of requested extensions indexed by the names of the input files. It will return a + # dictionary of output files, indexed by the names of the input files. It will block + # until complete. + outputs = run_parallel_pipeline(input_files, input_ext, requested_exts, instrument, jump_pipe=False) + + # ... + +It is possible to set up non-blocking celery tasks, or to do other fancy things, but as of +yet it hasn't been worth putting together a convenience function that will do that. + +There are many other ways to call and use tasks, including ways to group tasks, run them +synchronously, run a group of tasks with a final callback function, etc. These are best +explained by the celery documentation itself. +""" +from astropy.io import fits +from collections import OrderedDict +from copy import deepcopy +import gc +from glob import glob +import json +import logging +from logging import FileHandler, StreamHandler +import os +import redis +import shutil +from subprocess import Popen, PIPE, run, STDOUT +import sys + +from astropy.io import fits + +from jwst import datamodels +from jwst.dq_init import DQInitStep +from jwst.dark_current import DarkCurrentStep +from jwst.firstframe import FirstFrameStep +from jwst.group_scale import GroupScaleStep +from jwst.ipc import IPCStep +from jwst.jump import JumpStep +from jwst.lastframe import LastFrameStep +from jwst.linearity import LinearityStep +from jwst.persistence import PersistenceStep +from jwst.pipeline.calwebb_detector1 import Detector1Pipeline +from jwst.ramp_fitting import RampFitStep +from jwst.refpix import RefPixStep +from jwst.rscd import RscdStep +from jwst.saturation import SaturationStep +from jwst.superbias import SuperBiasStep + +from jwql.instrument_monitors.pipeline_tools import PIPELINE_STEP_MAPPING, get_pipeline_steps +from jwql.utils.logging_functions import configure_logging +from jwql.utils.permissions import set_permissions +from jwql.utils.utils import copy_files, ensure_dir_exists, get_config, filesystem_path + +from celery import Celery +from celery.app.log import TaskFormatter +from celery.signals import after_setup_logger, after_setup_task_logger, task_postrun +from celery.utils.log import get_task_logger + +try: + REDIS_HOST = get_config()["redis_host"] + REDIS_PORT = get_config()["redis_port"] +except FileNotFoundError as e: + REDIS_HOST = "127.0.0.1" + REDIS_PORT = "6379" +REDIS_URL = "redis://{}:{}".format(REDIS_HOST, REDIS_PORT) +REDIS_CLIENT = redis.Redis(host=REDIS_HOST, port=REDIS_PORT) + +# Okay, let's explain these options: +# - the first argument ('shared_tasks') is the task queue to listen to. We only have one, +# and we've named it 'shared_tasks'. 
Both the clients (monitors) and the server(s) +# (workers) need to use the same queue so that the workers are taking tasks from the +# same place that the monitors are putting them. +# - the broker is the server that keeps track of tasks and task IDs. redis does this +# - the backend is the server that keeps track of events. redis does this too +# - worker_mask_tasks_per_child is how many tasks a process can run before it gets +# restarted and replaced. This is set to 1 because the pipeline has memory leaks. +# - worker_prefetch_multiplier is how many tasks a worker can reserve for itself at a +# time. If set to 0, a worker can reserve any number. If set to an integer, a single +# worker can reserve that many tasks. We don't really want workers reserving tasks +# while they're already running a task, because tasks can take a long time to finish, +# and there's no reason for the other workers to be sitting around doing nothing while +# all the monitors are waiting on a single worker. +# - worker_concurrency is how many task threads a worker can run concurrently on the same +# machine. It's set to 1 because an individual pipeline process can consume all of the +# available memory, so setting the concurrency higher will result in inevitable crashes. +# - the broker visibility timeout is the amount of time that redis will wait for a +# worker to say it has completed a task before it will dispatch the task (again) to +# another worker. This should be set to longer than you expect to wait for a single +# task to finish. Currently set to 1 day. +celery_app = Celery('shared_tasks', broker=REDIS_URL, backend=REDIS_URL) +celery_app.conf.update(worker_max_tasks_per_child=1) +celery_app.conf.update(worker_prefetch_multiplier=1) +celery_app.conf.update(task_acks_late=True) +celery_app.conf.update(worker_concurrency=1) +celery_app.conf.broker_transport_options = {'visibility_timeout': 14400} + + +def only_one(function=None, key="", timeout=None): + """Enforce only one of the function running at a time. 
Import as decorator.""" + + def _dec(run_func): + """Decorator.""" + + def _caller(*args, **kwargs): + """Caller.""" + ret_value = None + have_lock = False + lock = REDIS_CLIENT.lock(key, timeout=timeout) + try: + have_lock = lock.acquire(blocking=False) + if have_lock: + ret_value = run_func(*args, **kwargs) + else: + logging.warning("Lock {} is already in use.".format(key)) + msg = "If you believe that this is a stale lock, log in to {}" + msg += " and enter 'redis-cli del {}'" + logging.warning(msg.format(get_config()['redis_host'], key)) + finally: + if have_lock: + lock.release() + + return ret_value + + return _caller + + return _dec(function) if function is not None else _dec + + +def create_task_log_handler(logger, propagate): + log_file_name = configure_logging('shared_tasks') + output_dir = os.path.join(get_config()['outputs'], 'calibrated_data') + ensure_dir_exists(output_dir) + celery_log_file_handler = FileHandler(log_file_name) + logger.addHandler(celery_log_file_handler) + for handler in logger.handlers: + handler.setFormatter(TaskFormatter('%(asctime)s - %(task_id)s - %(task_name)s - %(name)s - %(levelname)s - %(message)s')) + logger.propagate = propagate + if not os.path.exists(os.path.join(output_dir, "celery_pipeline_log.cfg")): + with open(os.path.join(output_dir, "celery_pipeline_log.cfg"), "w") as cfg_file: + cfg_file.write("[*]\n") + cfg_file.write("level = WARNING\n") + cfg_file.write("handler = append:{}\n".format(log_file_name)) + + +def log_subprocess_output(pipe): + """ + If a subprocess STDOUT has been set to subprocess.PIPE, this function will log each + line to the logging output. + """ + for line in iter(pipe.readline, b''): # b'\n'-separated lines + logging.info("\t{}".format(line.decode('UTF-8').strip())) + + +@after_setup_task_logger.connect +def after_setup_celery_task_logger(logger, **kwargs): + """ This function sets the 'celery.task' logger handler and formatter """ + create_task_log_handler(logger, True) + + +@after_setup_logger.connect +def after_setup_celery_logger(logger, **kwargs): + """ This function sets the 'celery' logger handler and formatter """ + create_task_log_handler(logger, False) + + +@task_postrun.connect +def collect_after_task(**kwargs): + gc.collect() + + +def convert_step_args_to_string(args_dict): + """Convert the nested dictionary containing pipeline step parameter keyword/value pairs + to a string so that it can be passed via command line + + Parameters + ---------- + args_dict : dict + Nested dictionary. Top level keys are pipeline step names. Values are dictionaries containing + keyword value pairs for that step. + + Returns + ------- + args_str : str + String representation of ``args_dict`` + """ + args_str = "'{" + + for i, step in enumerate(args_dict): + args_str += f'"{step}":' + args_str += '{' + for j, (param, val) in enumerate(args_dict[step].items()): + args_str += f'"{param}":"{val}"' + if j < len(args_dict[step]) - 1: + args_str += ', ' + args_str += "}" + if i < len(args_dict) - 1: + args_str += ',' + args_str += "}'" + return args_str + + +def run_subprocess(name, cmd, outputs, cal_dir, ins, in_file, short_name, res_file, cores, step_args): + # Convert step_args dictionary to a string so that it can be passed via command line. + # For some reason, json.dumps() doesn't seem to work correctly, so we use a custom function. 
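+    # For example (hypothetical values), {'jump': {'rejection_threshold': 15}} is rendered as
+    # the string '{"jump":{"rejection_threshold":"15"}}' (wrapped in single quotes), which
+    # run_pipeline.py converts back into a dictionary via json.loads on its --step_args option.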
+ step_args_str = convert_step_args_to_string(step_args) + + command = "{} {} {} '{}' {} {} {} {} --step_args {}" + command = command.format(name, cmd, outputs, cal_dir, ins, in_file, short_name, cores, step_args_str) + logging.info("Running {}".format(command)) + process = Popen(command, shell=True, executable="/bin/bash", stderr=PIPE) + with process.stderr: + log_subprocess_output(process.stderr) + result = process.wait() + logging.info("Subprocess result was {}".format(result)) + + if not os.path.isfile(res_file): + logging.error("Result file was not created.") + with open(os.path.join(cal_dir, "general_status.txt")) as status_file: + status = status_file.readlines() + for line in status: + logging.error(line.strip()) + return status + + with open(res_file, 'r') as inf: + status = inf.readlines() + return status + + +@celery_app.task(name='jwql.shared_tasks.shared_tasks.run_calwebb_detector1') +def run_calwebb_detector1(input_file_name, short_name, ext_or_exts, instrument, step_args={}): + """Run the steps of ``calwebb_detector1`` on the input file, saving the result of each + step as a separate output file, then return the name-and-path of the file as reduced + in the reduction directory. Once all requested extensions have been produced, the + pipeline will return. + + Parameters + ---------- + input_file_name : str + File on which to run the pipeline steps + + short_name : str + Name of the file to be calibrated after any extensions have been stripped off. + + ext_or_exts : list + List of extensions to be retrieved. + + instrument : str + Instrument that was used for the observation contained in input_file_name. + + step_args : dict + A dictionary containing custom arguments to supply to individual pipeline steps. + When a step is run, the dictionary will be checked for a key matching the step + name (as defined in jwql.utils.utils.get_pipeline_steps() for the provided + instrument). The value matching the step key should, itself, be a dictionary that + can be spliced in to step.call() via dereferencing (**dict) + + Returns + ------- + reduction_path : str + The path at which the reduced data file(s) may be found. 
+ """ + msg = "Starting {} calibration task for {}" + logging.info(msg.format(instrument, input_file_name)) + config = get_config() + if isinstance(ext_or_exts, str): + ext_or_exts = [ext_or_exts] + + input_dir = os.path.join(config['transfer_dir'], "incoming") + cal_dir = os.path.join(config['outputs'], "calibrated_data") + output_dir = os.path.join(config['transfer_dir'], "outgoing") + msg = "Input from {}, calibrate in {}, output to {}" + logging.info(msg.format(input_dir, cal_dir, output_dir)) + + input_file = os.path.join(input_dir, input_file_name) + current_dir = os.path.dirname(__file__) + cmd_name = os.path.join(current_dir, "run_pipeline.py") + outputs = ",".join(ext_or_exts) + result_file = os.path.join(cal_dir, short_name + "_status.txt") + if "all" in ext_or_exts: + logging.info("All outputs requested") + if instrument.lower() != 'miri': + out_exts = ["dq_init", "saturation", "superbias", "refpix", "linearity", + "persistence", "dark_current", "jump", "rate"] + else: + out_exts = ["group_scale", "dq_init", "saturation", "firstframe", "lastframe", "reset", + "linearity", "rscd", "dark_current", "refpix", "jump", "rate", "gain_scale"] + + calibrated_files = ["{}_{}.fits".format(short_name, ext) for ext in out_exts] + logging.info("Requesting {}".format(calibrated_files)) + else: + calibrated_files = ["{}_{}.fits".format(short_name, ext) for ext in ext_or_exts] + logging.info("Requesting {}".format(calibrated_files)) + + cores = 'all' + status = run_subprocess(cmd_name, "cal", outputs, cal_dir, instrument, input_file, + short_name, result_file, cores, step_args) + + if status[-1].strip() == "SUCCEEDED": + logging.info("Subprocess reports successful finish.") + else: + managed = False + logging.error("Pipeline subprocess failed.") + core_fail = False + for line in status: + if "[Errno 12] Cannot allocate memory" in line: + core_fail = True + logging.error("\t{}".format(line.strip())) + if core_fail: + cores = "half" + status = run_subprocess(cmd_name, "cal", outputs, cal_dir, instrument, + input_file, short_name, result_file, cores, step_args) + + if status[-1].strip() == "SUCCEEDED": + logging.info("Subprocess reports successful finish.") + managed = True + else: + logging.error("Pipeline subprocess failed.") + core_fail = False + for line in status: + if "[Errno 12] Cannot allocate memory" in line: + core_fail = True + logging.error("\t{}".format(line.strip())) + if core_fail: + cores = "none" + status = run_subprocess(cmd_name, "cal", outputs, cal_dir, instrument, + input_file, short_name, result_file, cores, step_args) + + if status[-1].strip() == "SUCCEEDED": + logging.info("Subprocess reports successful finish.") + managed = True + else: + logging.error("Pipeline subprocess failed.") + if not managed: + raise ValueError("Pipeline Failed") + + for file in calibrated_files: + logging.info("Checking for output {}".format(file)) + if not os.path.isfile(os.path.join(cal_dir, file)): + logging.error("ERROR: {} not found".format(file)) + raise FileNotFoundError(file) + logging.info("Copying output file {}".format(file)) + copy_files([os.path.join(cal_dir, file)], output_dir) + set_permissions(os.path.join(output_dir, file)) + + logging.info("Removing local files.") + files_to_remove = glob(os.path.join(cal_dir, short_name + "*")) + for file_name in files_to_remove: + logging.info("\tRemoving {}".format(file_name)) + os.remove(file_name) + + logging.info("Finished calibration.") + + +@celery_app.task(name='jwql.shared_tasks.shared_tasks.calwebb_detector1_save_jump') +def 
calwebb_detector1_save_jump(input_file_name, instrument, ramp_fit=True, save_fitopt=True, step_args={}): + """Call ``calwebb_detector1`` on the provided file, running all + steps up to the ``ramp_fit`` step, and save the result. Optionally + run the ``ramp_fit`` step and save the resulting slope file as well. + + Parameters + ---------- + input_file : str + Name of fits file to run on the pipeline + + ramp_fit : bool + If ``False``, the ``ramp_fit`` step is not run. The output file + will be a ``*_jump.fits`` file. + If ``True``, the ``*jump.fits`` file will be produced and saved. + In addition, the ``ramp_fit`` step will be run and a + ``*rate.fits`` or ``*_rateints.fits`` file will be saved. + (``rateints`` if the input file has >1 integration) + + save_fitopt : bool + If ``True``, the file of optional outputs from the ramp fitting + step of the pipeline is saved. + + step_args : dict + A dictionary containing custom arguments to supply to individual pipeline steps. + When a step is run, the dictionary will be checked for a key matching the step + name (as defined in jwql.utils.utils.get_pipeline_steps() for the provided + instrument). The value matching the step key should, itself, be a dictionary that + can be spliced in to step.call() via dereferencing (**dict) + + Returns + ------- + jump_output : str + Name of the saved file containing the output prior to the + ``ramp_fit`` step. + + pipe_output : str + Name of the saved file containing the output after ramp-fitting + is performed (if requested). Otherwise ``None``. + + fitopt_output : str + Name of the saved file containing the output after ramp-fitting + is performed (if requested). Otherwise ``None``. + """ + msg = "Started Save Jump Task on {}. ramp_fit={}, save_fitopt={}" + logging.info(msg.format(input_file_name, ramp_fit, save_fitopt)) + config = get_config() + + input_dir = os.path.join(config["transfer_dir"], "incoming") + cal_dir = os.path.join(config['outputs'], "calibrated_data") + output_dir = os.path.join(config['transfer_dir'], "outgoing") + msg = "Input from {}, calibrate in {}, output to {}" + logging.info(msg.format(input_dir, cal_dir, output_dir)) + + input_file = os.path.join(input_dir, input_file_name) + if not os.path.isfile(input_file): + logging.error("File {} not found!".format(input_file)) + raise FileNotFoundError("{} not found".format(input_file)) + + parts = input_file_name.split('_') + short_name = f'{parts[0]}_{parts[1]}_{parts[2]}_{parts[3]}' + ensure_dir_exists(cal_dir) + output_dir = os.path.join(config["transfer_dir"], "outgoing") + + cmd_name = os.path.join(os.path.dirname(__file__), "run_pipeline.py") + result_file = os.path.join(cal_dir, short_name + "_status.txt") + + cores = 'all' + status = run_subprocess(cmd_name, "jump", "all", cal_dir, instrument, input_file, + short_name, result_file, cores, step_args) + + if status[-1].strip() == "SUCCEEDED": + logging.info("Subprocess reports successful finish.") + else: + logging.error("Pipeline subprocess failed.") + managed = False + core_fail = False + for line in status: + if "[Errno 12] Cannot allocate memory" in line: + core_fail = True + logging.error("\t{}".format(line.strip())) + if core_fail: + cores = "half" + status = run_subprocess(cmd_name, "jump", "all", cal_dir, instrument, + input_file, short_name, result_file, cores, step_args) + if status[-1].strip() == "SUCCEEDED": + logging.info("Subprocess reports successful finish.") + managed = True + else: + logging.error("Pipeline subprocess failed.") + core_fail = False + for line in status: 
+ if "[Errno 12] Cannot allocate memory" in line: + core_fail = True + logging.error("\t{}".format(line.strip())) + if core_fail: + cores = "none" + status = run_subprocess(cmd_name, "jump", "all", cal_dir, instrument, + input_file, short_name, result_file, cores, step_args) + if status[-1].strip() == "SUCCEEDED": + logging.info("Subprocess reports successful finish.") + managed = True + else: + logging.error("Pipeline subprocess failed.") + if not managed: + raise ValueError("Pipeline Failed") + + files = {"jump_output": None, "pipe_output": None, "fitopt_output": None} + for line in status[-5:-1]: + file = line.strip() + logging.info("Copying output file {}".format(file)) + if not os.path.isfile(os.path.join(cal_dir, file)): + logging.error("WARNING: {} not found".format(file)) + else: + copy_files([os.path.join(cal_dir, file)], output_dir) + set_permissions(os.path.join(output_dir, file)) + if "jump" in file: + files["jump_output"] = os.path.join(output_dir, file) + if "ramp" in file: + files["pipe_output"] = os.path.join(output_dir, file) + if "fitopt" in file: + files["fitopt_output"] = os.path.join(output_dir, file) + + logging.info("Removing local files.") + files_to_remove = glob(os.path.join(cal_dir, short_name + "*")) + for file_name in files_to_remove: + logging.info("\tRemoving {}".format(file_name)) + os.remove(file_name) + + logging.info("Finished pipeline") + return files["jump_output"], files["pipe_output"], files["fitopt_output"] + + +def prep_file(input_file, in_ext): + """Prepares a file for calibration by: + + - Creating a short file-name from the file (i.e. the name without the calibration + extension) + - Creating a redis lock on the short name + - Copying the uncalibrated file into the transfer directory + + Returns the lock and the short name. + + Parameters + ---------- + input_file : str + Name of the fits file to run + + in_ext : str + The calibration extension currently present on the input file + + Returns + ------- + lock : redis lock + Acquired lock on the input file + + short_name : str + The exposure ID with the calibration tag and the fits extension chopped off. + + input_name : str + The raw file to be calibrated + """ + config = get_config() + send_path = os.path.join(config["transfer_dir"], "incoming") + ensure_dir_exists(send_path) + receive_path = os.path.join(config["transfer_dir"], "outgoing") + ensure_dir_exists(receive_path) + + input_path, input_name = os.path.split(input_file) + logging.info("\tPath is {}, file is {}".format(input_path, input_name)) + + if not os.path.isfile(input_file): + raise FileNotFoundError("Input File {} does not exist.".format(input_file)) + + output_file_or_files = [] + short_name = input_name.replace("_" + in_ext, "").replace(".fits", "") + logging.info("\tLocking {}".format(short_name)) + cal_lock = REDIS_CLIENT.lock(short_name) + have_lock = cal_lock.acquire(blocking=True) + if not have_lock: + msg = "Waited for lock on {}, and was granted it, but don't have it!" + logging.critical(msg.format(short_name)) + raise ValueError("Redis lock for {} is in an unknown state".format(short_name)) + logging.info("\t\tAcquired Lock.") + logging.info("\t\tCopying {} to {}".format(input_file, send_path)) + copy_files([input_file], send_path) + return short_name, cal_lock, os.path.join(send_path, input_name) + + +def start_pipeline(input_file, short_name, ext_or_exts, instrument, jump_pipe=False, step_args={}): + """Starts the standard or save_jump pipeline for the provided file. + + .. 
warning:: + + Only call this function if you have already locked the file using Redis. + + This function performs the following steps: + + - Determine whether to call calwebb_detector1 or save_jump tasks + - If calling save_jump, determine which outputs are expected + - Call the task + - return the task result object (so that it can be dealt with appropriately) + + When this function returns, the task may or may not have started, and probably will + not have finished. Because the task was called using the ``delay()`` method, calling + ``result.get()`` will block until the result is available. + + .. warning:: + + This function will not use the ``celery`` settings to trap exceptions, so calling + ``result.get()`` *may* raise an exception if the task itself raises an exception. + + Parameters + ---------- + input_file : str + Name of fits file to run on the pipeline + + ext_or_exts : str or list-of-str + The requested output calibrated files + + instrument : str + Name of the instrument being calibrated + + jump_pipe : bool + Whether the detector1 jump pipeline is being used (e.g. the bad pixel monitor) + + step_args : dict + Pipeline step arguments to be passed to the pipeline call. Nested dictionary with keys that + are the step names (as seen in pipeline_tools.PIPELINE_STEP_MAPPING). Each value is a + dictionary of keyword value pairs that are relevant for that step. + + Returns + ------- + result : celery.result.AsyncResult + The task result object + """ + if isinstance(ext_or_exts, dict): + ext_or_exts = ext_or_exts[short_name] + if jump_pipe: + ramp_fit = False + save_fitopt = False + for ext in ext_or_exts: + if "ramp" in ext: + ramp_fit = True + elif "fitopt" in ext: + save_fitopt = True + result = calwebb_detector1_save_jump.delay(input_file, instrument, ramp_fit=ramp_fit, save_fitopt=save_fitopt, step_args=step_args) + else: + result = run_calwebb_detector1.delay(input_file, short_name, ext_or_exts, instrument, step_args=step_args) + return result + + +def retrieve_files(short_name, ext_or_exts, dest_dir): + """This function takes the name of a calibrated file, the desired extensions, the + directory to which they should be copied, and a redis lock. 
It then does the following: + + - Copy the file(s) with the provided extensions to the output directory + - Deletes the files from the transfer directory + - Releases the lock + + Parameters + ---------- + short_name : str + Name of the calibrated file (without any calibration tags or file extension) + + ext_or_exts : str or list of str + Desired extension(s) + + dest_dir : str + Location for the desired extensions + + Returns + ------- + output_file_or_files : str or list of str + The location of the requested calibrated files + """ + if isinstance(ext_or_exts, dict): + ext_or_exts = ext_or_exts[short_name] + config = get_config() + send_path = os.path.join(config["transfer_dir"], "incoming") + ensure_dir_exists(send_path) + receive_path = os.path.join(config["transfer_dir"], "outgoing") + ensure_dir_exists(receive_path) + + if isinstance(ext_or_exts, str): + ext_or_exts = [ext_or_exts] + file_or_files = ["{}_{}.fits".format(short_name, x) for x in ext_or_exts] + output_file_or_files = [os.path.join(dest_dir, x) for x in file_or_files] + transfer_file_or_files = [os.path.join(receive_path, x) for x in file_or_files] + logging.info("\t\tCopying {} to {}".format(file_or_files, dest_dir)) + copy_files([os.path.join(receive_path, x) for x in file_or_files], dest_dir) + logging.info("\t\tClearing Transfer Files") + to_clear = glob(os.path.join(send_path, short_name + "*")) + glob(os.path.join(receive_path, short_name + "*")) + for file in to_clear: + os.remove(file) + if len(output_file_or_files) == 1: + output_file_or_files = output_file_or_files[0] + return output_file_or_files + + +def run_pipeline(input_file, in_ext, ext_or_exts, instrument, jump_pipe=False): + """Convenience function for using the ``run_calwebb_detector1`` function on a data + file, including the following steps: + + - Lock the file ID so that no other calibration happens at the same time + - Copy the input (raw) file to the (central storage) transfer location + - Call the ``run_calwebb_detector1`` task + - For the extension (or extensions) (where by "extension" we mean 'uncal' or 'refpix' + or 'jump' rather than something like '.fits') requested, copy the files from the + outgoing transfer location to the same directory as the input file + - Delete the input file from the transfer location + - Delete the output files from the transfer location + + It will then return what it was given – either a single file+path or a list of + files+paths, depending on what ``out_exts`` was provided as. + + Parameters + ---------- + input_file : str + Name of fits file to run on the pipeline + + ext_or_exts : str or list-of-str + The requested output calibrated files + + instrument : str + Name of the instrument being calibrated + + jump_pipe : bool + Whether the detector1 jump pipeline is being used (e.g. 
the bad pixel monitor) + + Returns + ------- + file_or_files : str or list-of-str + Name (or names) of the result file(s), including path(s) + """ + logging.info("Pipeline Call for {} requesting {}".format(input_file, ext_or_exts)) + try: + retrieve_dir = os.path.dirname(input_file) + short_name, cal_lock, uncal_file = prep_file(input_file, in_ext) + uncal_name = os.path.basename(uncal_file) + result = start_pipeline(uncal_name, short_name, ext_or_exts, instrument, jump_pipe=jump_pipe) + logging.info("\t\tStarting with ID {}".format(result.id)) + processed_path = result.get() + logging.info("\t\tPipeline Complete") + output = retrieve_files(short_name, ext_or_exts, retrieve_dir) + except Exception as e: + logging.error('\tPipeline processing failed for {}'.format(input_name)) + logging.error('\tProcessing raised {}'.format(e)) + finally: + cal_lock.release() + logging.info("\tReleased Lock {}".format(short_name)) + + logging.info("Pipeline Call Completed") + return output + + +def run_parallel_pipeline(input_files, in_ext, ext_or_exts, instrument, jump_pipe=False, step_args={}): + """Convenience function for using the ``run_calwebb_detector1`` function on a list of + data files, breaking them into parallel celery calls, collecting the results together, + and returning the results as another list. In particular, this function will do the + following: + + - Lock the file ID so that no other calibration happens at the same time + - Copy the input (raw) file to the (central storage) transfer location + - Call the ``run_calwebb_detector1`` task + - For the extension (or extensions) (where by "extension" we mean 'uncal' or 'refpix' + or 'jump' rather than something like '.fits') requested, copy the files from the + outgoing transfer location to the same directory as the input file + - Delete the input file from the transfer location + - Delete the output files from the transfer location + + It will then return what it was given – either a single file+path or a list of + files+paths, depending on what ``out_exts`` was provided as. + + Parameters + ---------- + input_file : str + Name of fits file to run on the pipeline + + in_ext : str + Input file extension + + ext_or_exts : str or list-of-str or dict + The requested output calibrated files. This must be either: + + - A string indicating a single extension to be retrieved for all files. + - A list of strings indicating multiple extensions to be retrieved for all files. + - A dict with a key for each input file, containing either a single extension + string or a multiple-extension list of strings to be retrieved for that file + (note that a default dict can be used here) + + instrument : str + Name of the instrument being calibrated + + jump_pipe : bool + Whether the detector1 jump pipeline is being used (e.g. the bad pixel monitor) + + step_args : dict + Pipeline step arguments to be passed to the pipeline call. Nested dictionary with keys that + are the step names (as seen in pipeline_tools.PIPELINE_STEP_MAPPING). Each value is a + dictionary of keyword value pairs that are relevant for that step. 
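+        For example (hypothetical values): ``{'jump': {'rejection_threshold': 5}, 'dark_current': {'skip': True}}``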
+ + Returns + ------- + file_or_files : str or list-of-str + Name (or names) of the result file(s), including path(s) + """ + logging.info("Pipeline call requested calibrated extensions {}".format(ext_or_exts)) + for input_file in input_files: + logging.info("\tCalibrating {}".format(input_file)) + + input_file_paths = {} + results = {} + locks = {} + outputs = {} + output_dirs = {} + + logging.info("Dispatching celery tasks") + try: + for input_file in input_files: + retrieve_dir = os.path.dirname(input_file) + logging.info("\tPipeline call for {} requesting {} sent to {}".format(input_file, ext_or_exts, retrieve_dir)) + short_name, cal_lock, uncal_file = prep_file(input_file, in_ext) + uncal_name = os.path.basename(uncal_file) + output_dirs[short_name] = retrieve_dir + input_file_paths[short_name] = input_file + locks[short_name] = cal_lock + results[short_name] = start_pipeline(uncal_name, short_name, ext_or_exts, instrument, jump_pipe=jump_pipe, step_args=step_args) + logging.info("\tStarting {} with ID {}".format(short_name, results[short_name].id)) + logging.info("Celery tasks submitted.") + logging.info("Waiting for task results") + for short_name in results: + try: + logging.info("\tWaiting for {} ({})".format(short_name, results[short_name].id)) + processed_path = results[short_name].get() + logging.info("\t{} retrieved".format(short_name)) + outputs[input_file_paths[short_name]] = retrieve_files(short_name, ext_or_exts, output_dirs[short_name]) + logging.info("\tFiles copied for {}".format(short_name)) + except Exception as e: + logging.error('\tPipeline processing failed for {}'.format(short_name)) + logging.error('\tProcessing raised {}'.format(e)) + logging.info("Finished retrieving results") + finally: + logging.info("Releasing locks") + for short_name in locks: + locks[short_name].release() + logging.info("\tReleased Lock {}".format(short_name)) + logging.info("Finished releasing locks") + + logging.info("Pipeline Call Completed") + return outputs + + +if __name__ == '__main__': + + pass diff --git a/jwql/tests/resources.py b/jwql/tests/resources.py new file mode 100644 index 000000000..13ab4042f --- /dev/null +++ b/jwql/tests/resources.py @@ -0,0 +1,86 @@ +"""Resources for unit tests. + +Authors +------- + + - Melanie Clarke + +Use +--- + + These structures can be imported in unit tests and used to + mock various software functionality. 
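+
+    For example (the mock classes referenced here are defined below in this module)::
+
+        from jwql.tests.resources import MockSessionFileAnomaly
+
+        session = MockSessionFileAnomaly()
+        records = session.query(None).data_frame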
+""" + +import pandas as pd + +from jwql.utils.utils import get_config + + +def has_test_db(): + try: + config = get_config() + connection = config['connection_string'] + return 'dljwql' in connection + except (ValueError, KeyError, FileNotFoundError): + return False + + +class MockAnomalyQuery(object): + """Mock a SQLAlchemy query on an anomaly table.""" + def __init__(self, group_record=False): + records = [{'rootname': 'jw02589006001_04101_00001-seg001_nrs1', + 'persistence': True, 'crosstalk': True, + 'ghost': False}] + if group_record: + records.append( + {'rootname': 'jw02589006001_04101_00001-seg001_nrs2', + 'persistence': True, 'crosstalk': False, 'ghost': False}) + self.data_frame = pd.DataFrame(records) + + def filter(self, filter_val): + return self + + def order_by(self, sort_val): + return self + + +class MockSessionGroupAnomaly(object): + """Mock a SQLAlchemy session for an anomaly query on a group.""" + def query(self, table): + return MockAnomalyQuery(group_record=True) + + +class MockSessionFileAnomaly(object): + """Mock a SQLAlchemy session for an anomaly query on a file.""" + def query(self, table): + return MockAnomalyQuery(group_record=False) + + +class MockMessages(object): + """Mock Django messages for requests.""" + def __init__(self): + self.messages = [] + + def add(self, level, message, extra_tags): + self.messages.append(message) + + +class MockGetRequest(object): + """Mock a Django HTTP GET request.""" + def __init__(self): + self.method = 'GET' + self.session = dict() + self.GET = dict() + self.POST = dict() + self._messages = MockMessages() + + +class MockPostRequest(object): + """Mock a Django HTTP POST request.""" + def __init__(self): + self.method = 'POST' + self.session = dict() + self.GET = dict() + self.POST = dict() + self._messages = MockMessages() diff --git a/jwql/tests/test_api_views.py b/jwql/tests/test_api_views.py index e4ea0b8af..e3a2d1ca1 100644 --- a/jwql/tests/test_api_views.py +++ b/jwql/tests/test_api_views.py @@ -6,6 +6,8 @@ ------- - Matthew Bourque + - Bryan Hilbert + - Melanie Clarke Use --- @@ -39,27 +41,22 @@ # Instrument-specific URLs for instrument in JWST_INSTRUMENT_NAMES: urls.append('api/{}/proposals/'.format(instrument)) # instrument_proposals - urls.append('api/{}/preview_images/'.format(instrument)) # preview_images_by_instrument - urls.append('api/{}/thumbnails/'.format(instrument)) # thumbnails_by_instrument + urls.append('api/{}/looks/'.format(instrument)) # instrument_looks + urls.append('api/{}/looks/viewed/'.format(instrument)) # instrument_viewed + urls.append('api/{}/looks/new/'.format(instrument)) # instrument_new # Proposal-specific URLs -proposals = ['86700', # FGS - '98012', # MIRI - '93025', # NIRCam - '00308', # NIRISS - '308', # NIRISS - '96213'] # NIRSpec +proposals = ['2640', '02733', '1541', '02589'] + for proposal in proposals: urls.append('api/{}/filenames/'.format(proposal)) # filenames_by_proposal urls.append('api/{}/preview_images/'.format(proposal)) # preview_images_by_proposal urls.append('api/{}/thumbnails/'.format(proposal)) # thumbnails_by_proposal # Filename-specific URLs -rootnames = ['jw86600007001_02101_00001_guider2', # FGS - 'jw98012001001_02102_00001_mirimage', # MIRI - 'jw93025001001_02102_00001_nrca2', # NIRCam - 'jw00308001001_02103_00001_nis', # NIRISS - 'jw96213001001_02101_00001_nrs1'] # NIRSpec +rootnames = ['jw02733002001_02101_00001_mirimage', # MIRI + 'jw02733001001_02101_00001_nrcb2'] # NIRCam + for rootname in rootnames: urls.append('api/{}/filenames/'.format(rootname)) # 
filenames_by_rootname urls.append('api/{}/preview_images/'.format(rootname)) # preview_images_by_rootname @@ -91,14 +88,17 @@ def test_api_views(url): try: url = request.urlopen(url) - except error.HTTPError as e: - if e.code == 502: - pytest.skip("Dev server problem") - raise(e) + except (error.HTTPError, error.URLError): + pytest.skip("Server problem") try: data = json.loads(url.read().decode()) - assert len(data[data_type]) > 0 - except (http.client.IncompleteRead) as e: + + # viewed data depends on local database contents + # so may return an empty result + if data_type != 'viewed': + assert len(data[data_type]) > 0 + + except http.client.IncompleteRead as e: data = e.partial assert len(data) > 0 diff --git a/jwql/tests/test_bad_pixel_monitor.py b/jwql/tests/test_bad_pixel_monitor.py index 6cd78aa9e..6446570b6 100644 --- a/jwql/tests/test_bad_pixel_monitor.py +++ b/jwql/tests/test_bad_pixel_monitor.py @@ -16,6 +16,7 @@ pytest -s test_badpix_monitor.py """ +import datetime import numpy as np import os @@ -23,12 +24,14 @@ from jwst.datamodels import dqflags +from jwql.database.database_interface import engine, session from jwql.database.database_interface import NIRCamBadPixelQueryHistory, NIRCamBadPixelStats from jwql.database.database_interface import NIRISSBadPixelQueryHistory, NIRISSBadPixelStats from jwql.database.database_interface import MIRIBadPixelQueryHistory, MIRIBadPixelStats from jwql.database.database_interface import NIRSpecBadPixelQueryHistory, NIRSpecBadPixelStats from jwql.database.database_interface import FGSBadPixelQueryHistory, FGSBadPixelStats from jwql.instrument_monitors.common_monitors import bad_pixel_monitor +from jwql.tests.resources import has_test_db # Determine if tests are being run on Github Actions ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') @@ -174,13 +177,15 @@ def test_identify_tables(): def test_locate_rate_files(): """Test that rate files are found in filesystem""" - uncal_files = ['jw00300002001_02102_00001_mirimage_uncal.fits', 'jw00300002001_0210a_00001_mirimage_uncal.fits'] + uncal_files = ['jw02733002001_02101_00001_mirimage_uncal.fits', + 'jw02733002001_02101_00002_mirimage_uncal.fits'] ratefiles, ratefiles2copy = bad_pixel_monitor.locate_rate_files(uncal_files) rates = [os.path.basename(entry) for entry in ratefiles] rates2copy = [os.path.basename(entry) for entry in ratefiles2copy] - expected = ['jw00300002001_02102_00001_mirimage_rateints.fits', 'jw00300002001_0210a_00001_mirimage_rateints.fits'] + expected = ['jw02733002001_02101_00001_mirimage_rateints.fits', + 'jw02733002001_02101_00002_mirimage_rateints.fits'] assert rates == expected assert rates2copy == expected @@ -189,8 +194,8 @@ def test_locate_rate_files(): def test_locate_uncal_files(): """Test the filesystem search for uncal files """ - file1 = 'jw00300002001_02102_00001_mirimage_rate.fits' - file2 = 'jw00300010001_02102_00001_mirifushort_uncal.fits' + file1 = 'jw02733002001_02101_00001_mirimage_rate.fits' + file2 = 'jw02733002001_02101_00002_mirimage_uncal.fits' query_results = [{'filename': file1}, {'filename': file2}] @@ -212,3 +217,82 @@ def test_make_crds_parameter_dict(): assert params['INSTRUME'] == 'NIRCAM' assert params['DETECTOR'] == 'NRCALONG' assert params['CHANNEL'] == 'LONG' + + +@pytest.mark.skipif(not has_test_db(), reason='Modifies test database.') +def test_add_bad_pix(): + coord = ([1, 2, 3], [4, 5, 6]) + pixel_type = 'test_new_pixel_type' + files = ['test.fits'] + obs_start = obs_mid = obs_end = 
datetime.datetime.now() + baseline = 'baseline.fits' + + badpix = bad_pixel_monitor.BadPixels() + badpix.instrument = 'nircam' + badpix.detector = 'nrcalong' + badpix.identify_tables() + + try: + badpix.add_bad_pix(coord, pixel_type, files, obs_start, + obs_mid, obs_end, baseline) + new_entries = session.query(badpix.pixel_table).filter( + badpix.pixel_table.type == pixel_type) + + assert new_entries.count() == 1 + assert new_entries[0].baseline_file == baseline + assert np.all(new_entries[0].x_coord == coord[0]) + assert np.all(new_entries[0].y_coord == coord[1]) + finally: + # clean up + session.query(badpix.pixel_table).filter( + badpix.pixel_table.type == pixel_type).delete() + session.commit() + assert session.query(badpix.pixel_table).filter( + badpix.pixel_table.type == pixel_type).count() == 0 + + +@pytest.mark.skipif(not has_test_db(), reason='Modifies test database.') +def test_exclude_existing_badpix(): + coord = ([9999], [9999]) + pixel_type = 'hot' + + badpix = bad_pixel_monitor.BadPixels() + badpix.instrument = 'nircam' + badpix.detector = 'nrcalong' + badpix.identify_tables() + + # bad pixel type should raise error + with pytest.raises(ValueError) as err: + badpix.exclude_existing_badpix(coord, 'test_bad_type') + assert 'bad pixel type' in str(err) + + # new pixel should not be found + new_x, new_y = badpix.exclude_existing_badpix(coord, pixel_type) + assert new_x == [9999] + assert new_y == [9999] + + # add pixel, test again + files = ['test.fits'] + obs_start = obs_mid = obs_end = datetime.datetime.now() + baseline = 'test_baseline.fits' + + try: + badpix.add_bad_pix(coord, pixel_type, files, obs_start, + obs_mid, obs_end, baseline) + new_entries = session.query(badpix.pixel_table).filter( + badpix.pixel_table.baseline_file == baseline) + + assert new_entries.count() == 1 + + # new pixel should be found + new_x, new_y = badpix.exclude_existing_badpix(coord, pixel_type) + assert new_x == [] + assert new_y == [] + + finally: + # clean up + session.query(badpix.pixel_table).filter( + badpix.pixel_table.baseline_file == baseline).delete() + session.commit() + assert session.query(badpix.pixel_table).filter( + badpix.pixel_table.baseline_file == baseline).count() == 0 diff --git a/jwql/tests/test_bias_monitor.py b/jwql/tests/test_bias_monitor.py index d9ecfcac1..8becc2558 100644 --- a/jwql/tests/test_bias_monitor.py +++ b/jwql/tests/test_bias_monitor.py @@ -24,8 +24,9 @@ from astropy.io import fits import numpy as np -from jwql.database.database_interface import NIRCamBiasQueryHistory, NIRCamBiasStats +from jwql.database.database_interface import NIRCamBiasQueryHistory, NIRCamBiasStats, session from jwql.instrument_monitors.common_monitors import bias_monitor +from jwql.tests.resources import has_test_db from jwql.utils.utils import get_config ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') @@ -122,3 +123,49 @@ def test_make_histogram(): assert counts == counts_truth assert bin_centers == bin_centers_truth + + +@pytest.mark.skipif(not has_test_db(), reason='Modifies test database.') +def test_process(mocker, tmp_path): + hdul = fits.HDUList([ + fits.PrimaryHDU(header=fits.Header({ + 'READPATT': 'test', 'DATE-OBS': 'test', + 'TIME-OBS': 'test'})), + fits.ImageHDU(np.zeros((10, 10, 10, 10)), name='SCI')]) + filename = str(tmp_path / 'test_raw_file.fits') + processed_file = str(tmp_path / 'test_processed_file.fits') + hdul.writeto(filename, overwrite=True) + hdul.writeto(processed_file, overwrite=True) + + monitor = 
bias_monitor.Bias() + monitor.instrument = 'nircam' + monitor.aperture = 'test' + monitor.identify_tables() + + assert not monitor.file_exists_in_database(filename) + + # mock the pipeline run + mocker.patch.object(bias_monitor, 'run_parallel_pipeline', + return_value={filename: processed_file}) + # mock amplifier info + mocker.patch.object(bias_monitor.instrument_properties, 'amplifier_info', + return_value=('test', 'test')) + mocker.patch.object(monitor, 'get_amp_medians', + return_value={'test': 0}) + # mock image creation + mocker.patch.object(monitor, 'make_histogram', + return_value=(np.zeros(10), np.zeros(10))) + mocker.patch.object(monitor, 'image_to_png', + return_value=str(tmp_path / 'output.png')) + + try: + monitor.process([filename]) + assert monitor.file_exists_in_database(filename) + finally: + # clean up + query = session.query(monitor.stats_table).filter( + monitor.stats_table.uncal_filename == filename) + query.delete() + session.commit() + + assert not monitor.file_exists_in_database(filename) diff --git a/jwql/tests/test_bokeh_templating.py b/jwql/tests/test_bokeh_templating.py deleted file mode 100644 index dff2cab75..000000000 --- a/jwql/tests/test_bokeh_templating.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Tests for the ``bokeh_templating`` module. -Authors -------- - - Graham Kanarek -Use ---- - These tests can be run via the command line (omit the -s to - suppress verbose output to stdout): - :: - pytest -s test_bokeh_templating.py -""" - -import os -import numpy as np -from jwql.bokeh_templating import BokehTemplate -file_dir = os.path.dirname(os.path.realpath(__file__)) - - -class TestTemplate(BokehTemplate): - """ - A minimal BokehTemplate app for testing purposes. This is adapted from - the example included in the ``bokeh_template`` package. - """ - - _embed = True - - def pre_init(self): - """ - Before creating the Bokeh interface (by parsing the interface - file), we must initialize our ``a`` and ``b`` variables, and set - the path to the interface file. - """ - - self.a, self.b = 4, 2 - - self.format_string = None - self.interface_file = os.path.join(file_dir, "test_bokeh_tempating_interface.yaml") - - # No post-initialization tasks are required. - post_init = None - - @property - def x(self): - """The x-value of the Lissajous curves.""" - return 4. * np.sin(self.a * np.linspace(0, 2 * np.pi, 500)) - - @property - def y(self): - """The y-value of the Lissajous curves.""" - return 3. * np.sin(self.b * np.linspace(0, 2 * np.pi, 500)) - - def controller(self, attr, old, new): - """ - This is the controller function which is used to update the - curves when the sliders are adjusted. Note the use of the - ``self.refs`` dictionary for accessing the Bokeh object - attributes. 
- """ - self.a = self.refs["a_slider"].value - self.b = self.refs["b_slider"].value - - self.refs["figure_source"].data = {'x': self.x, 'y': self.y} diff --git a/jwql/tests/test_bokeh_templating_interface.yaml b/jwql/tests/test_bokeh_templating_interface.yaml deleted file mode 100644 index 4aec297c7..000000000 --- a/jwql/tests/test_bokeh_templating_interface.yaml +++ /dev/null @@ -1,26 +0,0 @@ -- !Slider: &a_slider # a slider for the a value - ref: "a_slider" - title: "A" - value: 4 - range: !!python/tuple [1, 20, 0.1] - on_change: ['value', !self.controller ] -- !Slider: &b_slider # a slider for the b value - ref: "b_slider" - title: "B" - value: 2 - range: !!python/tuple [1, 20, 0.1] - on_change: ['value', !self.controller ] -- !ColumnDataSource: &figure_source # the ColumnDataSource for the figure - ref: "figure_source" - data: - x: !self.x - y: !self.y -- !Figure: &the_figure # the Figure itself, which includes a single line element. - ref: 'the_figure' - elements: - - {'kind': 'line', 'source': *figure_source, 'line_color': 'orange', 'line_width': 2} -- !Document: # the Bokeh document layout: a single column with the figure and two sliders - - !column: - - *the_figure # note the use of YAML anchors to add the Bokeh objects to the Document layout directly. - - *a_slider - - *b_slider \ No newline at end of file diff --git a/jwql/tests/test_cosmic_ray_monitor.py b/jwql/tests/test_cosmic_ray_monitor.py new file mode 100644 index 000000000..c9d590a3b --- /dev/null +++ b/jwql/tests/test_cosmic_ray_monitor.py @@ -0,0 +1,225 @@ +#! /usr/bin/env python + +"""Tests for the cosmic ray monitor module. + + Authors + ------- + + - Mike Engesser + + Use + --- + + These tests can be run via the command line (omit the ``-s`` to + suppress verbose output to stdout): + :: + + pytest -s test_cosmic_ray_monitor.py + """ + +import os + +from astropy.io import fits +import numpy as np +import pytest + +from jwql.instrument_monitors.common_monitors.cosmic_ray_monitor import CosmicRay +from jwql.database.database_interface import MIRICosmicRayQueryHistory +from jwql.utils.utils import get_config + +ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') + + +def define_test_data(nints): + """Define the data to test with. + + Parameters + ---------- + nints : int + The number of integrations + + Returns + ------- + data : numpy.ndarray + + """ + if nints == 1: + data = np.ones((2, 5, 10, 10)) + rate_data = np.ones((10, 10)) + else: + data = np.ones((2, 5, 10, 10)) + rate_data = np.ones((2, 10, 10)) + + filesystem = get_config()['filesystem'] + filename = os.path.join(filesystem, 'public', 'jw02733', 'jw02733002001', + 'jw02733002001_02101_00001_mirimage_rate.fits') + aperture = 'MIRIM_FULL' + + return data, rate_data, filename, aperture + + +def define_fake_test_data(): + """Create some fake data to test with + """ + # Create fake ramp and rates Signal goes up by 1 in each group + data = np.zeros((2, 5, 10, 10)) + for group in range(5): + data[:, group, :, :] = group + + rates = np.ones((2, 10, 10)) + + # Add in jumps + data[0, 3:, 4, 4] += 10. + data[0, 1:, 3, 3] -= 5. + data[1, 2:, 2, 2] += 3. 
+ + header = {'TGROUP': 1.0} + jump_coords = [(0, 3, 4, 4), (0, 1, 3, 3), (1, 2, 2, 2)] + prior_coords = [(0, 2, 4, 4), (0, 0, 3, 3), (1, 1, 2, 2)] + + return data, rates, header, jump_coords, prior_coords + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_get_jump_data(): + """Test the ``get_jump_data`` function""" + + cr = CosmicRay() + _, _, filename, _ = define_test_data(2) + + header, data, dq = cr.get_jump_data(filename) + + assert type(header) == fits.header.Header + assert type(data) == np.ndarray + assert type(dq) == np.ndarray + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_get_rate_data(): + """Test the ``get_rate_data`` function""" + + cr = CosmicRay() + _, _, filename, _ = define_test_data(2) + + data = cr.get_rate_data(filename) + + assert type(data) == np.ndarray + + +def test_get_cr_rate(): + """Test the ``get_cr_rate`` function""" + + cr = CosmicRay() + jumps = 100 + header = fits.header.Header() + header['EFFEXPTM'] = 110. + header['TGROUP'] = 10. + header['SUBSIZE1'] = 50. + header['SUBSIZE2'] = 50. + + rate = cr.get_cr_rate(jumps, header) + assert rate == 0.0004 + + +def test_group_before(): + """Test the ``group_before`` function""" + + cr = CosmicRay() + + jump_locations = [(2, 1, 1)] + cr.nints = 1 + + assert cr.group_before(jump_locations) == [(1, 1, 1)] + + jump_locations = [(1, 2, 1, 1)] + cr.nints = 2 + + assert cr.group_before(jump_locations) == [(1, 1, 1, 1)] + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_magnitude(): + """Test the ``magnitude`` method""" + + cr = CosmicRay() + + cr.nints = 5 + data, rate_data, filename, aperture = define_test_data(cr.nints) + header = fits.getheader(filename) + coord = (1, 2, 1, 1) + coord_gb = (1, 1, 1, 1) + mag = cr.magnitude(coord, coord_gb, rate_data, data, header) + assert mag == -3 + + cr.nints = 1 + data, rate_data, filename, aperture = define_test_data(cr.nints) + coord = (1, 1, 1) + coord_gb = (0, 1, 1) + mag = cr.magnitude(coord, coord_gb, rate_data, data, header) + assert mag == -3 + + +def test_magnitude_fake_data(): + """Test the magnitude method using locally-constructed fake data + """ + data, rate, header, jump_coords, prior_coords = define_fake_test_data() + + cr = CosmicRay() + cr.nints = 2 + mag = cr.magnitude(jump_coords[0], prior_coords[0], rate, data, header) + assert mag == 10.
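+ # get_cr_mags (tested below) returns the magnitudes accumulated into a histogram-like array of + # 2 * 65536 + 1 bins spanning -65536 to 65536, so the assertions check individual bins through + # the bin_indices lookup rather than comparing lists of individual magnitudes.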
+ + +def test_get_cr_mags_fake_data(): + """Test the calculation of multiple CR magnitudes""" + data, rate, header, jump_coords, prior_coords = define_fake_test_data() + bin_indices = np.arange(65536 * 2 + 1, dtype=np.int32) - 65536 + + cr = CosmicRay() + cr.nints = 2 + mags, outliers = cr.get_cr_mags(jump_coords, prior_coords, rate, data, header) + assert len(mags) == 65536 * 2 + 1 # assert that it's a bin + assert mags[bin_indices[10]] == 1 + assert mags[bin_indices[-5]] == 1 + assert mags[bin_indices[3]] == 1 + + cr_one_int = CosmicRay() + cr_one_int.nints = 1 + int1_jump_coords = [c[1:] for c in jump_coords[0:2]] + int1_prior_coords = [c[1:] for c in prior_coords[0:2]] + mags, outliers = cr_one_int.get_cr_mags(int1_jump_coords, int1_prior_coords, rate[0, :, :], data, header) + assert mags[bin_indices[10]] == 1 + assert mags[bin_indices[-5]] == 1 + assert mags[bin_indices[3]] == 0 + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_most_recent_search(): + """Test the ``most_recent_search`` function""" + + cr = CosmicRay() + _, _, _, aperture = define_test_data(1) + + cr.aperture = aperture + cr.query_table = MIRICosmicRayQueryHistory + + result = cr.most_recent_search() + + assert isinstance(result, float) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_query_mast(): + """Test the ``query_mast`` function""" + + cr = CosmicRay() + _, _, _, aperture = define_test_data(1) + + cr.aperture = aperture + cr.instrument = 'miri' + cr.query_start = 57357.0 + cr.query_end = 57405.0 + + result = cr.query_mast() + + assert len(result) == 5 diff --git a/jwql/tests/test_dark_monitor.py b/jwql/tests/test_dark_monitor.py index 7b425d6b6..9cb11a96f 100644 --- a/jwql/tests/test_dark_monitor.py +++ b/jwql/tests/test_dark_monitor.py @@ -17,13 +17,16 @@ pytest -s test_dark_monitor.py """ +import datetime import os import pytest from astropy.time import Time import numpy as np +from jwql.database import database_interface as di from jwql.instrument_monitors.common_monitors import dark_monitor +from jwql.tests.resources import has_test_db from jwql.utils.monitor_utils import mast_query_darks from jwql.utils.utils import get_config @@ -55,6 +58,7 @@ def test_find_hot_dead_pixels(): assert np.all(dead[1] == np.array([6, 3])) +@pytest.mark.skip(reason='Needs update: different values than expected') @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_get_metadata(): """Test retrieval of metadata from input file""" @@ -72,6 +76,7 @@ def test_get_metadata(): assert monitor.frame_time == 10.5 +@pytest.mark.skip(reason='Needs update: no data returned') @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Currently no data in astroquery.mast. 
This can be removed for JWST operations.') def test_mast_query_darks(): """Test that the MAST query for darks is functional""" @@ -131,3 +136,83 @@ def test_shift_to_full_frame(): assert np.all(new_coords[0] == np.array([518, 519])) assert np.all(new_coords[1] == np.array([518, 515])) + + +@pytest.mark.skipif(not has_test_db(), reason='Modifies test database.') +def test_add_bad_pix(): + coord = ([1, 2, 3], [4, 5, 6]) + pixel_type = 'test_new_pixel_type' + files = ['test.fits'] + obs_start = obs_mid = obs_end = datetime.datetime.now() + baseline = 'baseline.fits' + mean_file = 'meanfile.fits' + + dark = dark_monitor.Dark() + dark.instrument = 'nircam' + dark.detector = 'nrcalong' + dark.identify_tables() + + try: + dark.add_bad_pix(coord, pixel_type, files, mean_file, + baseline, obs_start, obs_mid, obs_end) + new_entries = di.session.query(dark.pixel_table).filter( + dark.pixel_table.type == pixel_type) + + assert new_entries.count() == 1 + assert new_entries[0].baseline_file == baseline + assert np.all(new_entries[0].x_coord == coord[0]) + assert np.all(new_entries[0].y_coord == coord[1]) + finally: + # clean up + di.session.query(dark.pixel_table).filter( + dark.pixel_table.type == pixel_type).delete() + di.session.commit() + assert di.session.query(dark.pixel_table).filter( + dark.pixel_table.type == pixel_type).count() == 0 + + +@pytest.mark.skipif(not has_test_db(), reason='Modifies test database.') +def test_exclude_existing_badpix(): + coord = ([9999], [9999]) + pixel_type = 'hot' + + dark = dark_monitor.Dark() + dark.instrument = 'nircam' + dark.detector = 'nrcalong' + dark.identify_tables() + + # bad pixel type should raise error + with pytest.raises(ValueError) as err: + dark.exclude_existing_badpix(coord, 'test_bad_type') + assert 'bad pixel type' in str(err) + + files = ['test.fits'] + obs_start = obs_mid = obs_end = datetime.datetime.now() + baseline = 'test_baseline.fits' + mean_file = 'test_meanfile.fits' + try: + # new pixel should not be found + new_x, new_y = dark.exclude_existing_badpix(coord, pixel_type) + assert new_x == [9999] + assert new_y == [9999] + + # add pixel, test again + dark.add_bad_pix(coord, pixel_type, files, mean_file, + baseline, obs_start, obs_mid, obs_end) + new_entries = di.session.query(dark.pixel_table).filter( + dark.pixel_table.baseline_file == baseline) + + assert new_entries.count() == 1 + + # new pixel should be found + new_x, new_y = dark.exclude_existing_badpix(coord, pixel_type) + assert new_x == [] + assert new_y == [] + + finally: + # clean up + di.session.query(dark.pixel_table).filter( + dark.pixel_table.baseline_file == baseline).delete() + di.session.commit() + assert di.session.query(dark.pixel_table).filter( + dark.pixel_table.baseline_file == baseline).count() == 0 diff --git a/jwql/tests/test_data_containers.py b/jwql/tests/test_data_containers.py index f929da010..ee4ef5454 100644 --- a/jwql/tests/test_data_containers.py +++ b/jwql/tests/test_data_containers.py @@ -7,6 +7,10 @@ ------- - Matthew Bourque + - Mees Fix + - Bryan Hilbert + - Bradley Sappington + - Melanie Clarke Use --- @@ -20,14 +24,99 @@ """ import glob +import json import os +import numpy as np +import pandas as pd import pytest +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") + # Skip testing this module if on Github Actions ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') from jwql.website.apps.jwql import data_containers -from jwql.utils.utils import get_config +from 
jwql.tests.resources import ( + MockSessionFileAnomaly, MockSessionGroupAnomaly, + MockGetRequest, MockPostRequest) +from jwql.utils import constants + +if not ON_GITHUB_ACTIONS: + from jwql.utils.utils import get_config + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') +def test_build_table(): + tab = data_containers.build_table('filesystem_general') + assert isinstance(tab, pd.DataFrame) + assert len(tab['date']) > 0 + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') +@pytest.mark.parametrize('filter_keys', + [{'instrument': 'NIRSpec', 'proposal': '2589', + 'obsnum': '006', 'look': 'All'}, + {'instrument': 'NIRCam', 'detector': 'NRCBLONG', + 'proposal': '2733', 'obsnum': '001'}, + {'instrument': 'MIRI', 'exp_type': 'MIR_IMAGE', + 'proposal': '1524', 'obsnum': '015'}, + {'instrument': 'FGS', 'cat_type': 'COM', + 'proposal': '1155'} + ]) +def test_filter_root_files(filter_keys): + rfi = data_containers.filter_root_files(**filter_keys) + assert len(rfi) > 0 + assert len(rfi) < 100 + + for key, value in filter_keys.items(): + if str(value).strip().lower() == 'all': + continue + elif key in ['cat_type', 'obsnum']: + # values returned are foreign keys + continue + else: + rf_test = [str(rf[key]) == str(value) for rf in rfi] + assert all(rf_test) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') +def test_filter_root_files_sorting(): + filter_keys = {'instrument': 'NIRSpec', 'proposal': '2589', + 'obsnum': '006'} + + rfi = data_containers.filter_root_files(**filter_keys, sort_as='Ascending') + assert len(rfi) > 3 + for i, rf in enumerate(rfi[1:]): + assert rf['root_name'] > rfi[i]['root_name'] + + rfi = data_containers.filter_root_files(**filter_keys, sort_as='Descending') + for i, rf in enumerate(rfi[1:]): + assert rf['root_name'] < rfi[i]['root_name'] + + rfi = data_containers.filter_root_files(**filter_keys, sort_as='Recent') + for i, rf in enumerate(rfi[1:]): + assert rf['expstart'] <= rfi[i]['expstart'] + + rfi = data_containers.filter_root_files(**filter_keys, sort_as='Oldest') + for i, rf in enumerate(rfi[1:]): + assert rf['expstart'] >= rfi[i]['expstart'] + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_create_archived_proposals_context(tmp_path, mocker): + # write to a temporary directory + mocker.patch.object(data_containers, 'OUTPUT_DIR', str(tmp_path)) + archive_dir = tmp_path / 'archive_page' + os.mkdir(archive_dir) + + data_containers.create_archived_proposals_context('nirspec') + context_file = str(archive_dir / 'NIRSpec_archive_context.json') + assert os.path.isfile(context_file) + + with open(context_file, 'r') as obj: + context = json.load(obj) + assert context['inst'] == 'NIRSpec' + assert context['num_proposals'] > 0 def test_get_acknowledgements(): @@ -47,19 +136,202 @@ def test_get_all_proposals(): assert len(proposals) > 0 -@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') -def test_get_expstart(): +@pytest.mark.parametrize('untracked,input_suffixes,expected', + [(True, [], ([], set())), + (True, ['rate', 'uncal', 'bad'], + (['uncal', 'rate', 'bad'], {'bad'})), + (False, ['rate', 'uncal', 'bad'], + ['uncal', 'rate', 'bad']), + (True, + ['rate', 'uncal', 'bad', + 'o006_crfints', 'o001_crf'], + (['uncal', 'rate', 'o001_crf', + 'o006_crfints', 'bad'], {'bad'})), + (False, + ['rate', 'uncal', 'bad', + 'o006_crfints', 'o001_crf'], + ['uncal', 'rate', 'o001_crf', + 'o006_crfints', 
'bad']), + ]) +def test_get_available_suffixes(untracked, input_suffixes, expected): + result = data_containers.get_available_suffixes( + input_suffixes, return_untracked=untracked) + assert result == expected + + +# TODO - These tests will need to be refactored to account for Django Models +# TODO - We will need to implement django based testing to account for all Model based tests. +""" +def test_get_current_flagged_anomalies(mocker): + # get a sample query group with 2 files + + rootname = 'jw02589006001_04101_00001-seg001' + instrument = 'NIRSpec' + + # mock a single shared anomaly type + mocker.patch.object(data_containers.di, 'session', MockSessionGroupAnomaly()) + + result = data_containers.get_current_flagged_anomalies( + rootname, instrument, n_match=2) + assert result == ['persistence'] + + # get a sample query for 1 file + rootname = 'jw02589006001_04101_00001-seg001_nrs1' + + # mock two anomalies for this file + mocker.patch.object(data_containers.di, 'session', MockSessionFileAnomaly()) + + result = data_containers.get_current_flagged_anomalies( + rootname, instrument, n_match=1) + assert result == ['persistence', 'crosstalk'] + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') +def test_get_anomaly_form_get(mocker): + request = MockGetRequest() + inst = 'NIRSpec' + file_root = 'jw02589006001_04101_00001-seg001_nrs1' + + # mock two anomalies for this file + mocker.patch.object(data_containers.di, 'session', + MockSessionFileAnomaly()) + + form = data_containers.get_anomaly_form(request, inst, file_root) + + # form should contain all anomaly options and two should be checked + html = str(form) + for anomaly in constants.ANOMALY_CHOICES_NIRSPEC: + assert anomaly[1] in html + assert html.count('checked') == 2 + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') +def test_get_anomaly_form_post(mocker): + request = MockPostRequest() + inst = 'NIRSpec' + file_root = 'jw02589006001_04101_00001-seg001_nrs1' + + # mock two anomalies for this file + mocker.patch.object(data_containers.di, 'session', + MockSessionFileAnomaly()) + + # post a different selection: others are deselected + request.POST['anomaly_choices'] = ['optical_short'] + + # mock form validity and update functions + mocker.patch.object(data_containers.InstrumentAnomalySubmitForm, + 'is_valid', return_value=True) + update_mock = mocker.patch.object( + data_containers.InstrumentAnomalySubmitForm, 'update_anomaly_table') + + form = data_containers.get_anomaly_form(request, inst, file_root) + + # form should contain all anomaly options and only + # the chosen one should be checked + html = str(form) + for anomaly in constants.ANOMALY_CHOICES_NIRSPEC: + assert anomaly[1] in html + assert html.count('checked') == 1 + + # message should indicate success, update should have been called + assert 'Anomaly submitted successfully' in request._messages.messages + assert update_mock.call_count == 1 + + # mock invalid form + mocker.patch.object(data_containers.InstrumentAnomalySubmitForm, + 'is_valid', return_value=False) + data_containers.get_anomaly_form(request, inst, file_root) + + # messages indicate failure, update is not called again + assert 'Failed to submit anomaly' in request._messages.messages + assert update_mock.call_count == 1 + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') +def test_get_anomaly_form_post_group(mocker): + request = MockPostRequest() + inst = 'NIRSpec' + file_root = 
'jw02589006001_04101_00001-seg001' + + # mock anomalies for the group + mocker.patch.object(data_containers.di, 'session', + MockSessionGroupAnomaly()) + + # post a different selection: others are deselected, + # unless they belong only to the file, not to the group + # as a whole + request.POST['anomaly_choices'] = ['optical_short'] + + # mock form validity and update functions + mocker.patch.object(data_containers.InstrumentAnomalySubmitForm, + 'is_valid', return_value=True) + update_mock = mocker.patch.object( + data_containers.InstrumentAnomalySubmitForm, 'update_anomaly_table') + + form = data_containers.get_anomaly_form(request, inst, file_root) + + # form should contain all anomaly options and only + # the chosen one should be checked + html = str(form) + for anomaly in constants.ANOMALY_CHOICES_NIRSPEC: + assert anomaly[1] in html + assert html.count('checked') == 1 + + # message should indicate success, update should have been + # called for both files + assert 'Anomaly submitted successfully' in request._messages.messages + assert update_mock.call_count == 2 + + # mock invalid form + mocker.patch.object(data_containers.InstrumentAnomalySubmitForm, + 'is_valid', return_value=False) + data_containers.get_anomaly_form(request, inst, file_root) + + # messages indicate failure, update is not called again + assert 'Failed to submit anomaly' in request._messages.messages + assert update_mock.call_count == 2 +""" + +def test_get_dashboard_components(): + request = MockPostRequest() + + # empty POST + dash = data_containers.get_dashboard_components(request) + assert dash.delta_t is None + + # POST contains time delta + request.POST['time_delta_value'] = True + + # all time = None + request.POST['timedelta'] = 'All Time' + dash = data_containers.get_dashboard_components(request) + assert dash.delta_t is None + + # specific value + request.POST['timedelta'] = '1 Day' + dash = data_containers.get_dashboard_components(request) + assert dash.delta_t == pd.DateOffset(days=1) + + +@pytest.mark.parametrize('inst,fileroot,value', + [('NIRCam', + 'jw01068001001_02102_00001_nrcb1', 59714), + ('NIRSpec', + 'jw02589006001_04101_00001-seg002_nrs2', 59777), + ('NIRSpec', 'bad_filename', 0)]) +def test_get_expstart(inst, fileroot, value): """Tests the ``get_expstart`` function.""" + expstart = data_containers.get_expstart(inst, fileroot) - expstart = data_containers.get_expstart('FGS', 'jw00624008002_06201_00002_guider2') - assert isinstance(expstart, float) + # if mast query failed, it will return 0 + # otherwise, it should have a known value for this file + assert np.isclose(expstart, value, atol=1) -@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_get_filenames_by_instrument(): """Tests the ``get_filenames_by_instrument`` function.""" - - filepaths = data_containers.get_filenames_by_instrument('FGS') + # queries MAST; should not need central storage + filepaths = data_containers.get_filenames_by_instrument('NIRCam', '1068') assert isinstance(filepaths, list) assert len(filepaths) > 0 @@ -67,8 +339,8 @@ def test_get_filenames_by_instrument(): @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_get_filenames_by_proposal(): """Tests the ``get_filenames_by_proposal`` function.""" - - filenames = data_containers.get_filenames_by_proposal('88600') + pid = '2589' + filenames = data_containers.get_filenames_by_proposal(pid) assert isinstance(filenames, list) assert len(filenames) > 0 @@ -76,17 +348,68 @@ def 
test_get_filenames_by_proposal(): @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_get_filenames_by_rootname(): """Tests the ``get_filenames_by_rootname`` function.""" - - filenames = data_containers.get_filenames_by_rootname('jw86600008001_02101_00007_guider2') + rname = 'jw02589006001_04101_00001-seg002_nrs2' + filenames = data_containers.get_filenames_by_rootname(rname) assert isinstance(filenames, list) assert len(filenames) > 0 +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +@pytest.mark.parametrize('pid,rname,success', + [('2589', None, True), + (None, 'jw02589006001_04101_00001-seg002_nrs2', True), + ('2589', 'jw02589006001_04101_00001-seg002_nrs2', True), + (None, None, False)]) +def test_get_filesystem_filenames(pid, rname, success): + """Tests the ``get_filesystem_filenames`` function.""" + filenames = data_containers.get_filesystem_filenames( + proposal=pid, rootname=rname) + assert isinstance(filenames, list) + if not success: + assert len(filenames) == 0 + else: + assert len(filenames) > 0 + + # check specific file_types + fits_files = [f for f in filenames if f.endswith('.fits')] + assert len(fits_files) < len(filenames) + + fits_filenames = data_containers.get_filesystem_filenames( + proposal=pid, rootname=rname, file_types=['fits']) + assert isinstance(fits_filenames, list) + assert len(fits_filenames) > 0 + assert len(fits_filenames) == len(fits_files) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_get_filesystem_filenames_options(): + """Tests the ``get_filesystem_filenames`` function.""" + pid = '2589' + + # basenames only + filenames = data_containers.get_filesystem_filenames( + proposal=pid, full_path=False, file_types=['fits']) + assert not os.path.isfile(filenames[0]) + + # full path + filenames = data_containers.get_filesystem_filenames( + proposal=pid, full_path=True, file_types=['fits']) + assert os.path.isfile(filenames[0]) + + # sorted + sorted_filenames = data_containers.get_filesystem_filenames( + proposal=pid, sort_names=True, file_types=['fits']) + unsorted_filenames = data_containers.get_filesystem_filenames( + proposal=pid, sort_names=False, file_types=['fits']) + assert sorted_filenames != unsorted_filenames + assert sorted_filenames == sorted(unsorted_filenames) + + @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_get_header_info(): """Tests the ``get_header_info`` function.""" - header = data_containers.get_header_info('jw86600008001_02101_00007_guider2_uncal.fits') + header = data_containers.get_header_info('jw01068001001_02102_00001_nrcb1', 'uncal') assert isinstance(header, dict) assert len(header) > 0 @@ -94,8 +417,7 @@ def test_get_header_info(): @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_get_image_info(): """Tests the ``get_image_info`` function.""" - - image_info = data_containers.get_image_info('jw86600008001_02101_00007_guider2', False) + image_info = data_containers.get_image_info('jw01068001001_02102_00001_nrcb1') assert isinstance(image_info, dict) @@ -104,29 +426,78 @@ def test_get_image_info(): assert key in image_info -@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_get_instrument_proposals(): """Tests the ``get_instrument_proposals`` function.""" - + # queries MAST, no need for central storage proposals = data_containers.get_instrument_proposals('Fgs') assert 
isinstance(proposals, list) assert len(proposals) > 0 -@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') -def test_get_preview_images_by_instrument(): - """Tests the ``get_preview_images_by_instrument`` function.""" - - preview_images = data_containers.get_preview_images_by_instrument('fgs') - assert isinstance(preview_images, list) - assert len(preview_images) > 0 +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') +@pytest.mark.parametrize('keys,viewed,sort_as,exp_type,cat_type', + [(None, None, None, None, None), + (None, 'viewed', None, None, None), + (None, 'Viewed', None, None, None), + (None, 'new', None, None, None), + (None, 'New', None, None, None), + (None, None, None, 'NRS_MSATA', None), + # (None, None, None, None, 'CAL'), # cat_type not implemented yet + (['expstart'], 'new', 'ascending', None, None), + (['expstart'], 'new', 'descending', None, None), + (['expstart'], 'new', 'recent', None, None), + ([], 'viewed', None, None, None), + ([], 'new', None, None, None), + ([], None, None, None, None), + (['proposal', 'obsnum', 'other', + 'prop_id', 'expstart'], 'viewed', None, None, None), + (['proposal', 'obsnum', 'other', + 'prop_id', 'expstart'], 'new', None, None, None), + (['proposal', 'obsnum', 'other', + 'prop_id', 'expstart'], None, None, None, None)]) +def test_get_instrument_looks(keys, viewed, sort_as, exp_type, cat_type): + """Tests the ``get_instrument_looks`` function.""" + + return_keys, looks = data_containers.get_instrument_looks( + 'nirspec', additional_keys=keys, look=viewed, sort_as=sort_as, + exp_type=exp_type, cat_type=cat_type) + assert isinstance(return_keys, list) + assert isinstance(looks, list) + + # returned keys always contains at least root name + assert len(return_keys) > 1 + assert 'root_name' in return_keys + assert 'viewed' in return_keys + + # they may also contain some keys from the instrument + # and any additional keys specified + if keys is not None: + assert len(return_keys) >= 1 + len(keys) + + # viewed depends on local database, so may or may not have results + if not str(viewed).lower() == 'viewed': + assert len(looks) > 0 + first_file = looks[0] + assert first_file['root_name'] != '' + assert isinstance(first_file['viewed'], bool) + assert len(first_file) == len(return_keys) + for key in return_keys: + assert key in first_file + + last_file = looks[-1] + if sort_as == 'ascending': + assert last_file['root_name'] > first_file['root_name'] + elif sort_as == 'recent': + assert last_file['expstart'] < first_file['expstart'] + else: + assert last_file['root_name'] < first_file['root_name'] @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_get_preview_images_by_proposal(): """Tests the ``get_preview_images_by_proposal`` function.""" - preview_images = data_containers.get_preview_images_by_proposal('88600') + preview_images = data_containers.get_preview_images_by_proposal('1033') assert isinstance(preview_images, list) assert len(preview_images) > 0 @@ -135,16 +506,23 @@ def test_get_preview_images_by_proposal(): def test_get_preview_images_by_rootname(): """Tests the ``get_preview_images_by_rootname`` function.""" - preview_images = data_containers.get_preview_images_by_rootname('jw86600008001_02101_00007_guider2') + preview_images = data_containers.get_preview_images_by_rootname('jw02589001001_02101_00001-seg001_nis') assert isinstance(preview_images, list) assert len(preview_images) > 0 +def test_get_proposals_by_category(): + 
"""Tests the ``get_proposals_by_category`` function.""" + # MAST query, no need for central storage + proposals_by_category = data_containers.get_proposals_by_category('fgs') + assert isinstance(proposals_by_category, dict) + assert len(proposals_by_category) > 0 + + @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_get_proposal_info(): """Tests the ``get_proposal_info`` function.""" - - filepaths = glob.glob(os.path.join(get_config()['filesystem'], 'jw00300', '*.fits')) + filepaths = glob.glob(os.path.join(get_config()['filesystem'], 'jw01068', '*.fits')) proposal_info = data_containers.get_proposal_info(filepaths) assert isinstance(proposal_info, dict) @@ -154,38 +532,77 @@ def test_get_proposal_info(): assert key in proposal_info -@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') -def test_get_thumbnails_by_instrument(): - """Tests the ``get_thumbnails_by_instrument`` function.""" - - preview_images = data_containers.get_thumbnails_by_instrument('fgs') - assert isinstance(preview_images, list) - assert len(preview_images) > 0 - - @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_get_thumbnails_by_proposal(): """Tests the ``get_thumbnails_by_proposal`` function.""" - - preview_images = data_containers.get_thumbnails_by_proposal('88600') + preview_images = data_containers.get_thumbnails_by_proposal('01033') assert isinstance(preview_images, list) assert len(preview_images) > 0 @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') -def test_get_thumbnails_by_rootname(): - """Tests the ``get_thumbnails_by_rootname`` function.""" +def test_get_thumbnail_by_rootname(): + """Tests the ``get_thumbnail_by_rootname`` function.""" - preview_images = data_containers.get_thumbnails_by_rootname('jw86600008001_02101_00007_guider2') - assert isinstance(preview_images, list) + preview_images = data_containers.get_thumbnail_by_rootname('jw02589001001_02101_00001-seg001_nis') + assert isinstance(preview_images, str) + assert len(preview_images) > 0 + assert preview_images != 'none' + preview_images = data_containers.get_thumbnail_by_rootname('invalid_rootname') + assert isinstance(preview_images, str) assert len(preview_images) > 0 + assert preview_images == 'none' + + +def test_mast_query_by_rootname(): + """Tests the ``mast_query_by_rootname`` function.""" + instrument = 'NIRCam' + rootname1 = 'jw02767002001_02103_00005_nrcb4' + dict_stuff = data_containers.mast_query_by_rootname(instrument, rootname1) + defaults = dict(filter=dict_stuff.get('filter', ''), + detector=dict_stuff.get('detector', ''), + exp_type=dict_stuff.get('exp_type', ''), + read_pat=dict_stuff.get('readpatt', ''), + grating=dict_stuff.get('grating', ''), + patt_num=dict_stuff.get('patt_num', 0), + aperture=dict_stuff.get('apername', ''), + subarray=dict_stuff.get('subarray', ''), + pupil=dict_stuff.get('pupil', '')) + assert isinstance(defaults, dict) + + rootname2 = 'jw02084001001_04103_00001-seg003_nrca3' + dict_stuff = data_containers.mast_query_by_rootname(instrument, rootname2) + defaults = dict(filter=dict_stuff.get('filter', ''), + detector=dict_stuff.get('detector', ''), + exp_type=dict_stuff.get('exp_type', ''), + read_pat=dict_stuff.get('readpatt', ''), + grating=dict_stuff.get('grating', ''), + patt_num=dict_stuff.get('patt_num', 0), + aperture=dict_stuff.get('apername', ''), + subarray=dict_stuff.get('subarray', ''), + pupil=dict_stuff.get('pupil', '')) + assert 
isinstance(defaults, dict) + + instrument2 = 'FGS' + rootname3 = 'jw01029003001_06201_00001_guider2' + dict_stuff = data_containers.mast_query_by_rootname(instrument2, rootname3) + defaults = dict(filter=dict_stuff.get('filter', ''), + detector=dict_stuff.get('detector', ''), + exp_type=dict_stuff.get('exp_type', ''), + read_pat=dict_stuff.get('readpatt', ''), + grating=dict_stuff.get('grating', ''), + patt_num=dict_stuff.get('patt_num', 0), + aperture=dict_stuff.get('apername', ''), + subarray=dict_stuff.get('subarray', ''), + pupil=dict_stuff.get('pupil', '')) + assert isinstance(defaults, dict) @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_thumbnails_ajax(): """Tests the ``get_thumbnails_ajax`` function.""" - thumbnail_dict = data_containers.thumbnails_ajax('FGS') + thumbnail_dict = data_containers.thumbnails_ajax('NIRCam', '1068') assert isinstance(thumbnail_dict, dict) diff --git a/jwql/tests/test_database_interface.py b/jwql/tests/test_database_interface.py index 8151c5c33..08890a4f1 100755 --- a/jwql/tests/test_database_interface.py +++ b/jwql/tests/test_database_interface.py @@ -24,10 +24,12 @@ import random import string +from sqlalchemy import inspect + from jwql.database import database_interface as di +from jwql.tests.resources import has_test_db from jwql.utils.utils import get_config - # Determine if tests are being run on Github Actions ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') @@ -48,11 +50,13 @@ def test_all_tables_exist(): pass # Not all attributes of database_interface are table ORMs # Get list of tables that are actually in the database - existing_tables = di.engine.table_names() + existing_tables = inspect(di.engine).get_table_names() # Ensure that the ORMs defined in database_interface actually exist # as tables in the database for table in table_orms: + if table == 'nirspec_ta_stats': + continue assert table in existing_tables @@ -71,14 +75,13 @@ def test_anomaly_orm_factory(): assert item in table_attributes -@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to development database server.') +@pytest.mark.skipif(not has_test_db(), reason='Modifies test database.') def test_anomaly_records(): """Test to see that new records can be entered""" - # Add some data - random_rootname = ''.join(random.SystemRandom().choice(string.ascii_lowercase + - string.ascii_uppercase + - string.digits) for _ in range(10)) + random_rootname = ''.join(random.SystemRandom().choice( + string.ascii_lowercase + string.ascii_uppercase + + string.digits) for _ in range(10)) di.session.add(di.FGSAnomaly(rootname=random_rootname, flag_date=datetime.datetime.today(), user='test', ghost=True)) @@ -86,21 +89,27 @@ def test_anomaly_records(): # Test the ghosts column ghosts = di.session.query(di.FGSAnomaly)\ - .filter(di.FGSAnomaly.rootname == random_rootname)\ - .filter(di.FGSAnomaly.ghost == "True") - assert ghosts.data_frame.iloc[0]['ghost'] is True + .filter(di.FGSAnomaly.rootname == random_rootname) + assert bool(ghosts.data_frame.iloc[0]['ghost']) is True + # clean up + ghosts.delete() + di.session.commit() -@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to development database server.') + assert di.session.query(di.FGSAnomaly)\ + .filter(di.FGSAnomaly.rootname == random_rootname).count() == 0 + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires database configuration.') def test_load_connections(): """Test to see that a connection to the 
database can be established""" - session, base, engine, meta = di.load_connection(get_config()['connection_string']) assert str(type(session)) == "" - assert str(type(base)) == "" + assert str(type(base)) == "" assert str(type(engine)) == "" assert str(type(meta)) == "" + session.close() def test_monitor_orm_factory(): diff --git a/jwql/tests/test_edb.py b/jwql/tests/test_edb.py index a4a3a699e..a7da24be5 100644 --- a/jwql/tests/test_edb.py +++ b/jwql/tests/test_edb.py @@ -17,37 +17,137 @@ pytest -s test_edb.py """ - +from datetime import datetime import os +from astropy.table import Table from astropy.time import Time +import astropy.units as u +from datetime import datetime, timedelta +import numpy as np import pytest +from jwql.edb import engineering_database as ed + # Determine if tests are being run on Github Actions ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +def test_add(): + """Test addition (i.e. concatenation) of two EdbMnemonic objects""" + dates1 = np.array([datetime(2021, 12, 18, 7, n, 0) for n in range(20, 30)]) + data1 = np.array([5, 5, 5, 9, 9, 9, 9, 9, 2, 2]) + tab = Table() + tab["dates"] = dates1 + tab["euvalues"] = data1 + blocks = [0, 3, 8] + info = {} + info['unit'] = 'V' + info['tlmMnemonic'] = 'TEST_VOLTAGE' + mnemonic1 = ed.EdbMnemonic('TEST_VOLTAGE', Time('2021-12-18T07:20:00'), Time('2021-12-18T07:30:00'), tab, {}, info, blocks=blocks) + + dates2 = np.array([datetime(2021, 12, 18, 7, n, 0) for n in range(27, 37)]) + data2 = np.array([9, 2, 2, 2, 19, 19, 19, 19, 12, 12]) + tab = Table() + tab["dates"] = dates2 + tab["euvalues"] = data2 + blocks = [0, 1, 4, 8] + info = {} + info['unit'] = 'V' + info['tlmMnemonic'] = 'TEST_VOLTAGE' + mnemonic2 = ed.EdbMnemonic('TEST_VOLTAGE', Time('2021-12-18T07:27:00'), Time('2021-12-18T07:33:00'), tab, {}, info, blocks=blocks) + + added = mnemonic1 + mnemonic2 + assert all(added.data["euvalues"] == np.array([5, 5, 5, 9, 9, 9, 9, 9, 2, 2, 2, 19, 19, 19, 19, 12, 12])) + assert all(added.data["dates"] == np.append(dates1, dates2[3:])) + assert added.info['unit'] == 'V' + + +def test_change_only_bounding_points(): + """Make sure we correctly add starting and ending time entries to + a set of change-only data + """ + dates = [datetime(2022, 3, 2, 12, i) for i in range(10)] + values = np.arange(10) + starting_time = datetime(2022, 3, 2, 12, 3, 3) + ending_time = datetime(2022, 3, 2, 12, 8, 4) + + new_dates, new_values = ed.change_only_bounding_points(dates, values, starting_time, ending_time) + + expected_dates = [starting_time] + dates[4:9] + [ending_time] + expected_values = list(values[3:9]) + [values[8]] + + assert np.all(new_dates == expected_dates) + assert np.all(new_values == expected_values) + + +def test_daily_stats(): + """Test that the daily statistics are calculated correctly + """ + dates = np.array([datetime(2021, 12, 18, 12, 0, 0) + timedelta(hours=n) for n in range(0, 75, 2)]) + data = [10.] * 12 + data.extend([25.] * 12) + data.extend([12.] * 12) + data.extend([50.] 
* 2) + tab = Table() + tab["dates"] = dates + tab["euvalues"] = data + mnemonic = ed.EdbMnemonic('SOMETHING', Time('2021-12-18T02:00:00'), Time('2021-12-21T14:00:00'), tab, {}, {}) + mnemonic.meta = {'Count': 1, + 'TlmMnemonics': [{'TlmMnemonic': 'SOMETHING', + 'AllPoints': 1}]} + + mnemonic.daily_stats() + assert np.all(mnemonic.mean == np.array([10., 25., 12., 50.])) + assert np.all(mnemonic.median == np.array([10., 25., 12., 50.])) + assert np.all(mnemonic.stdev == np.array([0., 0., 0., 0.])) + + +def test_full_stats(): + """Test that the statistics calculated over the entire data set are + correct + """ + dates = np.array([datetime(2021, 12, 18, 7, n, 0) for n in range(20, 30)]) + data = np.arange(1, 11) + tab = Table() + tab["dates"] = dates + tab["euvalues"] = data + mnemonic = ed.EdbMnemonic('SOMETHING', Time('2021-12-18T07:20:00'), Time('2021-12-18T07:30:00'), tab, {}, {}) + mnemonic.meta = {'Count': 1, + 'TlmMnemonics': [{'TlmMnemonic': 'SOMETHING', + 'AllPoints': 1}]} + mnemonic.full_stats() + assert mnemonic.mean[0] == 5.5 + assert mnemonic.median[0] == 5.5 + assert np.isclose(mnemonic.stdev[0], 2.8722813232690143) + assert mnemonic.median_times[0] == datetime(2021, 12, 18, 7, 24, 30) + + @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_get_mnemonic(): """Test the query of a single mnemonic.""" from jwql.edb.engineering_database import get_mnemonic mnemonic_identifier = 'IMIR_HK_ICE_SEC_VOLT4' - start_time = Time('2019-01-16 00:00:00.000', format='iso') - end_time = Time('2019-01-16 00:01:00.000', format='iso') + start_time = Time('2021-09-02 00:00:00.000', format='iso') + end_time = Time('2021-09-02 12:00:00.000', format='iso') mnemonic = get_mnemonic(mnemonic_identifier, start_time, end_time) - assert len(mnemonic.data) == mnemonic.meta['paging']['rows'] - - -@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') -def test_get_mnemonic_info(): - """Test retrieval of mnemonic info.""" - from jwql.edb.engineering_database import get_mnemonic_info - - mnemonic_identifier = 'IMIR_HK_ICE_SEC_VOLT4' - info = get_mnemonic_info(mnemonic_identifier) - assert 'subsystem' in info.keys() + assert len(mnemonic) == len(mnemonic.data["dates"]) + assert mnemonic.meta == {'Count': 1, + 'TlmMnemonics': [{'TlmMnemonic': 'IMIR_HK_ICE_SEC_VOLT4', + 'Subsystem': 'MIRI', + 'RawType': 'FL32', + 'EUType': 'FL32', + 'SQLType': 'REAL', + 'AllPoints': 1}]} + assert mnemonic.info == {'subsystem': 'MIRI', + 'tlmMnemonic': 'IMIR_HK_ICE_SEC_VOLT4', + 'tlmIdentifier': 210961, + 'description': 'MIR Housekeeping Packet ICE Motor Secondary Voltage 4', + 'sqlDataType': 'real', + 'unit': 'V', + 'longDescription': None} @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') @@ -61,3 +161,131 @@ def test_get_mnemonics(): mnemonic_dict = get_mnemonics(mnemonics, start_time, end_time) assert len(mnemonic_dict) == len(mnemonics) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_get_mnemonic_info(): + """Test retrieval of mnemonic info.""" + from jwql.edb.engineering_database import get_mnemonic_info + + mnemonic_identifier = 'IMIR_HK_ICE_SEC_VOLT4' + info = get_mnemonic_info(mnemonic_identifier) + assert info == {'subsystem': 'MIRI', + 'tlmMnemonic': 'IMIR_HK_ICE_SEC_VOLT4', + 'tlmIdentifier': 210961, + 'description': 'MIR Housekeeping Packet ICE Motor Secondary Voltage 4', + 'sqlDataType': 'real', + 'unit': 'V', + 'longDescription': None} + + +def 
test_interpolation(): + """Test interpolation of an EdbMnemonic object""" + dates = np.array([datetime(2021, 12, 18, 7, n, 0) for n in range(20, 30)]) + data = np.arange(1, 11) + tab = Table() + tab["dates"] = dates + tab["euvalues"] = data + blocks = [0, 3, 8, 10] + mnemonic = ed.EdbMnemonic('SOMETHING', Time('2021-12-18T07:20:00'), Time('2021-12-18T07:30:00'), tab, {}, {}, blocks=blocks) + + # Indicate that these are not change-only data + mnemonic.meta = {'TlmMnemonics': [{'AllPoints': 1}]} + + # Note that the first element of interp_times is before the earliest value + # of the mnemonic time, so it should be ignored. + base_interp = datetime(2021, 12, 18, 7, 19, 30) + interp_times = [base_interp + timedelta(seconds=30 * n) for n in range(0, 20)] + + mnemonic.interpolate(interp_times) + assert all(mnemonic.data["dates"].data == interp_times[1:]) + assert all(mnemonic.data["euvalues"].data == np.arange(10, 101, 5) / 10.) + assert all(mnemonic.blocks == np.array([0, 6, 16, len(mnemonic)])) + + +def test_interpolation_change_only(): + """Test interpolation of change-only data""" + dates = np.array([datetime(2021, 12, 18, 7, n, 0) for n in range(20, 30)]) + data = np.arange(1, 11) + tab = Table() + tab["dates"] = dates + tab["euvalues"] = data + blocks = [0, 3, 8, 10] + mnemonic = ed.EdbMnemonic('SOMETHING', Time('2021-12-18T07:20:00'), Time('2021-12-18T07:30:00'), tab, {}, {}, blocks=blocks) + + # Indicate that these are change-only data + mnemonic.meta = {'TlmMnemonics': [{'AllPoints': 0}]} + + # Note that the first element of interp_times is before the earliest value + # of the mnemonic time, so it should be ignored. + base_interp = datetime(2021, 12, 18, 7, 19, 30) + interp_times = [base_interp + timedelta(seconds=30 * n) for n in range(0, 20)] + + mnemonic.interpolate(interp_times) + expected_values = np.array([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10]) + expected_blocks = np.array([0, 6, 16, 19]) + assert all(mnemonic.data["euvalues"].data == expected_values) + assert all(mnemonic.data["dates"].data == np.array(interp_times[1:])) + assert all(mnemonic.blocks == expected_blocks) + + +def test_multiplication(): + """Test multiplication of two EdbMnemonic objects""" + dates1 = np.array([datetime(2021, 12, 18, 7, n, 0) for n in range(20, 30)]) + data1 = np.array([5, 5, 5, 9, 9, 9, 9, 9, 2, 2]) + tab = Table() + tab["dates"] = dates1 + tab["euvalues"] = data1 + blocks1 = [0, 3, 8, 10] + info = {} + info['unit'] = 'V' + info['tlmMnemonic'] = 'TEST_VOLTAGE' + info['description'] = 'Voltage at some place' + mnemonic1 = ed.EdbMnemonic('TEST_VOLTAGE', Time('2021-12-18T07:20:00'), Time('2021-12-18T07:30:00'), tab, {}, info, blocks=blocks1) + mnemonic1.meta = {'Count': 1, + 'TlmMnemonics': [{'TlmMnemonic': 'TEST_VOLTAGE', + 'AllPoints': 1}]} + + dates2 = np.array([datetime(2021, 12, 18, 7, n, 10) for n in range(20, 30)]) + data2 = np.array([15, 15, 15, 19, 19, 19, 19, 19, 12, 12]) + tab = Table() + tab["dates"] = dates2 + tab["euvalues"] = data2 + blocks2 = [0, 3, 8, 10] + info = {} + info['unit'] = 'A' + info['tlmMnemonic'] = 'TEST_CURRENT' + info['description'] = 'Current at some place' + mnemonic2 = ed.EdbMnemonic('TEST_CURRENT', Time('2021-12-18T07:20:10'), Time('2021-12-18T07:30:10'), tab, {}, info, blocks=blocks2) + mnemonic2.meta = {'Count': 1, + 'TlmMnemonics': [{'TlmMnemonic': 'TEST_CURRENT', + 'AllPoints': 1}]} + + prod = mnemonic1 * mnemonic2 + assert np.allclose(prod.data["euvalues"].data, + np.array([75.0, 75.0, 165.0, 171.0, 171.0, 171.0, + 171.0, 26.333333333333336, 
24.0])) + assert all(prod.data["dates"].data == mnemonic1.data["dates"][1:]) + assert all(prod.blocks == [0, 2, 7, 9]) + assert prod.info['unit'] == 'W' + assert prod.info['tlmMnemonic'] == 'TEST_VOLTAGE * TEST_CURRENT' + + +def test_timed_stats(): + """Break up data into chunks of a given duration""" + dates = np.array([datetime(2021, 12, 18, 12, 0, 0) + timedelta(hours=n) for n in range(0, 75, 2)]) + block_val = np.array([1, 1.1, 1, 1.1, 1, 1.1]) + data = np.concatenate((block_val, block_val + 1, block_val + 2, block_val + 3, block_val + 4, block_val + 5)) + data = np.append(data, np.array([95., 97.])) + + tab = Table() + tab["dates"] = dates + tab["euvalues"] = data + mnemonic = ed.EdbMnemonic('SOMETHING', Time('2021-12-18T02:00:00'), Time('2021-12-21T14:00:00'), tab, {}, {}) + mnemonic.meta = {'Count': 1, + 'TlmMnemonics': [{'TlmMnemonic': 'SOMETHING', + 'AllPoints': 1}]} + duration = 12 * u.hour + mnemonic.mean_time_block = duration + mnemonic.timed_stats(sigma=3) + assert np.all(np.isclose(mnemonic.mean, np.append(np.arange(1.05, 6.06, 1), 96.))) diff --git a/jwql/tests/test_edb_telemetry_monitor.py b/jwql/tests/test_edb_telemetry_monitor.py new file mode 100644 index 000000000..f7a91e5d3 --- /dev/null +++ b/jwql/tests/test_edb_telemetry_monitor.py @@ -0,0 +1,382 @@ +#! /usr/bin/env python + +"""Tests for the EDB telemetry + +Authors +------- + + - Bryan Hilbert + +Use +--- + + These tests can be run via the command line (omit the ``-s`` to + suppress verbose output to stdout): + :: + + pytest -s test_edb_telemetry_monitor.py +""" +from collections import defaultdict +from copy import deepcopy +import os +import pytest +from types import SimpleNamespace + +from astropy.stats import sigma_clipped_stats +from astropy.table import Table +from astropy.table.column import Column +from astropy.time import Time, TimeDelta +import astropy.units as u +import datetime +import numpy as np + +from jwql.database.database_interface import session +from jwql.edb.engineering_database import EdbMnemonic +from jwql.instrument_monitors.common_monitors import edb_telemetry_monitor as etm +from jwql.instrument_monitors.common_monitors.edb_telemetry_monitor_utils import condition as cond +from jwql.instrument_monitors.common_monitors.edb_telemetry_monitor_utils import utils as etm_utils +from jwql.tests.resources import has_test_db +from jwql.utils.constants import MIRI_POS_RATIO_VALUES + +# Determine if tests are being run on Github Actions +ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') + + +def test_add_every_change_history(): + """Test that every_change data is correctly combined with an existing + set of every_change data + """ + dates1 = np.array([datetime.datetime(2022, 3, 4, 1, 5, i) for i in range(10)]) + data1 = np.array([0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2]) + means1 = 0.15 + devs1 = 0.07 + dates2 = np.array([dates1[-1] + datetime.timedelta(seconds=1 * i) for i in range(1, 11)]) + data2 = np.array([0.3, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.4]) + means2 = 0.35 + devs2 = 0.07 + ec1 = {'0.15': (dates1, data1, means1, devs1), + '0.35': (dates2, data2, means2, devs2) + } + ec2 = {'0.15': (dates1, data1, means1, devs1)} + combine1 = etm.add_every_change_history(ec1, ec2) + expected1 = defaultdict(list) + expected1['0.15'] = (np.append(dates1, dates1), np.append(data1, data1), np.append(means1, means1), np.append(devs1, devs1)) + expected1['0.35'] = (dates2, data2, means2, devs2) + + for key in combine1: + print('compare ', 
key) + for i, cele in enumerate(combine1[key]): + assert np.all(cele == expected1[key][i]) + + dates3 = np.array([dates2[-1] + datetime.timedelta(seconds=1 * i) for i in range(1, 11)]) + ec3 = {'0.55': (dates3, data2 + 0.2, means2 + 0.2, devs2)} + combine2 = etm.add_every_change_history(ec1, ec3) + expected2 = defaultdict(list) + expected2['0.15'] = (dates1, data1, means1, devs1) + expected2['0.35'] = (dates2, data2, means2, devs2) + expected2['0.55'] = (dates3, data2 + 0.2, means2 + 0.2, devs2) + + for key in combine2: + print('compare ', key) + for i, cele in enumerate(combine2[key]): + assert np.all(cele == expected2[key][i]) + + +def test_change_only_add_points(): + """Make sure we convert change-only data to AllPoints data correctly + """ + dates = [datetime.datetime(2021, 7, 14, 5, 24 + i, 0) for i in range(3)] + values = np.arange(3) + data = Table([dates, values], names=('dates', 'euvalues')) + mnem = EdbMnemonic('SOMETHING', datetime.datetime(2021, 7, 14), datetime.datetime(2021, 7, 14, 6), data, {}, {}) + mnem.meta = {'TlmMnemonics': [{'TlmMnemonic': 'SOMETHING', + 'AllPoints': 0}]} + mnem.change_only_add_points() + + expected_dates = [datetime.datetime(2021, 7, 14, 5, 24, 0), datetime.datetime(2021, 7, 14, 5, 24, 59, 999999), + datetime.datetime(2021, 7, 14, 5, 25, 0), datetime.datetime(2021, 7, 14, 5, 25, 59, 999999), + datetime.datetime(2021, 7, 14, 5, 26, 0)] + expected_values = [0, 0, 1, 1, 2] + expected = Table([expected_dates, expected_values], names=('dates', 'euvalues')) + + assert np.all(expected["dates"] == mnem.data["dates"]) + assert np.all(expected["euvalues"] == mnem.data["euvalues"]) + + +def test_conditions(): + """Test the extraction of data using the ```equal``` class. + """ + # Create data for mnemonic of interest + #start_time = Time('2022-02-02') + #end_time = Time('2022-02-03') + start_time = datetime.datetime(2022, 2, 2) + end_time = datetime.datetime(2022, 2, 3) + temp_data = Table() + temp_data["euvalues"] = np.array([35., 35.1, 35.2, 36., 36.1, 36.2, 37.1, 37., 36., 36.]) + #temp_data["dates"] = np.array([Time('2022-02-02') + TimeDelta(0.1 * i, format='jd') for i in range(10)]) + temp_data["dates"] = np.array([start_time + datetime.timedelta(days=0.1 * i) for i in range(10)]) + meta = {} + info = {} + temperature = EdbMnemonic("TEMPERATURE", start_time, end_time, temp_data, meta, info) + + # Create conditional data + current_data = {} + current_data["euvalues"] = np.array([1., 1., 1., 2.5, 2.5, 2.5, 5.5, 5.5, 2.5, 2.5]) + current_data["dates"] = np.array([start_time + datetime.timedelta(days=0.1001 * i) for i in range(10)]) + + # Using a single relation class + eq25 = cond.relation_test(current_data, '==', 2.5) + condition_list = [eq25] + condition_1 = cond.condition(condition_list) + + # Extract the good data + condition_1.extract_data(temperature.data) + + # Expected results + expected_table = Table() + frac_days = [0.4, 0.5, 0.9] + expected_table["dates"] = [start_time + datetime.timedelta(days=frac) for frac in frac_days] + expected_table["euvalues"] = [36.1, 36.2, 36.0] + + assert np.all(condition_1.extracted_data == expected_table) + assert condition_1.block_indexes == [0, 2, 3] + + grt0 = cond.relation_test(current_data, '>', 0) + condition_list.append(grt0) + condition_2 = cond.condition(condition_list) + condition_2.extract_data(temperature.data) + assert np.all(condition_2.extracted_data == expected_table) + assert condition_2.block_indexes == [0, 2, 3] + + less10 = cond.relation_test(current_data, '<', 10) + condition_list.append(less10) + 
condition_3 = cond.condition(condition_list) + condition_3.extract_data(temperature.data) + assert np.all(condition_3.extracted_data == expected_table) + assert condition_3.block_indexes == [0, 2, 3] + + +def test_find_all_changes(): + inst = etm.EdbMnemonicMonitor() + + # Create test data + start_time = Time('2022-02-02') + end_time = Time('2022-02-03') + temp_data = Table() + temp_data["euvalues"] = [350., 350.1, 350.2, 360., 360.1, 360.2, 370.1, 370., 360., 360.] + temp_data["dates"] = np.array([datetime.datetime(2022, 2, 2) + datetime.timedelta(days=0.1 * i) for i in range(10)]) + meta = {'TlmMnemonics': [{'AllPoints': 1}]} + info = {} + temperature = EdbMnemonic("TEMPERATURE", start_time, end_time, temp_data, meta, info) + temperature.blocks = [] + + # Create dictionary of dependency info + dependency = [{"name": "CURRENT", "relation": "none", "threshold": 0}] + + # Create dependency data + current_data = Table() + current_data["euvalues"] = ['LOW', 'LOW', 'LOW', 'MEDIUM', 'MEDIUM', 'MEDIUM', 'HIGH', 'HIGH', 'MEDIUM', 'MEDIUM'] + current_data["dates"] = np.array([datetime.datetime(2022, 2, 2) + datetime.timedelta(days=0.1001 * i) for i in range(10)]) + inst.query_results[dependency[0]["name"]] = EdbMnemonic("CURRENT", start_time, end_time, current_data, meta, info) + + vals = inst.find_all_changes(temperature, dependency) + assert np.isclose(vals.mean[0], 359.07) + assert np.isclose(vals.median[0], 360.0) + assert np.isclose(vals.stdev[0], 6.9818407314976785) + + +def test_get_averaging_time_duration(): + """Test that only allowed string formats are used for averaging time duration + """ + in_strings = ["5_minute", "45_second", "10_day", "2_hour"] + expected_vals = [5 * u.minute, 45 * u.second, 10 * u.day, 2 * u.hour] + + for inval, outval in zip(in_strings, expected_vals): + output = etm_utils.get_averaging_time_duration(inval) + assert output == outval + + bad_strings = ["7_years", "nonsense"] + for inval in bad_strings: + with pytest.raises(ValueError) as e_info: + output = etm_utils.get_averaging_time_duration(inval) + + +def test_get_query_duration(): + """Test that the correct query duration is found + """ + in_strings = ['daily_means', "every_change", "block_means", "time_interval", "all"] + expected_vals = [datetime.timedelta(days=0.01041667), datetime.timedelta(days=1), datetime.timedelta(days=1), + datetime.timedelta(days=1), datetime.timedelta(days=1)] + for inval, outval in zip(in_strings, expected_vals): + output = etm_utils.get_query_duration(inval) + assert output == outval + + with pytest.raises(ValueError) as e_info: + output = etm_utils.get_query_duration("bad_string") + + +def test_key_check(): + """Test the dictionary key checker + """ + d = {'key1': [1, 2, 3], 'key4': 'a'} + assert etm_utils.check_key(d, 'key1') == d['key1'] + assert etm_utils.check_key(d, 'key2') is None + + +def test_multiple_conditions(): + """Test that filtering using multiple conditions is working as expected. 
+ """ + # Create data for mnemonic of interest + start_time = datetime.datetime(2022, 2, 2) + end_time = datetime.datetime(2022, 2, 3) + temp_data = Table() + temp_data["euvalues"] = Column(np.array([35., 35.1, 35.2, 36., 36.1, 36.2, 37.1, 37., 36., 36.])) + temp_data["dates"] = Column(np.array([start_time + datetime.timedelta(days=0.1 * i) for i in range(10)])) + meta = {} + info = {} + temperature = EdbMnemonic("TEMPERATURE", start_time, end_time, temp_data, meta, info) + + # Create conditional data + current_data = {} + current_data["euvalues"] = Column(np.array([1., 2.5, 2.5, 2.5, 2.5, 2.5, 5.5, 5.5, 2.5, 2.5])) + current_data["dates"] = Column(np.array([start_time + datetime.timedelta(days=0.1001 * i) for i in range(10)])) + + element_data = {} + element_data["euvalues"] = Column(np.repeat("OFF", 20)) + element_data["euvalues"][13:] = "ON" + element_data["dates"] = Column(np.array([start_time + datetime.timedelta(days=0.06 * i) for i in range(20)])) + + grt35 = cond.relation_test(temp_data, '>', 35.11) + eq25 = cond.relation_test(current_data, '==', 2.5) + off = cond.relation_test(element_data, '=', 'OFF') + condition_list = [grt35, eq25, off] + condition = cond.condition(condition_list) + condition.extract_data(temperature.data) + + # Compare to expectations + expected_table = temp_data[2:6] + + assert np.all(condition.extracted_data["euvalues"] == expected_table["euvalues"]) + assert np.all(condition.extracted_data["dates"] == expected_table["dates"]) + assert condition.block_indexes == [0, 4] + + +def test_organize_every_change(): + """Test the reorganization of every_change data from an EdbMnemonic into something + easier to plot + """ + basetime = datetime.datetime(2021, 4, 6, 14, 0, 0) + dates = np.array([basetime + datetime.timedelta(seconds=600 * i) for i in range(20)]) + #dates = np.array([basetime + TimeDelta(600 * i, format='sec') for i in range(20)]) + vals = np.array([300.5, 310.3, -250.5, -500.9, 32.2, + 300.1, 310.8, -250.2, -500.2, 32.7, + 300.2, 310.4, -250.6, -500.8, 32.3, + 300.4, 310.5, -250.4, -500.1, 32.9]) + ec_vals = ["F2550W", 'F560W', 'F770W', 'F1000W', 'F1500W', + "F2550W", 'F560W', 'F770W', 'F1000W', 'F1500W', + "F2550W", 'F560W', 'F770W', 'F1000W', 'F1500W', + "F2550W", 'F560W', 'F770W', 'F1000W', 'F1500W'] + + m = Table() + m["dates"] = dates + m["euvalues"] = vals + mnem = EdbMnemonic('IMIR_HK_FW_POS_RATIO', Time('2021-04-06T00:00:00'), Time('2021-04-06T23:00:00'), + m, {}, {"unit": "Pos Ratio"}) + mnem.every_change_values = ec_vals + data = etm.organize_every_change(mnem) + + f2550_idx = [0, 5, 10, 15] + f560_idx = [1, 6, 11, 16] + f770_idx = [2, 7, 12, 17] + f1000_idx = [3, 8, 13, 18] + f1500_idx = [4, 9, 14, 19] + + f2550_vals = vals[f2550_idx] + f560_vals = vals[f560_idx] + f770_vals = vals[f770_idx] + f1000_vals = vals[f1000_idx] + f1500_vals = vals[f1500_idx] + + f2550mean, _, _ = sigma_clipped_stats(f2550_vals, sigma=3) + f560mean, _, _ = sigma_clipped_stats(f560_vals, sigma=3) + f770mean, _, _ = sigma_clipped_stats(f770_vals, sigma=3) + f1000mean, _, _ = sigma_clipped_stats(f1000_vals, sigma=3) + f1500mean, _, _ = sigma_clipped_stats(f1500_vals, sigma=3) + expected = {'F2550W': (np.array(dates[f2550_idx]), f2550_vals, MIRI_POS_RATIO_VALUES['FW']['F2550W'][0]), + 'F560W': (np.array(dates[f560_idx]), f560_vals, MIRI_POS_RATIO_VALUES['FW']['F560W'][0]), + 'F770W': (np.array(dates[f770_idx]), f770_vals, MIRI_POS_RATIO_VALUES['FW']['F770W'][0]), + 'F1000W': (np.array(dates[f1000_idx]), f1000_vals, MIRI_POS_RATIO_VALUES['FW']['F1000W'][0]), + 'F1500W': 
(np.array(dates[f1500_idx]), f1500_vals, MIRI_POS_RATIO_VALUES['FW']['F1500W'][0])} + + for key, val in expected.items(): + assert np.all(val[0] == data[key][0]) + assert np.all(val[1] == data[key][1]) + assert np.all(val[2] == data[key][2]) + + +def test_remove_outer_points(): + """Test that points outside the requested time are removed for change-only data + """ + data = Table() + data["dates"] = [datetime.datetime(2014, 12, 8) + datetime.timedelta(days=0.5 * (i + 1)) for i in range(5)] + data["euvalues"] = [1, 2, 3, 4, 5] + mnem = EdbMnemonic('TEST', datetime.datetime(2022, 12, 9), datetime.datetime(2022, 12, 10), data, {}, {}) + orig = deepcopy(mnem) + etm_utils.remove_outer_points(mnem) + assert all(orig.data['dates'][1:-1] == mnem.data['dates']) + assert all(orig.data['euvalues'][1:-1] == mnem.data['euvalues']) + + +@pytest.mark.skipif(not has_test_db(), reason='Modifies test database.') +def test_add_new_block_db_entry(): + query_time = datetime.datetime.now() + + # mock mnem structure + dates = SimpleNamespace(data=datetime.datetime.now()) + euvalues = SimpleNamespace(data=1) + mnem = SimpleNamespace(data={'dates': dates, 'euvalues': euvalues}, + stdev=0, median=1, max=2, min=1, + mnemonic_identifier='test') + + monitor = etm.EdbMnemonicMonitor() + monitor.identify_tables('nircam', 'daily') + + try: + monitor.add_new_block_db_entry(mnem, query_time) + new_entries = session.query(monitor.history_table).filter( + monitor.history_table.mnemonic == 'test') + assert new_entries.count() == 1 + finally: + # clean up + session.query(monitor.history_table).filter( + monitor.history_table.mnemonic == 'test').delete() + session.commit() + assert session.query(monitor.history_table).filter( + monitor.history_table.mnemonic == 'test').count() == 0 + + +@pytest.mark.skipif(not has_test_db(), reason='Modifies test database.') +def test_add_new_every_change_db_entry(): + # mock mnem dict + mnem_dict = {'test1': (datetime.datetime.now(), 1., 1., 1.), + 'test2': (datetime.datetime.now(), 1., 1., 1.)} + mnem = 'test_mnem' + dependency_name = 'test_dependency' + query_time = datetime.datetime.now() + + monitor = etm.EdbMnemonicMonitor() + monitor.identify_tables('nircam', 'every_change') + + try: + monitor.add_new_every_change_db_entry( + mnem, mnem_dict, dependency_name, query_time) + new_entries = session.query(monitor.history_table).filter( + monitor.history_table.mnemonic == 'test_mnem') + assert new_entries.count() == 2 + finally: + # clean up + session.query(monitor.history_table).filter( + monitor.history_table.mnemonic == 'test_mnem').delete() + session.commit() + assert session.query(monitor.history_table).filter( + monitor.history_table.mnemonic == 'test_mnem').count() == 0 diff --git a/jwql/tests/test_loading_times.py b/jwql/tests/test_loading_times.py index f314714b9..7c83fed72 100644 --- a/jwql/tests/test_loading_times.py +++ b/jwql/tests/test_loading_times.py @@ -78,7 +78,12 @@ def test_loading_times(url): print('Testing {}'.format(url)) t1 = time.time() - url = urllib.request.urlopen(url) + try: + urllib.request.urlopen(url) + except (urllib.error.HTTPError, urllib.error.URLError): + # may be missing data or no running server + pytest.skip("Server problem") + t2 = time.time() assert (t2 - t1) <= TIME_CONSTRAINT diff --git a/jwql/tests/test_monitor_mast.py b/jwql/tests/test_mast_utils.py similarity index 79% rename from jwql/tests/test_monitor_mast.py rename to jwql/tests/test_mast_utils.py index 23e700128..74c3f0328 100755 --- a/jwql/tests/test_monitor_mast.py +++ 
b/jwql/tests/test_mast_utils.py @@ -1,6 +1,6 @@ #! /usr/bin/env python -"""Tests for the ``monitor_mast`` module. +"""Tests for the ``mast_utils`` module. Authors ------- @@ -14,15 +14,15 @@ suppress verbose output to stdout): :: - pytest -s test_monitor_mast.py + pytest -s test_mast_utils.py """ import os from astroquery.mast import Mast -from jwql.jwql_monitors import monitor_mast as mm from jwql.utils.constants import JWST_INSTRUMENT_NAMES +from jwql.utils import mast_utils as mu from jwql.utils.utils import get_config # Temporary until JWST operations: switch to test string for MAST request URL @@ -34,8 +34,10 @@ def test_astroquery_mast(): """Test if the astroquery.mast service can complete a request""" service = 'Mast.Caom.Filtered' - params = {'columns': 'COUNT_BIG(*)', 'filters': [], 'pagesize': 1, - 'page': 1} + params = {'columns': 'COUNT_BIG(*)', + 'filters': [{"paramName": "obs_collection", + "values": ["JWST"]},], + 'pagesize': 1, 'page': 1} response = Mast.service_request_async(service, params) result = response[0].json() @@ -47,7 +49,7 @@ def test_caom_instrument_keywords(): instruments""" kw = [] for ins in JWST_INSTRUMENT_NAMES: - kw.append(mm.instrument_keywords(ins, caom=True)['keyword'].tolist()) + kw.append(mu.instrument_keywords(ins, caom=True)['keyword'].tolist()) assert kw[0] == kw[1] == kw[2] == kw[3] == kw[4] @@ -57,7 +59,7 @@ def test_filtered_instrument_keywords(): different for all instruments""" kw = [] for ins in JWST_INSTRUMENT_NAMES: - kw.append(mm.instrument_keywords(ins, caom=False)['keyword'].tolist()) + kw.append(mu.instrument_keywords(ins, caom=False)['keyword'].tolist()) assert kw[0] != kw[1] != kw[2] != kw[3] != kw[4] @@ -65,7 +67,7 @@ def test_filtered_instrument_keywords(): def test_instrument_inventory_filtering(): """Test to see that the instrument inventory can be filtered""" filt = 'GR150R' - data = mm.instrument_inventory('niriss', + data = mu.instrument_inventory('niriss', add_filters={'filter': filt}, return_data=True) @@ -78,7 +80,7 @@ def test_instrument_dataproduct_filtering(): """Test to see that the instrument inventory can be filtered by data product""" dp = 'spectrum' - data = mm.instrument_inventory('nirspec', dataproduct=dp, caom=True, + data = mu.instrument_inventory('nirspec', dataproduct=dp, caom=True, return_data=True) dps = [row['dataproduct_type'] for row in data['data']] diff --git a/jwql/tests/test_monitor_utils.py b/jwql/tests/test_monitor_utils.py new file mode 100644 index 000000000..ca0208c7c --- /dev/null +++ b/jwql/tests/test_monitor_utils.py @@ -0,0 +1,47 @@ +#! /usr/bin/env python + +"""Tests for the ``monitor_utils`` module. 
+ +Authors +------- + + - Melanie Clarke + +Use +--- + + These tests can be run via the command line (omit the ``-s`` to + suppress verbose output to stdout): + :: + + pytest -s test_monitor_utils.py +""" +import datetime + +import pytest + +from jwql.database.database_interface import session, Monitor +from jwql.tests.resources import has_test_db +from jwql.utils import monitor_utils + + +@pytest.mark.skipif(not has_test_db(), reason='Modifies test database.') +def test_update_monitor_table(tmp_path): + module = 'test' + start_time = datetime.datetime.now() + log_file = tmp_path / 'test_log.txt' + log_file.write_text('Completed Successfully') + + try: + monitor_utils.update_monitor_table(module, start_time, log_file) + query = session.query(Monitor).filter(Monitor.monitor_name == module) + assert query.count() == 1 + assert query.first().status == 'SUCCESS' + finally: + # clean up + query = session.query(Monitor).filter(Monitor.monitor_name == module) + query.delete() + session.commit() + + assert session.query(Monitor).filter( + Monitor.monitor_name == module).count() == 0 diff --git a/jwql/tests/test_msata_monitor.py b/jwql/tests/test_msata_monitor.py new file mode 100644 index 000000000..fc0286d6c --- /dev/null +++ b/jwql/tests/test_msata_monitor.py @@ -0,0 +1,363 @@ +#! /usr/bin/env python + +"""Tests for the MSATA monitor module. + + Authors + ------- + + - Maria Pena-Guerrero + + Use + --- + + These tests can be run via the command line (omit the ``-s`` to + suppress verbose output to stdout): + :: + + pytest -s test_msata_monitor.py + """ + +import os +import warnings + +import pandas as pd +import numpy as np +import pytest +from random import randint +from datetime import datetime +from bokeh.embed import components +from bokeh.models import ColumnDataSource +from bokeh.plotting import figure + +from jwql.instrument_monitors.nirspec_monitors.ta_monitors.msata_monitor import MSATA +from jwql.database.database_interface import NIRSpecTAQueryHistory +from jwql.utils.utils import get_config, ensure_dir_exists +from jwql.utils import monitor_utils, permissions + +ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') + +# define the type of a Bokeh plot type +bokeh_plot_type = type(figure()) + + +def define_testdata(): + """Define the data to test with. 
+ + Returns + ------- + msata_data : pandas dataframe + """ + msata_dict = { + # info taken from main_hdr dict + 'filename': ['jw09999001001_02101_00001_nrs1_uncal.fits'], + 'date_obs': ['2022-06-22'], + 'visit_id': ['V09999001001P0000000002101'], + 'tafilter': ['F110W'], + 'detector': ['NRS1'], + 'readout': ['NRSRAPID'], + 'subarray': ['FULL'], + # info taken from ta_hdr dict + 'num_refstars': [12], + 'ta_status': ['SUCCESSFUL'], + 'status_rsn': ['-999'], + 'v2halffacet': [-0.27568], + 'v3halffacet': [0.10975], + 'v2msactr': [378.523987], + 'v3msactr': [-428.374481], + 'lsv2offset': [-999.0], + 'lsv3offset': [-999.0], + 'lsoffsetmag': [-999.0], + 'lsrolloffset': [-999.0], + 'lsv2sigma': [-999.0], + 'lsv3sigma': [-999.0], + 'lsiterations': [-999], + 'guidestarid': ['-999'], + 'guidestarx': [-999.0], + 'guidestary': [-999.0], + 'guidestarroll': [-999.0], + 'samx': [-999.0], + 'samy': [-999.0], + 'samroll': [-999.0], + 'stars_in_fit': [-999] + } + # add info from ta_table + num_refstars = msata_dict['num_refstars'][0] + msata_dict['box_peak_value'] = [[8000 for _ in range(num_refstars)]] + msata_dict['reference_star_mag'] = [[-999 for _ in range(num_refstars)]] + msata_dict['convergence_status'] = [['SUCCESS' for _ in range(num_refstars)]] + msata_dict['reference_star_number'] = [[i for i in range(num_refstars)]] + msata_dict['lsf_removed_status'] = [['-999' for i in range(num_refstars)]] + msata_dict['lsf_removed_reason'] = [['-999' for i in range(num_refstars)]] + msata_dict['lsf_removed_x'] = [[-999.0 for _ in range(num_refstars)]] + msata_dict['lsf_removed_y'] = [[-999.0 for _ in range(num_refstars)]] + msata_dict['planned_v2'] = [[-999.0 for _ in range(num_refstars)]] + msata_dict['planned_v3'] = [[-999.0 for _ in range(num_refstars)]] + + # create the additional arrays + number_status, status_colors = [], [] + for tas, do_str in zip(msata_dict['ta_status'], msata_dict['date_obs']): + if tas.lower() == 'unsuccessful': + number_status.append(0.0) + status_colors.append('red') + elif 'progress' in tas.lower(): + number_status.append(0.5) + status_colors.append('gray') + else: + number_status.append(1.0) + status_colors.append('blue') + + # add these to the bokeh data structure + msata_dict['number_status'] = number_status + msata_dict['status_colors'] = status_colors + + # create the dataframe + msata_data = pd.DataFrame(msata_dict) + return msata_data + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_mast_query_ta(): + """Test the ``mast_query_ta`` function""" + + query_start = 59833.0 + query_end = 59844.6 + + # query mast + result = monitor_utils.mast_query_ta('nirspec', 'NRS_FULL_MSA', query_start, query_end) + + # eliminate duplicates (sometimes rate files are returned with cal files) + result = [r for r in result if r['productLevel'] == '2b'] + assert len(result) == 4 + + # query local model + alternate = monitor_utils.model_query_ta('nirspec', 'NRS_FULL_MSA', query_start, query_end) + assert len(alternate) == len(result) + + # check that filenames match up - model returns rootfiles, mast returns filenames + result = sorted(result, key=lambda x: x['filename']) + alternate = sorted(alternate, key=lambda x: x['root_name']) + for i, rootfile in enumerate(alternate): + assert rootfile['root_name'] in result[i]['filename'] + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_most_recent_search(): + """Test the 
``most_recent_search`` function""" + + ta = MSATA() + ta.aperture = 'NRS_FULL_MSA' + ta.query_table = NIRSpecTAQueryHistory + + result = ta.most_recent_search() + + assert isinstance(result, float) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_status(): + """Test the ``plt_status`` function""" + + ta = MSATA() + msata_data = define_testdata() + ta.source = ColumnDataSource(data=msata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_status() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_residual_offsets(): + """Test the ``plt_residual_offsets`` function""" + + ta = MSATA() + msata_data = define_testdata() + ta.source = ColumnDataSource(data=msata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_residual_offsets() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_v2offset_time(): + """Test the ``plt_v2offset_time`` function""" + + ta = MSATA() + msata_data = define_testdata() + ta.source = ColumnDataSource(data=msata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_v2offset_time() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_v3offset_time(): + """Test the ``plt_v3offset_time`` function""" + + ta = MSATA() + msata_data = define_testdata() + ta.source = ColumnDataSource(data=msata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_v3offset_time() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_lsv2v3offsetsigma(): + """Test the ``plt_lsv2v3offsetsigma`` function""" + + ta = MSATA() + msata_data = define_testdata() + ta.source = ColumnDataSource(data=msata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_lsv2v3offsetsigma() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_res_offsets_corrected(): + """Test the ``plt_res_offsets_corrected`` function""" + + ta = MSATA() + msata_data = define_testdata() + ta.source = ColumnDataSource(data=msata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_res_offsets_corrected() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_v2offsigma_time(): + """Test the ``plt_v2offsigma_time`` function""" + + ta = MSATA() + msata_data = define_testdata() + ta.source = ColumnDataSource(data=msata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_v2offsigma_time() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_v3offsigma_time(): + """Test the ``plt_v3offsigma_time`` function""" + + ta = MSATA() + msata_data = define_testdata() + ta.source = ColumnDataSource(data=msata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_v3offsigma_time() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_roll_offset(): + """Test the ``plt_roll_offset`` function""" + + ta = MSATA() + 
msata_data = define_testdata() + ta.source = ColumnDataSource(data=msata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_roll_offset() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_lsoffsetmag(): + """Test the ``plt_lsoffsetmag`` function""" + + ta = MSATA() + msata_data = define_testdata() + ta.source = ColumnDataSource(data=msata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_lsoffsetmag() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_tot_number_of_stars(): + """Test the ``plt_tot_number_of_stars`` function""" + + ta = MSATA() + msata_data = define_testdata() + ta.source = ColumnDataSource(data=msata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_tot_number_of_stars() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_mags_time(): + """Test the ``plt_mags_time`` function""" + + ta = MSATA() + msata_data = define_testdata() + # create the additional data + colors_list, tot_number_of_stars = [], [] + color_dict, visit_id = {}, msata_data['visit_id'] + for i, _ in enumerate(visit_id): + tot_stars = len(msata_data['reference_star_number'][i]) + tot_number_of_stars.append(tot_stars) + ci = '#%06X' % randint(0, 0xFFFFFF) + if visit_id[i] not in color_dict: + color_dict[visit_id[i]] = ci + colors_list.append(color_dict[visit_id[i]]) + # add these to the bokeh data structure + msata_data['tot_number_of_stars'] = tot_number_of_stars + msata_data['colors_list'] = colors_list + ta.source = ColumnDataSource(data=msata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_mags_time() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_mk_plt_layout(): + """Test the ``mk_plt_layout`` function""" + + truth_script, truth_div = components(figure()) + + ta = MSATA() + ta.output_dir = os.path.join(get_config()['outputs'], 'msata_monitor/tests') + ensure_dir_exists(ta.output_dir) + + ta.output_file_name = os.path.join(ta.output_dir, "msata_layout.html") + ta.msata_data = define_testdata() + with warnings.catch_warnings(): + warnings.simplefilter('error') + script, div = ta.mk_plt_layout() + + # set group write permission for the test file + # to make sure others can overwrite it + permissions.set_permissions(ta.output_file_name) + + assert type(truth_script) == type(script) + assert type(truth_div) == type(div) diff --git a/jwql/tests/test_permissions.py b/jwql/tests/test_permissions.py index 67f2538e5..dd8878294 100755 --- a/jwql/tests/test_permissions.py +++ b/jwql/tests/test_permissions.py @@ -43,7 +43,7 @@ def test_directory(test_dir=TEST_DIRECTORY): Path to directory used for testing Yields - ------- + ------ test_dir : str Path to directory used for testing """ @@ -51,7 +51,7 @@ def test_directory(test_dir=TEST_DIRECTORY): yield test_dir if os.path.isdir(test_dir): - os.remove(test_dir) + os.rmdir(test_dir) def test_directory_permissions(test_directory): @@ -85,7 +85,7 @@ def test_file(test_dir=TEST_DIRECTORY): Path to directory used for testing Yields - ------- + ------ filename : str Path of file used for testing """ diff --git a/jwql/tests/test_pipeline_tools.py b/jwql/tests/test_pipeline_tools.py index c9328c748..a79d687c8 100644 
--- a/jwql/tests/test_pipeline_tools.py +++ b/jwql/tests/test_pipeline_tools.py @@ -41,8 +41,9 @@ def test_completed_pipeline_steps(): filename : str File to be checked """ - - filename = os.path.join(get_config()['filesystem'], 'public', 'jw00312', 'jw00312002001', 'jw00312002001_02102_00001_nrcb4_rateints.fits') + filename = os.path.join(get_config()['filesystem'], 'public', 'jw02733', + 'jw02733001001', + 'jw02733001001_02101_00001_nrcb2_rateints.fits') completed_steps = pipeline_tools.completed_pipeline_steps(filename) true_completed = OrderedDict([('group_scale', False), ('dq_init', True), @@ -66,7 +67,6 @@ def test_completed_pipeline_steps(): assert completed_steps == true_completed -@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Drizzle import issue with python 3.8') def test_get_pipeline_steps(): """Test that the proper pipeline steps are returned for an instrument @@ -107,9 +107,10 @@ def test_get_pipeline_steps(): # MIRI miri_req_steps = pipeline_tools.get_pipeline_steps('miri') - miri_steps = ['dq_init', 'saturation', 'firstframe', 'lastframe', - 'linearity', 'rscd', 'dark_current', 'refpix', 'jump', 'rate'] - not_required = ['group_scale', 'ipc', 'superbias', 'persistence'] + miri_steps = ['group_scale', 'dq_init', 'saturation', 'firstframe', 'lastframe', + 'reset', 'linearity', 'rscd', 'dark_current', 'refpix', 'jump', 'rate', + 'gain_scale'] + not_required = ['ipc', 'superbias', 'persistence'] miri_dict = OrderedDict({}) for step in miri_steps: miri_dict[step] = True @@ -138,7 +139,6 @@ def test_image_stack(): assert exptimes == [[10.5], [10.5], [10.5]] -@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Drizzle import issue with python 3.8') def test_steps_to_run(): """Test that the dictionaries for steps required and steps completed are correctly combined to create a dictionary of pipeline steps to diff --git a/jwql/tests/test_preview_image.py b/jwql/tests/test_preview_image.py index 70253da6b..ce0a7de5a 100644 --- a/jwql/tests/test_preview_image.py +++ b/jwql/tests/test_preview_image.py @@ -21,18 +21,17 @@ """ import glob +import numpy as np import os import pytest import shutil from astropy.io import fits +from jwst.datamodels import dqflags -from jwql.utils.preview_image import PreviewImage +from jwql.utils.preview_image import PreviewImage, crop_to_subarray from jwql.utils.utils import get_config, ensure_dir_exists -# directory to be created and populated during tests running -TEST_DIRECTORY = os.path.join(os.environ['HOME'], 'preview_image_test') - # Determine if tests are being run on Github Actions ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') @@ -42,34 +41,44 @@ ON_READTHEDOCS = os.environ['READTHEDOCS'] -@pytest.fixture(scope="module") -def test_directory(test_dir=TEST_DIRECTORY): - """Create a test directory for preview image. 
- - Parameters - ---------- - test_dir : str - Path to directory used for testing - - Yields - ------- - test_dir : str - Path to directory used for testing - +def test_crop_to_subarray(): + """Test that the code correctly crops larger arrays down to + the requested subarray """ - # Set up local test directory - ensure_dir_exists(test_dir) - yield test_dir - - # Tear down local test directory and any files within - if os.path.isdir(test_dir): - shutil.rmtree(test_dir) - - # Empty test directory on central storage - jpgs = glob.glob(os.path.join(get_config()['test_dir'], '*.jpg')) - thumbs = glob.glob(os.path.join(get_config()['test_dir'], '*.thumbs')) - for file in jpgs + thumbs: - os.remove(file) + # Set up a small DQ array that shows the location of reference pixels on 2 sides + dq = np.ones((10, 10), dtype=int) + dq[0:3, :] = 0 + dq[:, 0:3] = 0 + + # Set up a fits header + h = fits.ImageHDU([0]) + h.header['FILENAME'] = 'myfile_uncal.fits' + + # Specify that we are cropping the DQ array down to a 5x5 subarray + xdim = 5 + ydim = 5 + + # First test the case where the header has no info on subarray location + c = crop_to_subarray(dq, h.header, xdim, ydim) + expected = np.ones((ydim, xdim), dtype=int) + assert np.all(c == expected) + + # Next test the case where the header does include subarray location info + h.header['SUBSTRT1'] = 2 + h.header['SUBSTRT2'] = 4 + h.header['SUBSIZE1'] = 5 + h.header['SUBSIZE2'] = 5 + c = crop_to_subarray(dq, h.header, xdim, ydim) + expected = np.ones((5, 5), dtype=int) + expected[:, 0:2] = 0 + assert np.all(c == expected) + + # Tweak the y location and try again + h.header['SUBSTRT2'] = 1 + c = crop_to_subarray(dq, h.header, xdim, ydim) + expected = np.zeros((5, 5), dtype=int) + expected[3:, 2:] = 1 + assert np.all(c == expected) def get_test_fits_files(): @@ -91,9 +100,38 @@ def get_test_fits_files(): return [] +def test_get_nonsci_map(): + """Test the retrieval of the dq data from an HDUList + """ + # Create HDUList. Start with the case where there is no DQ extension + h0 = fits.PrimaryHDU([0]) + h1 = fits.ImageHDU(np.zeros((10, 10), dtype=int)) + hdulist = fits.HDUList([h0, h1]) + extensions = ['PRIMARY', 'ERR'] + xd = 10 + yd = 10 + dq = PreviewImage.get_nonsci_map(0, hdulist, extensions, xd, yd) + expected = np.ones((10, 10), dtype=bool) + assert np.all(dq == expected) + + # Now test the case where there is a DQ extension. And insert some + # NON_SCIENCE and REFERENCE_PIXEL flags + h1.header['EXTNAME'] = 'DQ' + extensions[1] = 'DQ' + hdulist['DQ'].data[1, 1] = dqflags.pixel['REFERENCE_PIXEL'] + hdulist['DQ'].data[3, 3] = dqflags.pixel['NON_SCIENCE'] + hdulist['DQ'].data[5, 5] = dqflags.pixel['HOT'] + hdulist['DQ'].data[7, 7] = dqflags.pixel['REFERENCE_PIXEL'] + dqflags.pixel['NON_SCIENCE'] + hdulist['DQ'].data[9, 9] = dqflags.pixel['REFERENCE_PIXEL'] + dqflags.pixel['DEAD'] + dq = PreviewImage.get_nonsci_map(0, hdulist, extensions, xd, yd) + expected = np.ones((10, 10), dtype=bool) + expected[1, 1] = expected[3, 3] = expected[7, 7] = expected[9, 9] = 0 + assert np.all(dq == expected) + + @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') @pytest.mark.parametrize('filename', get_test_fits_files()) -def test_make_image(test_directory, filename): +def test_make_image(tmp_path, filename): """Use PreviewImage.make_image to create preview images of a sample JWST exposure. 
@@ -102,12 +140,12 @@ def test_make_image(test_directory, filename): Parameters ---------- - test_directory : str - Path of directory used for testing + tmp_path : pathlib.Path + Temporary directory to write to filename : str Path of FITS image to generate preview of """ - + test_directory = str(tmp_path) header = fits.getheader(filename) # Create and save the preview image or thumbnail @@ -118,26 +156,23 @@ def test_make_image(test_directory, filename): image.scaling = 'log' image.cmap = 'viridis' image.output_format = 'jpg' - image.thumbnail = create_thumbnail - - if create_thumbnail: - image.thumbnail_output_directory = test_directory - else: - image.preview_output_directory = test_directory - - image.make_image() + image.thumbnail_output_directory = test_directory + image.preview_output_directory = test_directory + image.make_image(create_thumbnail=create_thumbnail) except ValueError as error: print(error) if create_thumbnail: extension = 'thumb' + n_img = 1 else: extension = 'jpg' + n_img = header['NINTS'] # list of preview images - preview_image_filenames = glob.glob(os.path.join(test_directory, '*.{}'.format( - extension))) - assert len(preview_image_filenames) == header['NINTS'] + preview_image_filenames = glob.glob( + os.path.join(test_directory, '*.{}'.format(extension))) + assert len(preview_image_filenames) == n_img # clean up: delete preview images for file in preview_image_filenames: diff --git a/jwql/tests/test_protect_module.py b/jwql/tests/test_protect_module.py new file mode 100644 index 000000000..3bcafa5c1 --- /dev/null +++ b/jwql/tests/test_protect_module.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python + +"""Tests protect_module.py module + +Authors +------- + + - Bradley Sappington + +Use +--- + + These tests can be run via the command line (omit the -s to + suppress verbose output to stdout): + + :: + + pytest -s test_protect_module.py +""" +import os +from jwql.utils import protect_module as pm + +from pytest import fixture, mark +from jwql.utils.protect_module import lock_module, _PID_LOCKFILE_KEY + +# Determine if tests are being run on Github Actions +ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') + + +@fixture +def module_lock(): + module = __file__ + module_lock = module.replace('.py', '.lock') + return module_lock + + +@fixture +def do_not_email(): + pm.ALERT_EMAIL = False + return pm.ALERT_EMAIL + + +@lock_module +def protected_code_verify_file_exists_true(module_lock): + return os.path.exists(module_lock) + + +@lock_module +def protected_code_entered(): + return True + + +@mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_lock_module_create_destroy_file(module_lock, do_not_email): + """Test wrapper will create and destroy a lock file named by module if no other lock exists """ + + # Ensure lock file does not exist + if os.path.exists(module_lock): + os.remove(module_lock) + file_created = protected_code_verify_file_exists_true(module_lock) + file_exists = os.path.exists(module_lock) + # Assert that lock file was created in wrapper, and removed upon exit + assert (file_created and not file_exists) is True + + +@mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_already_locked_module_wont_run_with_bad_lock_file(module_lock, do_not_email): + """Test when wrapper encounters poorly formatted lock file, + it will not run, not delete lock file, and will send warn email """ + # create locked file in advance of calling protected code + 
with open(module_lock, "w"): + entered_protected_code = protected_code_entered() + # assert the invalid lock file was never deleted + assert os.path.exists(module_lock) + # assert that we never entered protected code + assert entered_protected_code is None + # clean the test + os.remove(module_lock) + + +@mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_already_locked_module_wont_run_with_legit_lock_file(module_lock, do_not_email): + """Test wrapper will not run if it encounters a correctly formatted lock file """ + # create locked file with running PID in advance of calling protected code + with open(module_lock, "w") as lock_file: + lock_file.write(f"{_PID_LOCKFILE_KEY}{os.getpid()}\n") + entered_protected_code = protected_code_entered() + # assert that we never entered protected code because the PID is currently running + assert entered_protected_code is None + # clean the test + os.remove(module_lock) + + +@mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_lock_module_handles_stale_lock_files(module_lock, do_not_email): + """Test wrapper will correctly identify stale lock file, + delete it, and continue to run successfully """ + # create locked file with not running PID in advance of calling protected code + with open(module_lock, "w") as lock_file: + lock_file.write(f"{_PID_LOCKFILE_KEY}-9999\n") + file_created = protected_code_verify_file_exists_true(module_lock) + file_exists = os.path.exists(module_lock) + # Assert that lock file was created in wrapper, and removed upon exit + assert (file_created and not file_exists) is True diff --git a/jwql/tests/test_readnoise_monitor.py b/jwql/tests/test_readnoise_monitor.py index 92d7c1720..d00607dd5 100644 --- a/jwql/tests/test_readnoise_monitor.py +++ b/jwql/tests/test_readnoise_monitor.py @@ -19,15 +19,16 @@ from collections import OrderedDict import os -import pytest +from astropy.io import fits import numpy as np +import pytest -from jwql.database.database_interface import NIRCamReadnoiseQueryHistory, NIRCamReadnoiseStats +from jwql.database.database_interface import NIRCamReadnoiseQueryHistory, NIRCamReadnoiseStats, session from jwql.instrument_monitors.common_monitors import readnoise_monitor +from jwql.tests.resources import has_test_db from jwql.utils.utils import get_config - ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') @@ -133,3 +134,63 @@ def test_make_histogram(): assert counts == counts_truth assert bin_centers == bin_centers_truth + + +@pytest.mark.skipif(not has_test_db(), reason='Modifies test database.') +def test_process(mocker, tmp_path): + hdul = fits.HDUList([ + fits.PrimaryHDU(header=fits.Header({ + 'DATE-OBS': 'test', + 'TIME-OBS': 'test', 'INSTRUME': 'NIRCam', + 'DETECTOR': 'nrcalong', 'READPATT': 'test', + 'SUBARRAY': 'test'})), + fits.ImageHDU(np.zeros((10, 10, 10, 10)), name='SCI')]) + filename = str(tmp_path / 'test_uncal_file.fits') + processed_file = str(tmp_path / 'test_refpix_file.fits') + hdul.writeto(filename, overwrite=True) + hdul.writeto(processed_file, overwrite=True) + + monitor = readnoise_monitor.Readnoise() + monitor.instrument = 'nircam' + monitor.detector = 'nrcalong' + monitor.aperture = 'test' + monitor.read_pattern = 'test' + monitor.subarray = 'test' + monitor.nints = 1 + monitor.ngroups = 1 + monitor.expstart = 9999.0 + monitor.data_dir = str(tmp_path) + monitor.identify_tables() + + assert not monitor.file_exists_in_database(filename) + + # mock the 
pipeline run + mocker.patch.object(readnoise_monitor, 'run_parallel_pipeline', + return_value={filename: processed_file}) + # mock amplifier info + mocker.patch.object(readnoise_monitor.instrument_properties, 'amplifier_info', + return_value=('test', 'test')) + mocker.patch.object(monitor, 'get_amp_stats', + return_value={'test': 0}) + # mock image creation + mocker.patch.object(monitor, 'make_readnoise_image', + return_value=np.zeros(10)) + mocker.patch.object(monitor, 'make_histogram', + return_value=(np.zeros(10), np.zeros(10))) + mocker.patch.object(monitor, 'image_to_png', + return_value=str(tmp_path / 'output.png')) + # mock crds + mocker.patch.object(readnoise_monitor.crds, 'getreferences', + side_effect=ValueError('no reffile')) + + try: + monitor.process([filename]) + assert monitor.file_exists_in_database(filename) + finally: + # clean up + query = session.query(monitor.stats_table).filter( + monitor.stats_table.uncal_filename == filename) + query.delete() + session.commit() + + assert not monitor.file_exists_in_database(filename) diff --git a/jwql/tests/test_redis_celery.py b/jwql/tests/test_redis_celery.py new file mode 100644 index 000000000..38e939b77 --- /dev/null +++ b/jwql/tests/test_redis_celery.py @@ -0,0 +1,112 @@ +#! /usr/bin/env python + +""" +Tests for the redis/celery server infrastructure. + +Authors +------- + + - Brian York + +Use +--- + + In order to run these tests, you need the following: + + - A running redis server (separate from the production server on pljwql2) + - A running celery worker communicating with that redis server + - A config.json file providing the redis URL, and pointing to the JWQL testing + files. + + These tests are intended to be run from the command line, because I haven't yet + figured out a way to actually set up the entire environment in pytest: + :: + + python test_redis_celery.py +""" + +from astropy.io import ascii, fits +from collections import defaultdict +from collections import OrderedDict +from copy import deepcopy +import datetime +import logging +import numpy as np +import os +from pathlib import Path +from pysiaf import Siaf +import pytest +from tempfile import TemporaryDirectory + +from jwql.instrument_monitors import pipeline_tools +from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline +from jwql.utils import crds_tools, instrument_properties, monitor_utils +from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE +from jwql.utils.constants import FLAT_EXP_TYPES, DARK_EXP_TYPES +from jwql.utils.logging_functions import log_info, log_fail +from jwql.utils.permissions import set_permissions +from jwql.utils.utils import copy_files, ensure_dir_exists, get_config, filesystem_path + + +def get_instrument(file_name): + if 'miri' in file_name: + return 'miri' + elif 'nircam' in file_name: + return 'nircam' + elif 'nrc' in file_name: + return 'nircam' + elif 'niriss' in file_name: + return 'niriss' + elif 'nis' in file_name: + return 'niriss' + elif 'nirspec' in file_name: + return 'nirspec' + elif 'nrs' in file_name: + return 'nirspec' + elif 'guider' in file_name: + return 'fgs' + return 'unknown' + + +if __name__ == "__main__": + config = get_config() + p = Path(config['test_data']) + + for file in p.rglob("*uncal.fits"): + print("Testing cal pipeline") + with TemporaryDirectory() as working_dir: + try: + print("Running in {}".format(working_dir)) + file_name = os.path.basename(file) + if "gs-" in file_name: + print("\tSkipping guide star file 
{}".format(file_name)) + continue + print("\tCopying {}".format(file)) + copy_files([file], working_dir) + cal_file = os.path.join(working_dir, file_name) + print("\t\tCalibrating {}".format(cal_file)) + instrument = get_instrument(file_name) + outputs = run_pipeline(cal_file, "uncal", "all", instrument) + print("\t\tDone {}".format(file)) + except Exception as e: + print("ERROR: {}".format(e)) + + print("Testing jump pipeline") + with TemporaryDirectory() as working_dir: + try: + print("Running in {}".format(working_dir)) + file_name = os.path.basename(file) + if "gs-" in file_name: + print("\tSkipping guide star file {}".format(file_name)) + continue + print("\tCopying {}".format(file)) + copy_files([file], working_dir) + cal_file = os.path.join(working_dir, file_name) + print("\t\tCalibrating {}".format(cal_file)) + instrument = get_instrument(file_name) + outputs = run_pipeline(cal_file, "uncal", "all", instrument, jump_pipe=True) + print("\t\tDone {}".format(file)) + except Exception as e: + print("ERROR: {}".format(e)) + + print("Done test") diff --git a/jwql/tests/test_setup.py b/jwql/tests/test_setup.py index 4cced8671..5eed720c8 100644 --- a/jwql/tests/test_setup.py +++ b/jwql/tests/test_setup.py @@ -29,4 +29,4 @@ def test_version_number(): assert isinstance(jwql.__version__, str) version_parts = jwql.__version__.split('.') - assert len(version_parts) == 3 + assert len(version_parts) >= 3 diff --git a/jwql/tests/test_utils.py b/jwql/tests/test_utils.py index 26d54eadb..cd50c6a01 100644 --- a/jwql/tests/test_utils.py +++ b/jwql/tests/test_utils.py @@ -22,7 +22,11 @@ from pathlib import Path import pytest -from jwql.utils.utils import copy_files, get_config, filename_parser, filesystem_path, _validate_config +from bokeh.models import LinearColorMapper +from bokeh.plotting import figure +import numpy as np + +from jwql.utils.utils import copy_files, get_config, filename_parser, filesystem_path, save_png, _validate_config # Determine if tests are being run on Github Actions @@ -43,7 +47,9 @@ 'program_id': '90002', 'suffix': 'rateints', 'visit': '001', - 'visit_group': '02'}), + 'visit_group': '02', + 'file_root': 'jw90002001001_02102_00001_nis', + 'group_root': 'jw90002001001_02102_00001'}), # Test full stage 1 and 2 filename ('jw00327001001_02101_00002_nrca1_rate.fits', @@ -57,7 +63,9 @@ 'program_id': '00327', 'suffix': 'rate', 'visit': '001', - 'visit_group': '02'}), + 'visit_group': '02', + 'file_root': 'jw00327001001_02101_00002_nrca1', + 'group_root': 'jw00327001001_02101_00002'}), # Test root stage 1 and 2 filename ('jw00327001001_02101_00002_nrca1', @@ -70,7 +78,20 @@ 'parallel_seq_id': '1', 'program_id': '00327', 'visit': '001', - 'visit_group': '02'}), + 'visit_group': '02', + 'file_root': 'jw00327001001_02101_00002_nrca1', + 'group_root': 'jw00327001001_02101_00002'}), + + # Test stage 2 MSA metadata filename + ('jw01118008001_01_msa.fits', + {'filename_type': 'stage_2_msa', + 'instrument': 'nirspec', + 'observation': '008', + 'program_id': '01118', + 'visit': '001', + 'detector': 'Unknown', + 'file_root': 'jw01118008001_01_msa', + 'group_root': 'jw01118008001_01_msa'}), # Test full stage 2c filename ('jw94015002002_02108_00001_mirimage_o002_crf.fits', @@ -85,7 +106,9 @@ 'program_id': '94015', 'suffix': 'crf', 'visit': '002', - 'visit_group': '02'}), + 'visit_group': '02', + 'file_root': 'jw94015002002_02108_00001_mirimage', + 'group_root': 'jw94015002002_02108_00001'}), # Test root stage 2c filename ('jw90001001003_02101_00001_nis_o001', @@ -99,7 +122,9 @@ 
'parallel_seq_id': '1', 'program_id': '90001', 'visit': '003', - 'visit_group': '02'}), + 'visit_group': '02', + 'file_root': 'jw90001001003_02101_00001_nis', + 'group_root': 'jw90001001003_02101_00001'}), # Test full stage 3 filename with target_id ('jw80600-o009_t001_miri_f1130w_i2d.fits', @@ -109,7 +134,10 @@ 'optical_elements': 'f1130w', 'program_id': '80600', 'suffix': 'i2d', - 'target_id': 't001'}), + 'target_id': 't001', + 'detector': 'Unknown', + 'file_root': 'jw80600-o009_t001_miri_f1130w', + 'group_root': 'jw80600-o009_t001_miri_f1130w'}), # Test full stage 3 filename with target_id and different ac_id ('jw80600-c0001_t001_miri_f1130w_i2d.fits', @@ -119,7 +147,10 @@ 'optical_elements': 'f1130w', 'program_id': '80600', 'suffix': 'i2d', - 'target_id': 't001'}), + 'target_id': 't001', + 'detector': 'Unknown', + 'file_root': 'jw80600-c0001_t001_miri_f1130w', + 'group_root': 'jw80600-c0001_t001_miri_f1130w'}), # Test full stage 3 filename with source_id ('jw80600-o009_s00001_miri_f1130w_i2d.fits', @@ -129,7 +160,10 @@ 'optical_elements': 'f1130w', 'program_id': '80600', 'source_id': 's00001', - 'suffix': 'i2d'}), + 'suffix': 'i2d', + 'detector': 'Unknown', + 'file_root': 'jw80600-o009_s00001_miri_f1130w', + 'group_root': 'jw80600-o009_s00001_miri_f1130w'}), # Test stage 3 filename with target_id and epoch ('jw80600-o009_t001-epoch1_miri_f1130w_i2d.fits', @@ -140,7 +174,10 @@ 'optical_elements': 'f1130w', 'program_id': '80600', 'suffix': 'i2d', - 'target_id': 't001'}), + 'target_id': 't001', + 'detector': 'Unknown', + 'file_root': 'jw80600-o009_t001-epoch1_miri_f1130w', + 'group_root': 'jw80600-o009_t001-epoch1_miri_f1130w'}), # Test stage 3 filename with source_id and epoch ('jw80600-o009_s00001-epoch1_miri_f1130w_i2d.fits', @@ -151,7 +188,10 @@ 'optical_elements': 'f1130w', 'program_id': '80600', 'source_id': 's00001', - 'suffix': 'i2d'}), + 'suffix': 'i2d', + 'detector': 'Unknown', + 'file_root': 'jw80600-o009_s00001-epoch1_miri_f1130w', + 'group_root': 'jw80600-o009_s00001-epoch1_miri_f1130w'}), # Test root stage 3 filename with target_id ('jw80600-o009_t001_miri_f1130w', @@ -160,7 +200,10 @@ 'instrument': 'miri', 'optical_elements': 'f1130w', 'program_id': '80600', - 'target_id': 't001'}), + 'target_id': 't001', + 'detector': 'Unknown', + 'file_root': 'jw80600-o009_t001_miri_f1130w', + 'group_root': 'jw80600-o009_t001_miri_f1130w'}), # Test root stage 3 filename with source_id ('jw80600-o009_s00001_miri_f1130w', @@ -169,7 +212,10 @@ 'instrument': 'miri', 'optical_elements': 'f1130w', 'program_id': '80600', - 'source_id': 's00001'}), + 'source_id': 's00001', + 'detector': 'Unknown', + 'file_root': 'jw80600-o009_s00001_miri_f1130w', + 'group_root': 'jw80600-o009_s00001_miri_f1130w'}), # Test full time series filename ('jw00733003001_02101_00002-seg001_nrs1_rate.fits', @@ -184,7 +230,27 @@ 'segment': '001', 'suffix': 'rate', 'visit': '001', - 'visit_group': '02'}), + 'visit_group': '02', + 'file_root': 'jw00733003001_02101_00002-seg001_nrs1', + 'group_root': 'jw00733003001_02101_00002-seg001'}), + + # Test full time series filename for stage 2c + ('jw00733003001_02101_00002-seg001_nrs1_o001_crfints.fits', + {'ac_id': 'o001', + 'activity': '01', + 'detector': 'nrs1', + 'exposure_id': '00002', + 'filename_type': 'time_series_2c', + 'instrument': 'nirspec', + 'observation': '003', + 'parallel_seq_id': '1', + 'program_id': '00733', + 'segment': '001', + 'suffix': 'crfints', + 'visit': '001', + 'visit_group': '02', + 'file_root': 'jw00733003001_02101_00002-seg001_nrs1', + 'group_root': 
'jw00733003001_02101_00002-seg001'}), # Test root time series filename ('jw00733003001_02101_00002-seg001_nrs1', @@ -198,7 +264,9 @@ 'program_id': '00733', 'segment': '001', 'visit': '001', - 'visit_group': '02'}), + 'visit_group': '02', + 'file_root': 'jw00733003001_02101_00002-seg001_nrs1', + 'group_root': 'jw00733003001_02101_00002-seg001'}), # Test full guider ID filename ('jw00729011001_gs-id_1_image_cal.fits', @@ -210,7 +278,25 @@ 'observation': '011', 'program_id': '00729', 'suffix': 'image_cal', - 'visit': '001'}), + 'visit': '001', + 'detector': 'Unknown', + 'file_root': 'jw00729011001_gs-id_1', + 'group_root': 'jw00729011001_gs-id_1'}), + + # Test full guider ID filename with 2-digit attempts + ('jw00729011001_gs-id_12_image_cal.fits', + {'date_time': None, + 'filename_type': 'guider', + 'guide_star_attempt_id': '12', + 'guider_mode': 'id', + 'instrument': 'fgs', + 'observation': '011', + 'program_id': '00729', + 'suffix': 'image_cal', + 'visit': '001', + 'detector': 'Unknown', + 'file_root': 'jw00729011001_gs-id_12', + 'group_root': 'jw00729011001_gs-id_12'}), # Test root guider ID filename ('jw00327001001_gs-id_2', @@ -221,7 +307,24 @@ 'instrument': 'fgs', 'observation': '001', 'program_id': '00327', - 'visit': '001'}), + 'visit': '001', + 'detector': 'Unknown', + 'file_root': 'jw00327001001_gs-id_2', + 'group_root': 'jw00327001001_gs-id_2'}), + + # Test root guider ID filename with 2-digit attempts + ('jw00327001001_gs-id_12', + {'date_time': None, + 'filename_type': 'guider', + 'guide_star_attempt_id': '12', + 'guider_mode': 'id', + 'instrument': 'fgs', + 'observation': '001', + 'program_id': '00327', + 'visit': '001', + 'detector': 'Unknown', + 'file_root': 'jw00327001001_gs-id_12', + 'group_root': 'jw00327001001_gs-id_12'}), # Test full guider non-ID filename ('jw86600048001_gs-fg_2016018175411_stream.fits', @@ -233,7 +336,10 @@ 'observation': '048', 'program_id': '86600', 'suffix': 'stream', - 'visit': '001'}), + 'visit': '001', + 'detector': 'Unknown', + 'file_root': 'jw86600048001_gs-fg_2016018175411', + 'group_root': 'jw86600048001_gs-fg_2016018175411'}), # Test root guider non-ID filename ('jw00729011001_gs-acq2_2019155024808', @@ -244,8 +350,37 @@ 'instrument': 'fgs', 'observation': '011', 'program_id': '00729', - 'visit': '001'}) - + 'visit': '001', + 'detector': 'Unknown', + 'file_root': 'jw00729011001_gs-acq2_2019155024808', + 'group_root': 'jw00729011001_gs-acq2_2019155024808'}), + + # Test segmented guider file + ('jw01118005001_gs-fg_2022150070312-seg002_uncal.fits', + {'date_time': '2022150070312', + 'filename_type': 'guider_segment', + 'guide_star_attempt_id': None, + 'guider_mode': 'fg', + 'instrument': 'fgs', + 'observation': '005', + 'program_id': '01118', + 'segment': '002', + 'suffix': 'uncal', + 'visit': '001', + 'detector': 'Unknown', + 'file_root': 'jw01118005001_gs-fg_2022150070312-seg002', + 'group_root': 'jw01118005001_gs-fg_2022150070312-seg002'}), + + # Test msa file + ('jw02560013001_01_msa.fits', + {'program_id': '02560', + 'observation': '013', + 'visit': '001', + 'filename_type': 'stage_2_msa', + 'instrument': 'nirspec', + 'detector': 'Unknown', + 'file_root': 'jw02560013001_01_msa', + 'group_root': 'jw02560013001_01_msa'}) ] @@ -307,8 +442,8 @@ def test_filename_parser_whole_filesystem(): for dir_name, _, file_list in os.walk(filesystem_dir): for file in file_list: if 'public' in file or 'proprietary' in file: - if file.endswith('.fits'): - all_files.append(os.path.join(dir_name, file)) + if file.endswith('.fits'): + 
all_files.append(os.path.join(dir_name, file)) # Run the filename_parser on all files bad_filenames = [] @@ -327,7 +462,7 @@ def test_filename_parser_whole_filesystem(): assert not fail, failure_msg -def test_filename_parser_nonJWST(): +def test_filename_parser_non_jwst(): """Attempt to generate a file parameter dictionary from a file that is not formatted in the JWST naming convention. Ensure the appropriate error is raised. @@ -340,14 +475,25 @@ def test_filename_parser_nonJWST(): @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_filesystem_path(): """Test that a file's location in the filesystem is returned""" - - filename = 'jw96003001001_02201_00001_nrca1_dark.fits' + filename = 'jw02733001001_02101_00001_nrcb2_rateints.fits' check = filesystem_path(filename) - location = os.path.join(get_config()['filesystem'], 'public', 'jw96003', 'jw96003001001', filename) + location = os.path.join(get_config()['filesystem'], 'public', 'jw02733', + 'jw02733001001', filename) assert check == location +def test_save_png(): + """Test that we can create a png file""" + plot = figure(title='test', tools='') + image = np.zeros((200, 200)) + image[100:105, 100:105] = 1 + ny, nx = image.shape + mapper = LinearColorMapper(palette='Viridis256', low=0, high=1.1) + imgplot = plot.image(image=[image], x=0, y=0, dw=nx, dh=ny, color_mapper=mapper, level="image") + save_png(plot, filename='test.png') + + @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_validate_config(): """Test that the config validator works.""" @@ -364,8 +510,6 @@ def test_validate_config(): good_config_dict = { "admin_account": "", "auth_mast": "", - "client_id": "", - "client_secret": "", "connection_string": "", "database": { "engine": "", diff --git a/jwql/tests/test_wata_monitor.py b/jwql/tests/test_wata_monitor.py new file mode 100644 index 000000000..0b9099beb --- /dev/null +++ b/jwql/tests/test_wata_monitor.py @@ -0,0 +1,315 @@ +#! /usr/bin/env python + +"""Tests for the WATA monitor module. + + Authors + ------- + + - Maria Pena-Guerrero + + Use + --- + + These tests can be run via the command line (omit the ``-s`` to + suppress verbose output to stdout): + :: + + pytest -s test_msata_monitor.py + """ + +import os +import pandas as pd +import numpy as np +import pytest +from datetime import datetime +from bokeh.embed import components +from bokeh.models import ColumnDataSource +from bokeh.plotting import figure + +from jwql.instrument_monitors.nirspec_monitors.ta_monitors.wata_monitor import WATA +from jwql.database.database_interface import NIRSpecTAQueryHistory +from jwql.utils.utils import get_config, ensure_dir_exists +from jwql.utils import monitor_utils, permissions + +ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') + +# define the type of a Bokeh plot type +bokeh_plot_type = type(figure()) + + +def define_testdata(): + """Define the data to test with. 
+ + Parameters + ---------- + nints : int + The number of integrations + + Returns + ------- + wata_data : pandas dataframe + """ + wata_dict = { + # info taken from main_hdr dict + 'filename': ['jw09999001001_02101_00001_nrs1_uncal.fits'], + 'date_obs': ['2022-06-22'], + 'visit_id': ['V09999001001P0000000002101'], + 'tafilter': ['F110W'], + 'detector': ['NRS1'], + 'readout': ['NRSRAPID'], + 'subarray': ['FULL'], + # info taken from ta_hdr dict + 'ta_status': ['SUCCESSFUL'], + 'status_reason': ['-999'], + 'star_name': ['-999'], + 'star_ra': [-999.0], + 'star_dec': [-999.0], + 'star_mag': [-999.0], + 'star_catalog': [-999], + 'planned_v2': [-999.0], + 'planned_v3': [-999.0], + 'stamp_start_col': [-999], + 'stamp_start_row': [-999], + 'star_detector': ['-999'], + 'max_val_box': [-999.0], + 'max_val_box_col': [-999.0], + 'max_val_box_row': [-999.0], + 'iterations': [-999], + 'corr_col': [-999.0], + 'corr_row': [-999.0], + 'stamp_final_col': [-999.0], + 'stamp_final_row': [-999.0], + 'detector_final_col': [-999.0], + 'detector_final_row': [-999.0], + 'final_sci_x': [-999.0], + 'final_sci_y': [-999.0], + 'measured_v2': [-999.0], + 'measured_v3': [-999.0], + 'ref_v2': [-999.0], + 'ref_v3': [-999.0], + 'v2_offset': [-999.0], + 'v3_offset': [-999.0], + 'sam_x': [-999.0], + 'sam_y': [-999.0], + } + # create the additional arrays + bool_status, status_colors = [], [] + for tas, do_str in zip(wata_dict['ta_status'], wata_dict['date_obs']): + if 'unsuccessful' not in tas.lower(): + bool_status.append(1) + status_colors.append('blue') + else: + bool_status.append(0) + status_colors.append('red') + + # add these to the bokeh data structure + wata_dict['ta_status_bool'] = bool_status + wata_dict['status_colors'] = status_colors + + # create the dataframe + wata_data = pd.DataFrame(wata_dict) + return wata_data + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_query_ta(): + """Test the ``query_mast`` function""" + + query_start = 59833.0 + query_end = 59844.6 + + # query mast + result = monitor_utils.mast_query_ta('nirspec', 'NRS_S1600A1_SLIT', query_start, query_end) + + # eliminate duplicates (sometimes rate files are returned with cal files) + result = [r for r in result if r['productLevel'] == '2b'] + assert len(result) == 16 + + # query local model + alternate = monitor_utils.model_query_ta('nirspec', 'NRS_S1600A1_SLIT', query_start, query_end) + assert len(alternate) == len(result) + + # check that filenames match up - model returns rootfiles, mast returns filenames + result = sorted(result, key=lambda x: x['filename']) + alternate = sorted(alternate, key=lambda x: x['root_name']) + for i, rootfile in enumerate(alternate): + assert rootfile['root_name'] in result[i]['filename'] + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_most_recent_search(): + """Test the ``most_recent_search`` function""" + + ta = WATA() + ta.aperture = 'NRS_S1600A1_SLIT' + ta.query_table = NIRSpecTAQueryHistory + + result = ta.most_recent_search() + + assert isinstance(result, float) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_status(): + """Test the ``plt_status`` function""" + + ta = WATA() + wata_data = define_testdata() + ta.source = ColumnDataSource(data=wata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_status() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central 
storage.') +def test_plt_residual_offsets(): + """Test the ``plt_residual_offsets`` function""" + + ta = WATA() + wata_data = define_testdata() + ta.source = ColumnDataSource(data=wata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_residual_offsets() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_v2offset_time(): + """Test the ``plt_v2offset_time`` function""" + + ta = WATA() + wata_data = define_testdata() + ta.source = ColumnDataSource(data=wata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_v2offset_time() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_v3offset_time(): + """Test the ``plt_v3offset_time`` function""" + + ta = WATA() + wata_data = define_testdata() + ta.source = ColumnDataSource(data=wata_data) + ta.add_time_column() + ta.setup_date_range() + result = ta.plt_v3offset_time() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_plt_mag_time(): + """Test the ``plt_mag_time`` function""" + + ta = WATA() + wata_data = define_testdata() + ta.source = ColumnDataSource(data=wata_data) + ta.add_time_column() + ta.setup_date_range() + + # create the arrays per filter and readout pattern + nrsrapid_f140x, nrsrapid_f110w, nrsrapid_clear = [], [], [] + nrsrapidd6_f140x, nrsrapidd6_f110w, nrsrapidd6_clear = [], [], [] + filter_used, readout = ta.source.data['tafilter'], ta.source.data['readout'] + max_val_box, time_arr = ta.source.data['max_val_box'], ta.source.data['time_arr'] + for i, val in enumerate(max_val_box): + if '140' in filter_used[i]: + if readout[i].lower() == 'nrsrapid': + nrsrapid_f140x.append(val) + nrsrapid_f110w.append(np.NaN) + nrsrapid_clear.append(np.NaN) + nrsrapidd6_f140x.append(np.NaN) + nrsrapidd6_f110w.append(np.NaN) + nrsrapidd6_clear.append(np.NaN) + elif readout[i].lower() == 'nrsrapidd6': + nrsrapid_f140x.append(np.NaN) + nrsrapid_f110w.append(np.NaN) + nrsrapid_clear.append(np.NaN) + nrsrapidd6_f140x.append(val) + nrsrapidd6_f110w.append(np.NaN) + nrsrapidd6_clear.append(np.NaN) + elif '110' in filter_used[i]: + if readout[i].lower() == 'nrsrapid': + nrsrapid_f140x.append(np.NaN) + nrsrapid_f110w.append(val) + nrsrapid_clear.append(np.NaN) + nrsrapidd6_f140x.append(np.NaN) + nrsrapidd6_f110w.append(np.NaN) + nrsrapidd6_clear.append(np.NaN) + elif readout[i].lower() == 'nrsrapidd6': + nrsrapid_f140x.append(np.NaN) + nrsrapid_f110w.append(np.NaN) + nrsrapid_clear.append(np.NaN) + nrsrapidd6_f140x.append(np.NaN) + nrsrapidd6_f110w.append(val) + nrsrapidd6_clear.append(np.NaN) + else: + if readout[i].lower() == 'nrsrapid': + nrsrapid_f140x.append(np.NaN) + nrsrapid_f110w.append(np.NaN) + nrsrapid_clear.append(val) + nrsrapidd6_f140x.append(np.NaN) + nrsrapidd6_f110w.append(np.NaN) + nrsrapidd6_clear.append(np.NaN) + elif readout[i].lower() == 'nrsrapidd6': + nrsrapid_f140x.append(np.NaN) + nrsrapid_f110w.append(np.NaN) + nrsrapid_clear.append(np.NaN) + nrsrapidd6_f140x.append(np.NaN) + nrsrapidd6_f110w.append(np.NaN) + nrsrapidd6_clear.append(val) + # add to the bokeh data structure + ta.source.data["nrsrapid_f140x"] = nrsrapid_f140x + ta.source.data["nrsrapid_f110w"] = nrsrapid_f110w + ta.source.data["nrsrapid_clear"] = nrsrapid_clear + ta.source.data["nrsrapidd6_f140x"] = nrsrapidd6_f140x + ta.source.data["nrsrapidd6_f110w"] 
= nrsrapidd6_f110w + ta.source.data["nrsrapidd6_clear"] = nrsrapidd6_clear + result = ta.plt_mag_time() + + assert bokeh_plot_type == type(result) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_get_unsuccessful_ta(): + """Test the ``get_unsuccessful_ta`` function""" + + ta = WATA() + wata_data = define_testdata() + ta.source = ColumnDataSource(data=wata_data) + ta.add_time_column() + ta.setup_date_range() + + list_failed, list_else = ta.get_unsuccessful_ta('ta_status_bool') + + assert list_else[0] == ta.source.data['ta_status_bool'][0] + assert np.isnan(list_failed[0]) + + +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') +def test_mk_plt_layout(): + """Test the ``mk_plt_layout`` function""" + + truth_script, truth_div = components(figure()) + + ta = WATA() + ta.output_dir = os.path.join(get_config()['outputs'], 'wata_monitor/tests') + ensure_dir_exists(ta.output_dir) + ta.output_file_name = os.path.join(ta.output_dir, "wata_layout.html") + ta.wata_data = define_testdata() + script, div = ta.mk_plt_layout() + + # set group write permission for the test file + # to make sure others can overwrite it + permissions.set_permissions(ta.output_file_name) + + assert type(script) == type(truth_script) + assert type(div) == type(truth_div) diff --git a/jwql/utils/anomaly_query_config.py b/jwql/utils/anomaly_query_config.py deleted file mode 100644 index c3ee848f6..000000000 --- a/jwql/utils/anomaly_query_config.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Globally defined and used variables for the JWQL query anomaly -feature. Variables will be re-defined when anomaly query forms are -submitted. - -Authors -------- - - - Teagan King - - -Use ---- - This variables within this module are intended to be directly - imported, e.g.: - :: - - from jwql.utils.query_config import CHOSEN_INSTRUMENTS -""" -# Anomalies selected by user in anomaly_query -ANOMALIES_CHOSEN_FROM_CURRENT_ANOMALIES = {} - -# Apertures selected by user in anomaly_query -APERTURES_CHOSEN = {} - -# Anomalies available to select after instruments are selected in anomaly_query -# Default is all anomalies common to all instruments -CURRENT_ANOMALIES = {} - -# Observing modes selected by user in anomaly_query -DETECTORS_CHOSEN = {} - -# Maximum exposure time selected by user in anomaly_query. -# Corresponds to EFFEXPTM in MAST. -EXPTIME_MAX = ['999999999999999'] # select all as default - -# Minimum exposure time selected by user in anomaly_query. -# Corresponds to EFFEXPTM in MAST. -EXPTIME_MIN = ['0'] # select all as default - -# Exposure types selected by user in anomaly_query -EXPTYPES_CHOSEN = {} - -# Filters selected by user in anomaly_query -FILTERS_CHOSEN = {} - -# Gratings selected by user in anomaly_query -GRATINGS_CHOSEN = {} - -# Instruments selected by user in anomaly_query -INSTRUMENTS_CHOSEN = [] - -# Include all of the parameters selected by user in anomaly query -PARAMETERS = {} - -# Read patterns selected by user in anomaly_query -READPATTS_CHOSEN = {} - -# Thumbnails selected by user in anomaly_query -THUMBNAILS = [] diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py index b0f87341e..39db77e92 100644 --- a/jwql/utils/constants.py +++ b/jwql/utils/constants.py @@ -8,6 +8,10 @@ - Bryan Hilbert - Ben Sunnquist - Teagan King + - Mike Engesser + - Maria Pena-Guerrero + - Rachel Cooper + - Brad Sappington Use --- @@ -30,338 +34,985 @@ # and the second for y coordinates. Within each tuple are value for # starting, ending, and step size. 
Step size is needed for MIRI, where # the pixels corresponding to the 4 amplifiers are interleaved. -AMPLIFIER_BOUNDARIES = {'nircam': {'1': [(0, 512, 1), (0, 2048, 1)], - '2': [(512, 1024, 1), (0, 2048, 1)], - '3': [(1024, 1536, 1), (0, 2048, 1)], - '4': [(1536, 2048, 1), (0, 2048, 1)]}, - 'niriss': {'1': [(0, 2048, 1), (0, 512, 1)], - '2': [(0, 2048, 1), (512, 1024, 1)], - '3': [(0, 2048, 1), (1024, 1536, 1)], - '4': [(0, 2048, 1), (1536, 2048, 1)]}, - 'fgs': {'1': [(0, 512, 1), (0, 2048, 1)], - '2': [(512, 1024, 1), (0, 2048, 1)], - '3': [(1024, 1536, 1), (0, 2048, 1)], - '4': [(1536, 2048, 1), (0, 2048, 1)]}, - 'nirspec': {'1': [(0, 2048, 1), (0, 512, 1)], - '2': [(0, 2048, 1), (512, 1024, 1)], - '3': [(0, 2048, 1), (1024, 1536, 1)], - '4': [(0, 2048, 1), (1536, 2048, 1)]}, - 'miri': {'1': [(0, 1032, 4), (0, 1024, 1)], - '2': [(1, 1032, 4), (0, 1024, 1)], - '3': [(2, 1032, 4), (0, 1024, 1)], - '4': [(3, 1032, 4), (0, 1024, 1)]}} +AMPLIFIER_BOUNDARIES = { + "nircam": { + "1": [(0, 512, 1), (0, 2048, 1)], + "2": [(512, 1024, 1), (0, 2048, 1)], + "3": [(1024, 1536, 1), (0, 2048, 1)], + "4": [(1536, 2048, 1), (0, 2048, 1)], + }, + "niriss": { + "1": [(0, 2048, 1), (0, 512, 1)], + "2": [(0, 2048, 1), (512, 1024, 1)], + "3": [(0, 2048, 1), (1024, 1536, 1)], + "4": [(0, 2048, 1), (1536, 2048, 1)], + }, + "fgs": { + "1": [(0, 512, 1), (0, 2048, 1)], + "2": [(512, 1024, 1), (0, 2048, 1)], + "3": [(1024, 1536, 1), (0, 2048, 1)], + "4": [(1536, 2048, 1), (0, 2048, 1)], + }, + "nirspec": { + "1": [(0, 2048, 1), (0, 512, 1)], + "2": [(0, 2048, 1), (512, 1024, 1)], + "3": [(0, 2048, 1), (1024, 1536, 1)], + "4": [(0, 2048, 1), (1536, 2048, 1)], + }, + "miri": { + "1": [(0, 1032, 4), (0, 1024, 1)], + "2": [(1, 1032, 4), (0, 1024, 1)], + "3": [(2, 1032, 4), (0, 1024, 1)], + "4": [(3, 1032, 4), (0, 1024, 1)], + }, +} # Dictionary describing instruments to which anomalies apply ANOMALIES_PER_INSTRUMENT = { # anomalies affecting all instruments: - 'cosmic_ray_shower': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'], - 'diffraction_spike': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'], - 'excessive_saturation': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'], - 'guidestar_failure': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'], - 'persistence': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'], + "cosmic_ray_shower": ["fgs", "miri", "nircam", "niriss", "nirspec"], + "diffraction_spike": ["fgs", "miri", "nircam", "niriss", "nirspec"], + "excessive_saturation": ["fgs", "miri", "nircam", "niriss", "nirspec"], + "guidestar_failure": ["fgs", "miri", "nircam", "niriss", "nirspec"], + "persistence": ["fgs", "miri", "nircam", "niriss", "nirspec"], # anomalies affecting multiple instruments: - 'crosstalk': ['fgs', 'nircam', 'niriss', 'nirspec'], - 'data_transfer_error': ['fgs', 'nircam', 'niriss', 'nirspec'], - 'ghost': ['fgs', 'nircam', 'niriss', 'nirspec'], - 'snowball': ['fgs', 'nircam', 'niriss', 'nirspec'], + "crosstalk": ["fgs", "nircam", "niriss", "nirspec"], + "data_transfer_error": ["fgs", "nircam", "niriss", "nirspec"], + "ghost": ["fgs", "nircam", "niriss", "nirspec"], + "snowball": ["fgs", "nircam", "niriss", "nirspec"], # instrument-specific anomalies: - 'column_pull_up': ['miri'], - 'dominant_msa_leakage': ['nirspec'], - 'dragons_breath': ['nircam'], - 'glow': ['miri'], - 'internal_reflection': ['miri'], - 'optical_short': ['nirspec'], # Only for MOS observations - 'row_pull_down': ['miri'], + "column_pull_up": ["miri"], + "column_pull_down": ["miri"], + "Dominant_MSA_Leakage": ["nirspec"], + 
"dragons_breath": ["nircam"], + "MRS_Glow": ["miri"], + "MRS_Zipper": ["miri"], + "internal_reflection": ["miri"], + "optical_short": ["nirspec"], # Only for MOS observations + "row_pull_up": ["miri"], + "row_pull_down": ["miri"], + "LRS_Contamination": ["miri"], + "tree_rings": ["miri"], + "scattered_light": ["niriss", "nircam"], + "claws": ["nircam"], + "wisps": ["nircam"], + "tilt_event": ["nircam"], + "light_saber": ["niriss"], # additional anomalies: - 'other': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec']} + "other": ["fgs", "miri", "nircam", "niriss", "nirspec"], +} +# anomalies that shouldn't be 'titleized' +special_cases = ["Dominant_MSA_Leakage", "MRS_Glow", "MRS_Zipper", "LRS_Contamination"] # Defines the possible anomalies to flag through the web app -ANOMALY_CHOICES = [(anomaly, inflection.titleize(anomaly)) if anomaly != "dominant_msa_leakage" - else (anomaly, "Dominant MSA Leakage") - for anomaly in ANOMALIES_PER_INSTRUMENT] - -ANOMALY_CHOICES_FGS = [(anomaly, inflection.titleize(anomaly)) for anomaly in ANOMALIES_PER_INSTRUMENT - if 'fgs' in ANOMALIES_PER_INSTRUMENT[anomaly]] - -ANOMALY_CHOICES_MIRI = [(anomaly, inflection.titleize(anomaly)) for anomaly in ANOMALIES_PER_INSTRUMENT - if 'miri' in ANOMALIES_PER_INSTRUMENT[anomaly]] - -ANOMALY_CHOICES_NIRCAM = [(anomaly, inflection.titleize(anomaly)) for anomaly in ANOMALIES_PER_INSTRUMENT - if 'nircam' in ANOMALIES_PER_INSTRUMENT[anomaly]] - -ANOMALY_CHOICES_NIRISS = [(anomaly, inflection.titleize(anomaly)) for anomaly in ANOMALIES_PER_INSTRUMENT - if 'niriss' in ANOMALIES_PER_INSTRUMENT[anomaly]] - -ANOMALY_CHOICES_NIRSPEC = [(anomaly, inflection.titleize(anomaly)) if anomaly != "dominant_msa_leakage" - else (anomaly, "Dominant MSA Leakage") - for anomaly in ANOMALIES_PER_INSTRUMENT - if 'nirspec' in ANOMALIES_PER_INSTRUMENT[anomaly]] - -ANOMALY_CHOICES_PER_INSTRUMENT = {'fgs': ANOMALY_CHOICES_FGS, - 'miri': ANOMALY_CHOICES_MIRI, - 'nircam': ANOMALY_CHOICES_NIRCAM, - 'niriss': ANOMALY_CHOICES_NIRISS, - 'nirspec': ANOMALY_CHOICES_NIRSPEC - } - -APERTURES_PER_INSTRUMENT = {'NIRCAM': ['NRCA1_FULL', 'NRCA2_FULL', 'NRCA3_FULL', 'NRCA4_FULL', - 'NRCA5_FULL', 'NRCB1_FULL', 'NRCB2_FULL', 'NRCB3_FULL', - 'NRCB4_FULL', 'NRCB5_FULL'], - 'NIRISS': ['NIS_CEN', 'NIS_SOSSFULL', 'NIS_AMIFULL', 'NIS_AMI1', - 'NIS_SUBSTRIP256', 'NIS_SUBSTRIP96', - 'NIS_SUB64', 'NIS_SUB128', 'NIS_SUB256'], - 'NIRSPEC': ['NRS_FULL_MSA', 'NRS_FULL_IFU', 'NRS_S200A1_SLIT', 'NRS_S200A2_SLIT', - 'NRS_S400A1_SLIT', 'NRS_S1600A1_SLIT', 'NRS_S200B1_SLIT'], - 'MIRI': ['MIRIM_SUB64', 'MIRIM_SUB128', 'MIRIM_SUB256', 'MIRIM_MASK1140', - 'MIRIM_MASK1065', 'MIRIM_MASK1550', 'MIRIM_MASKLYOT', - 'MIRIM_BRIGHTSKY', 'MIRIM_SLITLESSPRISM'], - 'FGS': ['FGS1_FULL', 'FGS2_FULL']} +ANOMALY_CHOICES = [ + (anomaly, inflection.titleize(anomaly)) + if anomaly not in special_cases + else (anomaly, anomaly.replace("_", " ")) + for anomaly in ANOMALIES_PER_INSTRUMENT +] + +ANOMALY_CHOICES_FGS = [ + (anomaly, inflection.titleize(anomaly)) + for anomaly in ANOMALIES_PER_INSTRUMENT + if "fgs" in ANOMALIES_PER_INSTRUMENT[anomaly] +] + +ANOMALY_CHOICES_MIRI = [ + (anomaly, inflection.titleize(anomaly)) + if anomaly not in special_cases + else (anomaly, anomaly.replace("_", " ")) + for anomaly in ANOMALIES_PER_INSTRUMENT + if "miri" in ANOMALIES_PER_INSTRUMENT[anomaly] +] + +ANOMALY_CHOICES_NIRCAM = [ + (anomaly, inflection.titleize(anomaly)) + for anomaly in ANOMALIES_PER_INSTRUMENT + if "nircam" in ANOMALIES_PER_INSTRUMENT[anomaly] +] + +ANOMALY_CHOICES_NIRISS = [ + (anomaly, 
inflection.titleize(anomaly)) + for anomaly in ANOMALIES_PER_INSTRUMENT + if "niriss" in ANOMALIES_PER_INSTRUMENT[anomaly] +] + +ANOMALY_CHOICES_NIRSPEC = [ + (anomaly, inflection.titleize(anomaly)) + if anomaly not in special_cases + else (anomaly, anomaly.replace("_", " ")) + for anomaly in ANOMALIES_PER_INSTRUMENT + if "nirspec" in ANOMALIES_PER_INSTRUMENT[anomaly] +] + +ANOMALY_CHOICES_PER_INSTRUMENT = { + "fgs": ANOMALY_CHOICES_FGS, + "miri": ANOMALY_CHOICES_MIRI, + "nircam": ANOMALY_CHOICES_NIRCAM, + "niriss": ANOMALY_CHOICES_NIRISS, + "nirspec": ANOMALY_CHOICES_NIRSPEC, +} + +APERTURES_PER_INSTRUMENT = { + "nircam": [], # NIRCAM aperture redundant, can just use Subarray + Detector + "niriss": [], # NIRISS preferred subarray only + "nirspec": [ + "NRS_FULL_MSA", + "NRS_FULL_IFU", + "NRS_S200A1_SLIT", + "NRS_S200A2_SLIT", + "NRS_S400A1_SLIT", + "NRS_S1600A1_SLIT", + "NRS_S200B1_SLIT", + ], + "miri": [], # MIRI preferred subarray only + "fgs": ["FGS1_FULL", "FGS2_FULL"], +} # Observing templates used for ASIC tuning. MAST query results that # have one of these templates will be ignored -ASIC_TEMPLATES = ['ISIM ASIC Tuning'] +ASIC_TEMPLATES = ["ISIM ASIC Tuning"] # Bad pixel types by the type of data used to find them -BAD_PIXEL_TYPES = ['DEAD', 'HOT', 'LOW_QE', 'RC', 'OPEN', 'ADJ_OPEN', 'TELEGRAPH', 'OTHER_BAD_PIXEL'] -DARKS_BAD_PIXEL_TYPES = ['HOT', 'RC', 'OTHER_BAD_PIXEL', 'TELEGRAPH'] -FLATS_BAD_PIXEL_TYPES = ['DEAD', 'OPEN', 'ADJ_OPEN', 'LOW_QE'] +BAD_PIXEL_TYPES = [ + "DEAD", + "HOT", + "LOW_QE", + "RC", + "OPEN", + "ADJ_OPEN", + "TELEGRAPH", + "OTHER_BAD_PIXEL", +] +DARKS_BAD_PIXEL_TYPES = ["HOT", "RC", "OTHER_BAD_PIXEL", "TELEGRAPH"] +FLATS_BAD_PIXEL_TYPES = ["DEAD", "OPEN", "ADJ_OPEN", "LOW_QE"] + +# The maximum number of bad pixels allowed on a bad pixel monitor plot. If there +# are more than this number of bad pixels identified for a particular type of +# bad pixel, then the figure is saved as a png rather than an interactive plot, +# in order to reduce the amount of data sent to the browser. +BAD_PIXEL_MONITOR_MAX_POINTS_TO_PLOT = 15000 # Possible exposure types for dark current data -DARK_EXP_TYPES = {'nircam': ['NRC_DARK'], - 'niriss': ['NIS_DARK'], - 'miri': ['MIR_DARKIMG', 'MIR_DARKMRS', 'MIR_DARKALL'], - 'nirspec': ['NRS_DARK'], - 'fgs': ['FGS_DARK']} +DARK_EXP_TYPES = { + "nircam": ["NRC_DARK"], + "niriss": ["NIS_DARK"], + "miri": ["MIR_DARKIMG", "MIR_DARKMRS", "MIR_DARKALL"], + "nirspec": ["NRS_DARK"], + "fgs": ["FGS_DARK"], +} + +# Types of potential bad pixels identified by the dark current monitor +DARK_MONITOR_BADPIX_TYPES = ["hot", "dead", "noisy"] + +# Maximum number of potential new bad pixels to overplot on the dark monitor +# mean dark image plot. 
Too many overplotted points starts to obscure the image +# itself, and are most likely not really new bad pixels +DARK_MONITOR_MAX_BADPOINTS_TO_PLOT = 1000 # Dictionary of observing modes available for each instrument -DETECTOR_PER_INSTRUMENT = {'miri': ['MIRIFULONG', 'MIRIFUSHORT', 'MIRIMAGE'], - 'nircam': ['NRCB4', 'NRCA4', 'NRCA2', 'NRCALONG', - 'NRCBLONG', 'NRCB2', 'NRCB3', 'NRCA1', - 'NRCA3', 'NRCB1'], - 'niriss': ['NIS'], - 'nirspec': ['NRS1', 'NRS2'], - 'fgs': ['GUIDER1', 'GUIDER2']} - -EXP_TYPE_PER_INSTRUMENT = {'fgs': ['FGS_FOCUS', 'FGS_IMAGE', 'FGS_INTFLAT', - 'FGS_SKYFLAT', 'FGS_DARK'], - 'miri': ['MIR_FLATMRS', 'MIR_MRS', 'MIR_FLATIMAGE', - 'MIR_DARK', 'MIR_LYOT', 'MIR_IMAGE', - 'MIR_LRS-FIXEDSLIT', 'MIR_LRS-SLITLESS', - 'MIR_CORONCAL', 'MIR_4QPM', 'MIR_FLATIMAGE-EXT', - 'MIR_TACQ', 'MIR_DARKMRS', - 'MIR_DARKIMG', 'MIR_FLATMRS-EXT', 'MIR_TACONFIRM'], - 'nircam': ['NRC_LED', 'NRC_DARK', 'NRC_CORON', - 'NRC_IMAGE', 'NRC_FOCUS', 'NRC_TSGRISM', - 'NRC_TSIMAGE', 'NRC_WFSS', 'NRC_TACQ', - 'NRC_TACONFIRM', 'NRC_FLAT', 'NRC_GRISM'], - 'niriss': ['NIS_IMAGE', 'NIS_FOCUS', 'NIS_SOSS', - 'NIS_AMI', 'NIS_LAMP', 'NIS_WFSS', 'NIS_DARK', - 'NIS_EXTCAL', 'NIS_TACONFIRM', 'NIS_TACQ'], - 'nirspec': ['NRS_IFU', 'NRS_MSASPEC', 'NRS_BRIGHTOBJ', 'NRS_DARK', - 'NRS_AUTOWAVE', 'NRS_LAMP', 'NRS_AUTOFLAT', 'NRS_IMAGE', - 'NRS_CONFIRM', 'NRS_FIXEDSLIT', 'NRS_MIMF', 'NRS_FOCUS', - 'NRS_TACONFIRM', 'NRS_WATA', 'NRS_MSATA']} - -EXPTYPES = {"nircam": {"imaging": "NRC_IMAGE", "ts_imaging": "NRC_TSIMAGE", - "wfss": "NRC_WFSS", "ts_grism": "NRC_TSGRISM"}, - "niriss": {"imaging": "NIS_IMAGE", "ami": "NIS_IMAGE", - "pom": "NIS_IMAGE", "wfss": "NIS_WFSS"}, - "fgs": {"imaging": "FGS_IMAGE"}} - -FLAT_EXP_TYPES = {'nircam': ['NRC_FLAT'], - 'niriss': ['NIS_LAMP'], - 'miri': ['MIR_FLATIMAGE', 'MIR_FLATMRS'], - 'nirspec': ['NRS_AUTOFLAT', 'NRS_LAMP'], - 'fgs': ['FGS_INTFLAT']} - -FILTERS_PER_INSTRUMENT = {'fgs': [], - 'miri': ['F1000W', 'F1130W', 'F1280W', 'OPAQUE', 'F2300C', 'F560W', 'P750L', - 'F1500W', 'F2550W', 'F770W', 'FLENS', 'FND', 'F2100W', 'F1800W', - 'F1550C', 'F1140C', 'F2550WR', 'F1065C'], - 'nircam': ['F070W', 'F090W', 'F115W', 'F140M', 'F150W', 'F150W2', 'F182M', - 'F187N', 'F200W', 'F210M', 'F212N', 'F250M', 'F277W', 'F300M', - 'F322W2', 'F335M', 'F356W', 'F360M', 'F410M', 'F430M', 'F444W', - 'F460M', 'F480M'], - 'niriss': ['CLEAR', 'F380M', 'F480M', 'GR150R', 'F430M', 'GR150C', 'F444W', - 'F356W', 'F277W'], - 'nirspec': ['F290LP', 'F170LP', 'OPAQUE', 'F100LP', 'F070LP', 'F140X', 'CLEAR', 'F110W']} - -FOUR_AMP_SUBARRAYS = ['WFSS128R', 'WFSS64R'] +DETECTOR_PER_INSTRUMENT = { + "miri": ["MIRIFULONG", "MIRIFUSHORT", "MIRIMAGE"], + "nircam": [ + "NRCB4", + "NRCA4", + "NRCA2", + "NRCALONG", + "NRCBLONG", + "NRCB2", + "NRCB3", + "NRCA1", + "NRCA3", + "NRCB1", + ], + "niriss": ["NIS"], + "nirspec": ["NRS1", "NRS2"], + "fgs": ["GUIDER1", "GUIDER2"], +} + +# Default time range to use for EDB monitor telemetry plots. The plots will +# go from this starting time to the monitor run time, unless otherwise requested. +EDB_DEFAULT_PLOT_RANGE = 14 # days. 
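The new ``EDB_DEFAULT_PLOT_RANGE`` constant above only records a number of days; the EDB telemetry monitor code that consumes it is not included in this diff. As a minimal sketch of how such a default could be turned into a plotting window, assuming the monitor simply subtracts the range from its run time (the ``default_plot_window`` helper and the use of ``astropy.time`` here are illustrative assumptions, not jwql API)::

    from astropy.time import Time, TimeDelta

    EDB_DEFAULT_PLOT_RANGE = 14  # days, mirroring the constant defined above

    def default_plot_window(end_time=None, range_days=EDB_DEFAULT_PLOT_RANGE):
        """Return (start, end) Time objects spanning the default plotting range."""
        end = Time.now() if end_time is None else Time(end_time)
        start = end - TimeDelta(range_days, format='jd')  # 'jd' format is in days
        return start, end

    # e.g. plot telemetry for the 14 days ending at the monitor run time
    start, end = default_plot_window()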
+ +EXP_TYPE_PER_INSTRUMENT = { + "fgs": ["FGS_FOCUS", "FGS_IMAGE", "FGS_INTFLAT", "FGS_SKYFLAT", "FGS_DARK"], + "miri": [ + "MIR_FLATMRS", + "MIR_MRS", + "MIR_FLATIMAGE", + "MIR_DARK", + "MIR_LYOT", + "MIR_IMAGE", + "MIR_LRS-FIXEDSLIT", + "MIR_LRS-SLITLESS", + "MIR_CORONCAL", + "MIR_4QPM", + "MIR_FLATIMAGE-EXT", + "MIR_TACQ", + "MIR_DARKMRS", + "MIR_DARKIMG", + "MIR_FLATMRS-EXT", + "MIR_TACONFIRM", + ], + "nircam": [ + "NRC_LED", + "NRC_DARK", + "NRC_CORON", + "NRC_IMAGE", + "NRC_FOCUS", + "NRC_TSGRISM", + "NRC_TSIMAGE", + "NRC_WFSS", + "NRC_TACQ", + "NRC_TACONFIRM", + "NRC_FLAT", + "NRC_GRISM", + ], + "niriss": [ + "NIS_IMAGE", + "NIS_FOCUS", + "NIS_SOSS", + "NIS_AMI", + "NIS_LAMP", + "NIS_WFSS", + "NIS_DARK", + "NIS_EXTCAL", + "NIS_TACONFIRM", + "NIS_TACQ", + ], + "nirspec": [ + "NRS_IFU", + "NRS_MSASPEC", + "NRS_BRIGHTOBJ", + "NRS_DARK", + "NRS_AUTOWAVE", + "NRS_LAMP", + "NRS_AUTOFLAT", + "NRS_IMAGE", + "NRS_CONFIRM", + "NRS_FIXEDSLIT", + "NRS_MIMF", + "NRS_FOCUS", + "NRS_TACONFIRM", + "NRS_WATA", + "NRS_MSATA", + ], +} + +EXPTYPES = { + "nircam": { + "imaging": "NRC_IMAGE", + "ts_imaging": "NRC_TSIMAGE", + "wfss": "NRC_WFSS", + "ts_grism": "NRC_TSGRISM", + }, + "niriss": { + "imaging": "NIS_IMAGE", + "ami": "NIS_IMAGE", + "pom": "NIS_IMAGE", + "wfss": "NIS_WFSS", + }, + "fgs": {"imaging": "FGS_IMAGE"}, +} + +EXPOSURE_PAGE_SUFFIX_ORDER = [ + "uncal", + "dark", + "trapsfilled", + "ramp", + "rate", + "rateints", + "fitopt", + "cal", + "calints", + "msa", + "crf", + "crfints", + "bsub", + "bsubints", + "i2d", + "s2d", + "s3d", + "x1d", + "x1dints", + "cat", + "segm", + "c1d", + "psfstack", + "psfalign", + "psfsub", + "amiavg", + "aminorm", + "ami", + "psf-amiavg", + "phot", + "whtlt", + "wfscmb", +] + +# Filename Component Lengths +FILE_AC_CAR_ID_LEN = 4 +FILE_AC_O_ID_LEN = 3 +FILE_ACT_LEN = 2 +FILE_DATETIME_LEN = 13 +FILE_EPOCH_LEN = 1 +FILE_GUIDESTAR_ATTMPT_LEN_MIN = 1 +FILE_GUIDESTAR_ATTMPT_LEN_MAX = 3 +FILE_OBS_LEN = 3 +FILE_PARALLEL_SEQ_ID_LEN = 1 +FILE_PROG_ID_LEN = 5 +FILE_SEG_LEN = 3 +FILE_SOURCE_ID_LEN = 5 +FILE_TARG_ID_LEN = 3 +FILE_VISIT_GRP_LEN = 2 +FILE_VISIT_LEN = 3 + +# MSA metadata file do not have a standard suffix attached +FILETYPE_WO_STANDARD_SUFFIX = "msa.fits" + +FLAT_EXP_TYPES = { + "nircam": ["NRC_FLAT"], + "niriss": ["NIS_LAMP"], + "miri": ["MIR_FLATIMAGE", "MIR_FLATMRS"], + "nirspec": ["NRS_AUTOFLAT", "NRS_LAMP"], + "fgs": ["FGS_INTFLAT"], +} + +# output subdirectories to keep track of via the filesytem monitor +FILESYSTEM_MONITOR_SUBDIRS = ["logs", "outputs", "preview_images", "thumbnails", "all"] + +FILTERS_PER_INSTRUMENT = { + "fgs": [], + "miri": [ + "F560W", + "F770W", + "F1000W", + "F1065C", + "F1130W", + "F1140C", + "F1280W", + "F1500W", + "F1550C", + "F1800W", + "F2100W", + "F2300C", + "F2550W", + "F2550WR", + "FLENS", + "FND", + "OPAQUE", + "P750L", + ], + "nircam": [ + "F070W", + "F090W", + "F115W", + "F140M", + "F150W", + "F150W2", + "F182M", + "F187N", + "F200W", + "F210M", + "F212N", + "WLP4", + "F277W", + "F356W", + "F444W", + "F300M", + "F335M", + "F360M", + "F410M", + "F430M", + "F460M", + "F480M", + "F250M", + "F322W2", + ], + "niriss": [ + "F090W", + "F115W", + "F140M", + "F150W", + "F200W", + "F277W", + "F356W", + "F380M", + "F430M", + "F444W", + "F480M", + "GR150C", + "GR150R", + ], + "nirspec": [ + "CLEAR", + "F070LP", + "F100LP", + "F110W", + "F140X", + "F170LP", + "F290LP", + "OPAQUE", + "P750L", + ], +} + +FOUR_AMP_SUBARRAYS = ["WFSS128R", "WFSS64R"] # Names of full-frame apertures for all instruments -FULL_FRAME_APERTURES = {'NIRCAM': 
['NRCA1_FULL', 'NRCA2_FULL', 'NRCA3_FULL', 'NRCA4_FULL', - 'NRCA5_FULL', 'NRCB1_FULL', 'NRCB2_FULL', 'NRCB3_FULL', - 'NRCB4_FULL', 'NRCB5_FULL'], - 'NIRISS': ['NIS_CEN'], - 'NIRSPEC': ['NRS1_FULL', 'NRS2_FULL'], - 'MIRI': ['MIRIM_FULL'], - 'FGS': ['FGS1_FULL', 'FGS2_FULL'] - } +FULL_FRAME_APERTURES = { + "NIRCAM": [ + "NRCA1_FULL", + "NRCA2_FULL", + "NRCA3_FULL", + "NRCA4_FULL", + "NRCA5_FULL", + "NRCB1_FULL", + "NRCB2_FULL", + "NRCB3_FULL", + "NRCB4_FULL", + "NRCB5_FULL", + ], + "NIRISS": ["NIS_CEN"], + "NIRSPEC": ["NRS1_FULL", "NRS2_FULL"], + "MIRI": ["MIRIM_FULL"], + "FGS": ["FGS1_FULL", "FGS2_FULL"], +} # Possible suffix types for nominal files -GENERIC_SUFFIX_TYPES = ['uncal', 'cal', 'rateints', 'rate', 'trapsfilled', 'i2d', - 'x1dints', 'x1d', 's2d', 's3d', 'dark', 'crfints', - 'crf', 'ramp', 'fitopt', 'bsubints', 'bsub', 'cat', 'segm', 'c1d'] +GENERIC_SUFFIX_TYPES = [ + "uncal", + "cal", + "rateints", + "rate", + "trapsfilled", + "i2d", + "x1dints", + "x1d", + "s2d", + "s3d", + "dark", + "crfints", + "crf", + "ramp", + "fitopt", + "bsubints", + "bsub", + "cat", + "segm", + "c1d", +] # Gratings available for each instrument -GRATING_PER_INSTRUMENT = {'fgs': [], - 'miri': [], - 'nircam': [], - 'niriss': [], - 'nirspec': ['G140M', 'G235M', 'G395M', 'G140H', - 'G235H', 'G395H', 'PRISM'] - } +GRATING_PER_INSTRUMENT = { + "fgs": [], + "miri": [], + "nircam": [], + "niriss": [], + "nirspec": [ + "G140M", + "G235M", + "G395M", + "G140H", + "G235H", + "G395H", + "PRISM", + "MIRROR", + ], +} + +# Filename extensions for guider data +GUIDER_FILENAME_TYPE = ["gs-fg", "gs-track", "gs-id", "gs-acq1", "gs-acq2"] # Possible suffix types for guider exposures -GUIDER_SUFFIX_TYPES = ['stream', 'stacked_uncal', 'image_uncal', 'stacked_cal', 'image_cal'] +GUIDER_SUFFIX_TYPES = [ + "stream", + "stacked_uncal", + "image_uncal", + "stacked_cal", + "image_cal", +] # JWQL should ignore some filetypes in the filesystem. 
-IGNORED_SUFFIXES = ['original', 'stream'] +IGNORED_SUFFIXES = ["original", "stream", "x1d", "x1dints", "c1d", "pre-image"] # Instrument monitor database tables INSTRUMENT_MONITOR_DATABASE_TABLES = { - 'dark_monitor': ['_dark_dark_current', '_dark_pixel_stats', '_dark_query_history'], - 'bad_pixel_monitor': ['_bad_pixel_stats', '_bad_pixel_query_history']} + "dark_monitor": [ + "_dark_dark_current", + "_dark_pixel_stats", + "_dark_query_history", + ], + "bad_pixel_monitor": [ + "_bad_pixel_stats", + "_bad_pixel_query_history", + ], + "cosmic_ray_monitor": [ + "_cosmic_ray_stats", + "_cosmic_ray_query_history", + ], + "msata_monitor": ["_ta_stats", "_ta_query_history"], + "wata_monitor": ["_ta_stats", "_ta_query_history"], +} INSTRUMENT_SERVICE_MATCH = { - 'FGS': 'Mast.Jwst.Filtered.Fgs', - 'MIRI': 'Mast.Jwst.Filtered.Miri', - 'NIRCam': 'Mast.Jwst.Filtered.Nircam', - 'NIRISS': 'Mast.Jwst.Filtered.Niriss', - 'NIRSpec': 'Mast.Jwst.Filtered.Nirspec'} + "FGS": "Mast.Jwst.Filtered.Fgs", + "MIRI": "Mast.Jwst.Filtered.Miri", + "NIRCam": "Mast.Jwst.Filtered.Nircam", + "NIRISS": "Mast.Jwst.Filtered.Niriss", + "NIRSpec": "Mast.Jwst.Filtered.Nirspec", +} # JWST data products -JWST_DATAPRODUCTS = ['IMAGE', 'SPECTRUM', 'SED', 'TIMESERIES', 'VISIBILITY', - 'EVENTLIST', 'CUBE', 'CATALOG', 'ENGINEERING', 'NULL'] +JWST_DATAPRODUCTS = [ + "IMAGE", + "SPECTRUM", + "SED", + "TIMESERIES", + "VISIBILITY", + "EVENTLIST", + "CUBE", + "CATALOG", + "ENGINEERING", + "NULL", +] # Lowercase JWST instrument names -JWST_INSTRUMENT_NAMES = sorted(['niriss', 'nircam', 'nirspec', 'miri', 'fgs']) +JWST_INSTRUMENT_NAMES = sorted(["niriss", "nircam", "nirspec", "miri", "fgs"]) # JWST instrument names with shorthand notation -JWST_INSTRUMENT_NAMES_SHORTHAND = {'gui': 'fgs', - 'mir': 'miri', - 'nis': 'niriss', - 'nrc': 'nircam', - 'nrs': 'nirspec'} +JWST_INSTRUMENT_NAMES_SHORTHAND = { + "gui": "fgs", + "mir": "miri", + "nis": "niriss", + "nrc": "nircam", + "nrs": "nirspec", +} # Mixed case JWST instrument names -JWST_INSTRUMENT_NAMES_MIXEDCASE = {'fgs': 'FGS', - 'miri': 'MIRI', - 'nircam': 'NIRCam', - 'niriss': 'NIRISS', - 'nirspec': 'NIRSpec'} +JWST_INSTRUMENT_NAMES_MIXEDCASE = { + "fgs": "FGS", + "miri": "MIRI", + "nircam": "NIRCam", + "niriss": "NIRISS", + "nirspec": "NIRSpec", +} # Upper case JWST instrument names -JWST_INSTRUMENT_NAMES_UPPERCASE = {key: value.upper() for key, value in - JWST_INSTRUMENT_NAMES_MIXEDCASE.items()} +JWST_INSTRUMENT_NAMES_UPPERCASE = { + key: value.upper() for key, value in JWST_INSTRUMENT_NAMES_MIXEDCASE.items() +} # Astoquery service string for each JWST instrument -JWST_MAST_SERVICES = ['Mast.Jwst.Filtered.{}'.format(value.title()) for value in - JWST_INSTRUMENT_NAMES] +JWST_MAST_SERVICES = [ + "Mast.Jwst.Filtered.{}".format(value.title()) for value in JWST_INSTRUMENT_NAMES +] + +# Possible values for look status filter +LOOK_OPTIONS = ["New", "Viewed"] + +# Maximum number of records returned by MAST for a single query +MAST_QUERY_LIMIT = 500000 + +# Expected position sensor values for MIRI. Used by the EDB monitor +# to filter out bad values. 
Tuple values are the expected value and +# the standard deviation associated with the value +MIRI_POS_RATIO_VALUES = { + "FW": { + "FND": (-164.8728073, 0.204655346), + "OPAQUE": (380.6122145, 0.078856646), + "F1000W": (-24.15638797, 0.182865887), + "F1130W": (137.8245397, 0.24910941), + "F1280W": (-298.7062532, 0.229963508), + "P750L": (12.39439777, 0.246932037), + "F1500W": (-377.9888235, 0.263432415), + "F1800W": (435.9046314, 0.27885876), + "F2100W": (-126.5991201, 0.197193968), + "F560W": (218.0010353, 0.282554884), + "FLENS": (-212.7978283, 0.409300208), + "F2300C": (306.0488778, 0.265448583), + "F770W": (-62.48455213, 0.340861733), + "F1550C": (188.7366748, 0.291288105), + "F2550W": (-324.2364737, 0.176262309), + "F1140C": (82.81057729, 0.169772457), + "F2550WR": (-255.5816917, 0.251581688), + "F1065C": (261.4486618, 0.16177981), + }, + "CCC": {"CLOSED": (398.0376386, 0.173703628), "OPEN": (504.0482685, 0.328112274)}, + "GW14": { + "SHORT": (626.9411005, 0.116034024), + "MEDIUM": (342.8685233, 0.127123169), + "LONG": (408.8339259, 0.117079193), + }, + "GW23": { + "SHORT": (619.7948107, 0.215417336), + "MEDIUM": (373.1697309, 0.204314122), + "LONG": (441.6632325, 0.349161169), + }, +} + +# Suffix for msa files +MSA_SUFFIX = ["msa"] # Available monitor names and their location for each JWST instrument MONITORS = { - 'fgs': [('Bad Pixel Monitor', '/fgs/bad_pixel_monitor'), - ('Readnoise Monitor', '/fgs/readnoise_monitor'), - ('Dark Current Monitor', '/fgs/dark_monitor')], - 'miri': [('Dark Current Monitor', '/miri/dark_monitor'), - ('Data Trending', '#'), - ('Bad Pixel Monitor', '/miri/bad_pixel_monitor'), - ('Readnoise Monitor', '/miri/readnoise_monitor'), - ('Cosmic Ray Monitor', '#'), - ('Photometry Monitor', '#'), - ('TA Failure Monitor', '#'), - ('Blind Pointing Accuracy Monitor', '#'), - ('Filter and Calibration Lamp Monitor', '#'), - ('Thermal Emission Monitor', '#')], - 'nircam': [('Bias Monitor', '/nircam/bias_monitor'), - ('Readnoise Monitor', '/nircam/readnoise_monitor'), - ('Gain Level Monitor', '#'), - ('Dark Current Monitor', '/nircam/dark_monitor'), - ('Bad Pixel Monitor', '/nircam/bad_pixel_monitor'), - ('Photometric Stability Monitor', '#')], - 'niriss': [('Bad Pixel Monitor', '/niriss/bad_pixel_monitor'), - ('Readnoise Monitor', '/niriss/readnoise_monitor'), - ('AMI Calibrator Monitor', '#'), - ('TSO RMS Monitor', '#'), - ('Bias Monitor', '/niriss/bias_monitor'), - ('Dark Current Monitor', '/niriss/dark_monitor')], - 'nirspec': [('Optical Short Monitor', '#'), - ('Bad Pixel Monitor', '/nirspec/bad_pixel_monitor'), - ('Readnoise Monitor', '/nirspec/readnoise_monitor'), - ('Target Acquisition Monitor', '#'), - ('Data Trending', '#'), - ('Detector Health Monitor', '#'), - ('Ref Pix Monitor', '#'), - ('Internal Lamp Monitor', '#'), - ('Instrument Model Updates', '#'), - ('Failed-open Shutter Monitor', '#'), - ('Bias Monitor', '/nirspec/bias_monitor'), - ('Dark Monitor', '/nirspec/dark_monitor')]} + "fgs": [ + ("Bad Pixel Monitor", "/fgs/bad_pixel_monitor"), + ("Cosmic Ray Monitor", "#"), + ("Dark Current Monitor", "/fgs/dark_monitor"), + ("EDB Telemetry Monitor", "/fgs/edb_monitor"), + ("Readnoise Monitor", "/fgs/readnoise_monitor"), + ], + "miri": [ + ("Bad Pixel Monitor", "/miri/bad_pixel_monitor"), + ("Cosmic Ray Monitor", "#"), + ("Dark Current Monitor", "/miri/dark_monitor"), + ("EDB Telemetry Monitor", "/miri/edb_monitor"), + ("Readnoise Monitor", "/miri/readnoise_monitor"), + ], + "nircam": [ + ("Bad Pixel Monitor", "/nircam/bad_pixel_monitor"), + ("Bias Monitor", 
"/nircam/bias_monitor"), + ("Cosmic Ray Monitor", "#"), + ("Dark Current Monitor", "/nircam/dark_monitor"), + ("EDB Telemetry Monitor", "/nircam/edb_monitor"), + ("Readnoise Monitor", "/nircam/readnoise_monitor"), + ], + "niriss": [ + ("Bad Pixel Monitor", "/niriss/bad_pixel_monitor"), + ("Bias Monitor", "/niriss/bias_monitor"), + ("Cosmic Ray Monitor", "#"), + ("Dark Current Monitor", "/niriss/dark_monitor"), + ("EDB Telemetry Monitor", "/niriss/edb_monitor"), + ("Readnoise Monitor", "/niriss/readnoise_monitor"), + ], + "nirspec": [ + ("Bad Pixel Monitor", "/nirspec/bad_pixel_monitor"), + ("Bias Monitor", "/nirspec/bias_monitor"), + ("Dark Monitor", "/nirspec/dark_monitor"), + ("Cosmic Ray Monitor", "#"), + ("EDB Telemetry Monitor", "/nirspec/edb_monitor"), + ("MSATA Monitor", "/nirspec/msata_monitor"), + ("Readnoise Monitor", "/nirspec/readnoise_monitor"), + ("WATA Monitor", "/nirspec/wata_monitor"), + ], +} # Possible suffix types for coronograph exposures -NIRCAM_CORONAGRAPHY_SUFFIX_TYPES = ['psfstack', 'psfalign', 'psfsub'] +NIRCAM_CORONAGRAPHY_SUFFIX_TYPES = ["psfstack", "psfalign", "psfsub"] # NIRCam subarrays that use four amps for readout -NIRCAM_FOUR_AMP_SUBARRAYS = ['WFSS128R', 'WFSS64R'] +NIRCAM_FOUR_AMP_SUBARRAYS = ["WFSS128R", "WFSS64R"] # NIRCam long wavelength detector names -NIRCAM_LONGWAVE_DETECTORS = ['NRCA5', 'NRCB5'] +NIRCAM_LONGWAVE_DETECTORS = ["NRCA5", "NRCB5"] # NIRCam short wavelength detector names -NIRCAM_SHORTWAVE_DETECTORS = ['NRCA1', 'NRCA2', 'NRCA3', 'NRCA4', - 'NRCB1', 'NRCB2', 'NRCB3', 'NRCB4'] +NIRCAM_SHORTWAVE_DETECTORS = [ + "NRCA1", + "NRCA2", + "NRCA3", + "NRCA4", + "NRCB1", + "NRCB2", + "NRCB3", + "NRCB4", +] # NIRCam subarrays that use either one or four amps -NIRCAM_SUBARRAYS_ONE_OR_FOUR_AMPS = ['SUBGRISMSTRIPE64', 'SUBGRISMSTRIPE128', 'SUBGRISMSTRIPE256'] +NIRCAM_SUBARRAYS_ONE_OR_FOUR_AMPS = [ + "SUBGRISMSTRIPE64", + "SUBGRISMSTRIPE128", + "SUBGRISMSTRIPE256", +] # Possible suffix types for AMI files -NIRISS_AMI_SUFFIX_TYPES = ['amiavg', 'aminorm', 'ami', 'psf-amiavg'] +NIRISS_AMI_SUFFIX_TYPES = ["amiavg", "aminorm", "ami", "psf-amiavg"] # Base name for the file listing the preview images for a given instrument. # The complete name will have "_{instrument.lower}.txt" added to the end of this. -PREVIEW_IMAGE_LISTFILE = 'preview_image_inventory' +PREVIEW_IMAGE_LISTFILE = "preview_image_inventory" + +# All possible proposal categories +PROPOSAL_CATEGORIES = ["AR", "CAL", "COM", "DD", "ENG", "GO", "GTO", "NASA", "SURVEY"] + +PUPILS_PER_INSTRUMENT = { + "nircam": [ + "CLEAR", + "FLAT", + "F162M", + "F164N", + "GDHS0", + "GDHS60", + "MASKBAR", + "MASKIPR", + "MASKRND", + "PINHOLES", + "WLM8", + "WLP8", + "F323N", + "F405N", + "F466N", + "F470N", + "GRISMC", + "GRISMR", + "GRISMV2", + "GRISMV3", + ], + "niriss": [ + "CLEARP", + "F090W", + "F115W", + "F140M", + "F150W", + "F158M", + "F200W", + "GR700XD", + "NRM", + ], + "nirspec": [], + "miri": [], + "fgs": [], +} + + +# Keep keys defined via class as they are used many places with potential mispellings +# Keys are in sort order from general to instrument specific, then alphabetical +# within instrument specific fields. 
+class QueryConfigKeys: + INSTRUMENTS = "INSTRUMENTS" + PROPOSAL_CATEGORY = "PROPOSAL_CATEGORY" + LOOK_STATUS = "LOOK_STATUS" + DATE_RANGE = "DATE_RANGE" + NUM_PER_PAGE = "NUM_PER_PAGE" + SORT_TYPE = "SORT_TYPE" + ANOMALIES = "ANOMALIES" + APERTURES = "APERTURES" + DETECTORS = "DETECTORS" + EXP_TYPES = "EXP_TYPES" + FILTERS = "FILTERS" + GRATINGS = "GRATINGS" + PUPILS = "PUPILS" + READ_PATTS = "READ_PATTS" + SUBARRAYS = "SUBARRAYS" + + +# Template for parameters to be stored in "query_config" session for query_page +QUERY_CONFIG_TEMPLATE = { + QueryConfigKeys.INSTRUMENTS: [], + QueryConfigKeys.PROPOSAL_CATEGORY: [], + QueryConfigKeys.LOOK_STATUS: [], + QueryConfigKeys.NUM_PER_PAGE: 100, + QueryConfigKeys.SORT_TYPE: "Recent", + QueryConfigKeys.DATE_RANGE: "", + QueryConfigKeys.ANOMALIES: {}, + QueryConfigKeys.APERTURES: {}, + QueryConfigKeys.DETECTORS: {}, + QueryConfigKeys.EXP_TYPES: {}, + QueryConfigKeys.FILTERS: {}, + QueryConfigKeys.GRATINGS: {}, + QueryConfigKeys.PUPILS: {}, + QueryConfigKeys.READ_PATTS: {}, + QueryConfigKeys.SUBARRAYS: {}, +} # RAPID-style readout patterns for each instrument. Added so we can # differentiate in MAST searches for e.g. the dark current monitor -RAPID_READPATTERNS = {'fgs': ['FGSRAPID'], - 'miri': ['FAST', 'FASTR1', 'SLOW', 'SLOWR1', 'FASTGRPAVG', - 'FASTGRPAVG8', 'FASTGRPAVG16', 'FASTGRPAVG32', - 'FASTGRPAVG64', 'FASTR100'], - 'nircam': ['RAPID'], - 'niriss': ['NISRAPID'], - 'nirspec': ['NRSRAPID', 'NRSIRS2RAPID']} - -READPATT_PER_INSTRUMENT = {'fgs': ['FGS', 'FGSRAPID', 'FGS60', 'FGS840', 'FGS8370'], - 'miri': ['FAST', 'FASTR1', 'SLOW', 'SLOWR1', 'FASTGRPAVG', - 'FASTGRPAVG8', 'FASTGRPAVG16', 'FASTGRPAVG32', - 'FASTGRPAVG64', 'FASTR100'], - 'nircam': ['RAPID', 'SHALLOW2', 'BRIGHT2', 'MEDIUM2', 'SHALLOW4', - 'MEDIUM8', 'BRIGHT1', 'DEEP2', 'DEEP8'], - 'niriss': ['NISRAPID', 'NIS'], - 'nirspec': ['NRS', 'NRSRAPID', 'NRSIRS2RAPID', - 'NRSRAPIDD2', 'NRSRAPIDD6']} - -SUBARRAYS_ONE_OR_FOUR_AMPS = ['SUBGRISMSTRIPE64', 'SUBGRISMSTRIPE128', 'SUBGRISMSTRIPE256'] +RAPID_READPATTERNS = { + "fgs": ["FGSRAPID"], + "miri": [ + "FAST", + "FASTR1", + "SLOW", + "SLOWR1", + "FASTGRPAVG", + "FASTGRPAVG8", + "FASTGRPAVG16", + "FASTGRPAVG32", + "FASTGRPAVG64", + "FASTR100", + ], + "nircam": ["RAPID"], + "niriss": ["NISRAPID"], + "nirspec": ["NRSRAPID", "NRSIRS2RAPID"], +} + +READPATT_PER_INSTRUMENT = { + "fgs": ["FGS", "FGSRAPID", "FGS60", "FGS840", "FGS8370"], + "miri": [ + "FAST", + "FASTR1", + "SLOW", + "SLOWR1", + "FASTGRPAVG", + "FASTGRPAVG8", + "FASTGRPAVG16", + "FASTGRPAVG32", + "FASTGRPAVG64", + "FASTR100", + ], + "nircam": [ + "RAPID", + "SHALLOW2", + "BRIGHT2", + "MEDIUM2", + "SHALLOW4", + "MEDIUM8", + "BRIGHT1", + "DEEP2", + "DEEP8", + ], + "niriss": ["NISRAPID", "NIS"], + "nirspec": ["NRS", "NRSRAPID", "NRSIRS2RAPID", "NRSRAPIDD2", "NRSRAPIDD6"], +} + + +REPORT_KEYS_PER_INSTRUMENT = { + "fgs": [ + "proposal", + "exp_type", + "expstart", + "filter", + "aperture", + "detector", + "subarray", + "viewed", + ], + "miri": [ + "proposal", + "exp_type", + "expstart", + "filter", + "aperture", + "detector", + "subarray", + "viewed", + ], + "nircam": [ + "proposal", + "exp_type", + "expstart", + "filter", + "pupil", + "aperture", + "detector", + "subarray", + "viewed", + ], + "niriss": [ + "proposal", + "exp_type", + "expstart", + "filter", + "pupil", + "aperture", + "detector", + "subarray", + "viewed", + ], + "nirspec": ["exp_type", "filter", "grating", "read_patt_num", "viewed"], +} + +# Possible values for sort order +SORT_OPTIONS = ["Ascending", "Descending", 
"Recent", "Oldest"] + +SUBARRAYS_ONE_OR_FOUR_AMPS = [ + "SUBGRISMSTRIPE64", + "SUBGRISMSTRIPE128", + "SUBGRISMSTRIPE256", +] + +SUBARRAYS_PER_INSTRUMENT = { + "nircam": [ + "FULL", + "FULLP", + "SUB640", + "SUB320", + "SUB160", + "SUB400P", + "SUB160P", + "SUB64P", + "SUB32TATS", + "SUB640A210R", + "SUB640ASWB", + "SUB320A335R", + "SUB320A430R", + "SUB320ALWB", + "SUBGRISM256", + "SUBGRISM128", + "SUBGRISM64", + "SUB32TATSGRISM", + ], + "niriss": [ + "FULL", + "SUBSTRIP96", + "SUBSTRIP256", + "SUB80", + "SUB64", + "SUB128", + "SUB256", + "WFSS64R", + "WFSS128R", + "WFSS64C", + "WFSS128C", + "SUBAMPCAL", + "SUBTAAMI", + "SUBTASOSS", + ], + "nirspec": [], + "miri": [ + "BRIGHTSKY", + "FULL", + "MASK1065", + "MASK1140", + "MASK1550", + "MASKLYOT", + "SLITLESSPRISM", + "SUB64", + "SUB128", + "SUB256", + ], + "fgs": [], +} + +# Filename suffixes that need to include the association value in the suffix in +# order to identify the preview image file. This should only be crf and crfints, +# since those are essentially level 2 files that are output by the level 3 pipeline. +SUFFIXES_TO_ADD_ASSOCIATION = ["crf", "crfints"] + +# Filename suffixes where data have been averaged over integrations +SUFFIXES_WITH_AVERAGED_INTS = ["rate", "cal", "crf", "i2d", "bsub"] + +# boolean accessed according to a viewed flag +THUMBNAIL_FILTER_LOOK = ["New", "Viewed"] # Base name for the file listing the thumbnail images for a given instrument. # The complete name will have "_{instrument.lower}.txt" added to the end of this. -THUMBNAIL_LISTFILE = 'thumbnail_inventory' +THUMBNAIL_LISTFILE = "thumbnail_inventory" # Possible suffix types for time-series exposures -TIME_SERIES_SUFFIX_TYPES = ['phot', 'whtlt'] +TIME_SERIES_SUFFIX_TYPES = ["phot", "whtlt"] # Possible suffix types for WFS&C files -WFSC_SUFFIX_TYPES = ['wfscmb'] +WFSC_SUFFIX_TYPES = ["wfscmb"] # Concatenate all suffix types (ordered to ensure successful matching) -FILE_SUFFIX_TYPES = GUIDER_SUFFIX_TYPES + GENERIC_SUFFIX_TYPES + \ - TIME_SERIES_SUFFIX_TYPES + NIRCAM_CORONAGRAPHY_SUFFIX_TYPES + \ - NIRISS_AMI_SUFFIX_TYPES + WFSC_SUFFIX_TYPES +FILE_SUFFIX_TYPES = ( + GUIDER_SUFFIX_TYPES + + GENERIC_SUFFIX_TYPES + + TIME_SERIES_SUFFIX_TYPES + + NIRCAM_CORONAGRAPHY_SUFFIX_TYPES + + NIRISS_AMI_SUFFIX_TYPES + + WFSC_SUFFIX_TYPES + + MSA_SUFFIX +) # Instrument Documentation Links -URL_DICT = {'fgs': 'https://jwst-docs.stsci.edu/jwst-observatory-hardware/jwst-fine-guidance-sensor', - 'miri': 'https://jwst-docs.stsci.edu/jwst-mid-infrared-instrument', - 'niriss': 'https://jwst-docs.stsci.edu/jwst-near-infrared-imager-and-slitless-spectrograph', - 'nirspec': 'https://jwst-docs.stsci.edu/jwst-near-infrared-spectrograph', - 'nircam': 'https://jwst-docs.stsci.edu/jwst-near-infrared-camera'} +URL_DICT = { + "fgs": "https://jwst-docs.stsci.edu/jwst-observatory-hardware/jwst-fine-guidance-sensor", + "miri": "https://jwst-docs.stsci.edu/jwst-mid-infrared-instrument", + "niriss": "https://jwst-docs.stsci.edu/jwst-near-infrared-imager-and-slitless-spectrograph", + "nirspec": "https://jwst-docs.stsci.edu/jwst-near-infrared-spectrograph", + "nircam": "https://jwst-docs.stsci.edu/jwst-near-infrared-camera", +} diff --git a/jwql/utils/interactive_preview_image.py b/jwql/utils/interactive_preview_image.py new file mode 100644 index 000000000..da9563770 --- /dev/null +++ b/jwql/utils/interactive_preview_image.py @@ -0,0 +1,734 @@ +#! 
/usr/bin/env python + +"""Module that can be used to read in a JWST observation and display one frame + (from any extension) as an interactive Bokeh image. + Authors + ------- + - Bryan Hilbert + Use + --- + This module can be imported and called as such: + :: + from jwql.xxx.xxx import InteractivePreviewImg + file = 'jw01602001001_02102_00001_nrcb2_cal.fits' + im = InteractivePreviewImg( + file, low_lim=None, high_lim=None, scaling='lin', contrast=0.4, extname='DQ') + Required Arguments: + ''filename'' - Name of a fits file containing a JWST observation + """ +from copy import deepcopy +import os + +import numpy as np +from astropy.io import fits +from astropy.visualization import ZScaleInterval, MinMaxInterval, PercentileInterval +from astropy.wcs import WCS +from bokeh.embed import components +from bokeh.layouts import gridplot, layout +from bokeh.models import ( + BasicTicker, BoxZoomTool, Button, ColorBar, ColumnDataSource, + CustomJS, Div, HoverTool, LinearColorMapper, LogColorMapper, LogTicker, + RadioGroup, Range1d, Row, Select, Spacer, Spinner, WheelZoomTool) +from bokeh.plotting import figure, output_file, show, save +from jwst.datamodels import dqflags + + +class InteractivePreviewImg: + """Class to create the interactive Bokeh figure. + """ + + def __init__(self, filename, low_lim=None, high_lim=None, scaling='lin', contrast=None, extname='SCI', + group=None, integ=None, mask=None, line_plots=False, save_html=None, show=False): + """Populate attributes, read in data, and create the Bokeh figure + Parameters + ---------- + filename : str + Name of fits file containing observation data + low_lim : float + Signal value to use as the lower limit of the displayed image. If None, it will be calculated + using the ZScale function + high_lim : float + Signal value to use as the upper limit of the displayed image. If None, it will be calculated + using the ZScale function + scaling : str + Can be 'log' or 'lin', indicating logarithmic or linear scaling + contrast : float + Used in the ZScale function to calculated ``low_lim`` and ''high_lim``. Larger values result + in a larger range between ``low_lim`` and ``high_lim``. + extname : str + Extension name within ``filename`` to read in. + integ : int or list + If an integer, this is the integration number of the data to be read in. Defaults to 0 (first + integration). If a 2-element list, this lists the integration numbers of 2 frames to be read in + and subtracted prior to display. + group : int or list + If an integer, this is the group number within ``integ`` to read in and display. Defaults to -1 + (final group of ``integration``). If a 2-element list, this lists the group numbers corresponding + to the 2-element list in ``integ`` for the 2 frames to be read in and subtracted prior to display. + mask : numpy.ndarray + Mask to use in order to avoid some pixels when auto-scaling. Pixels with a value other than 0 will + be ignored when auto-scaling. + line_plots : bool + If set, column and row plots are added to the layout, to be updated on click in the main figure. + These take some time to create, so are off by default. + save_html : str + Name of html file to save the figure to. If None, the components are returned instead. + show : bool + If True, the figure is shown on the screen rather than being saved or returned. Overrides ``save_html``. 
+ """ + self.filename = filename + self.low_lim = low_lim + self.high_lim = high_lim + self.scaling = scaling + self.contrast = contrast + self.extname = extname.upper() + self.mask = mask + self.show_line_plots = line_plots + self.show = show + self.save_html = save_html + + # Allow sending in of None without overriding defaults + if group is None: + group = -1 + if integ is None: + integ = 0 + + # Determine the min and max values to use for the display + if self.contrast is None: + self.contrast = 0.25 + if isinstance(group, list): + if len(group) > 2: + raise ValueError( + 'group must be an integer or 2-element list') + self.group = group + if isinstance(integ, list): + if len(integ) > 2: + raise ValueError( + 'integ must be an integer or 2-element list') + self.integ = integ + + self.data = None + self.signal_units = None + self.wcs_coord = None + self.get_data() + if 'DQ' in self.extname: + self.get_bits() + # col/row plots not available for dq values + self.show_line_plots = False + self.script, self.div = self.create_bokeh_image() + + def create_bokeh_image(self): + """Method to create the figure + """ + limits = self.get_scale() + if self.low_lim is not None: + limits = (self.low_lim, limits[1]) + if self.high_lim is not None: + limits = (limits[0], self.high_lim) + + # handle log or linear scaling + if limits[0] <= 0: + log_limits = (1e-4, limits[1]) + else: + log_limits = limits + log_color_mapper = LogColorMapper( + palette="Viridis256", low=log_limits[0], high=log_limits[1]) + log_ticker = LogTicker() + lin_color_mapper = LinearColorMapper( + palette="Viridis256", low=limits[0], high=limits[1]) + lin_ticker = BasicTicker() + active = int(self.scaling == 'log') + + yd, xd = self.data.shape + info = dict(image=[self.data], x=[0], y=[0], dw=[xd], dh=[yd]) + if 'DQ' in self.extname: + info["dq"] = [self.bit_list] + if self.wcs_coord is not None and len(self.wcs_coord) == 2: + info["ra"] = [self.wcs_coord[0]] + info["dec"] = [self.wcs_coord[1]] + source = ColumnDataSource(info) + + if not self.show and self.save_html is not None: + output_file(filename=self.save_html, + title=os.path.basename(self.filename)) + + # fix figure aspect from data aspect + # bokeh throws errors if plot is too small, so make sure + # the smaller dimension has reasonable size + max_dim, min_dim = 700, 400 + if xd > yd: + plot_width = max_dim + plot_height = int(plot_width * yd / xd) + if plot_height < min_dim: + plot_height = min_dim + else: + plot_height = max_dim + plot_width = int(plot_height * xd / yd) + if plot_width < min_dim: + plot_width = min_dim + + fig = figure(tools='pan,reset,save', match_aspect=True, + plot_width=plot_width, plot_height=plot_height) + fig.add_tools(BoxZoomTool(match_aspect=True)) + fig.add_tools(WheelZoomTool(zoom_on_axis=False)) + + # make both linear and log scale images to allow toggling between them + images = [] + color_bars = [] + scales = ((lin_color_mapper, lin_ticker), (log_color_mapper, log_ticker)) + for i, config in enumerate(scales): + color_mapper, ticker = config + visible = (i == active) + img = fig.image(source=source, image='image', + level="image", color_mapper=color_mapper, visible=visible) + color_bar = ColorBar(color_mapper=color_mapper, label_standoff=12, ticker=ticker, + title=self.signal_units, bar_line_color='black', + minor_tick_line_color='black', major_tick_line_color='black', + visible=visible) + if self.show_line_plots: + fig.add_layout(color_bar, 'above') + else: + fig.add_layout(color_bar, 'below') + images.append(img) + 
color_bars.append(color_bar) + + # limit whitespace around image as much as possible + fig.x_range.range_padding = fig.y_range.range_padding = 0 + if xd >= yd: + fig.x_range.start = 0 + fig.x_range.end = xd + fig.x_range.bounds = (0, xd) + if yd >= xd: + fig.y_range.start = 0 + fig.y_range.end = yd + fig.y_range.bounds = (0, yd) + + hover_div, hover_tool = self.add_hover_tool(source, images) + + self.create_figure_title() + fig.title.text = self.title + fig.xaxis.axis_label = 'Pixel' + fig.yaxis.axis_label = 'Pixel' + fig.add_tools(hover_tool) + + # add interactive widgets + widgets = self.add_interactive_controls(images, color_bars) + if self.show_line_plots: + # add row and column plots + col_plot, row_plot = self.line_plots(fig) + grid = gridplot([fig, col_plot, row_plot, hover_div], + ncols=2, merge_tools=False) + else: + grid = gridplot([fig, hover_div], ncols=2, merge_tools=False) + + box_layout = layout(children=[grid, *widgets]) + + # Show figure on screen if requested + if self.show: + show(box_layout) + elif self.save_html is not None: + save(box_layout) + else: + return components(box_layout) + + def line_plots(self, main_figure): + """ + Pre-compute column and row plots for each pixel. + + Parameters + ---------- + main_figure : figure + Main figure containing image. + + Returns + ------- + list of figure + New figures to add to the page layout. + """ + new_plots = [] + new_lines = [] + match_ranges = [] + value_ranges = [] + + ny, nx = self.data.shape + col_idx, row_idx = np.indices((ny, nx)) + directions = ['x', 'y'] + for index_direction in directions: + if index_direction == 'x': + # column plots + fig = figure(plot_width=200, plot_height=main_figure.height, tools='', + y_axis_location='right', margin=(0, 0, 0, 30)) + fig.toolbar.logo = None + + fig.x_range = Range1d() + fig.y_range = Range1d() + match_range = fig.y_range + main_range = main_figure.y_range + value_range = fig.x_range + + fig.xaxis.axis_label = self.signal_units + fig.yaxis.axis_label = 'Row pixel (y)' + fig.xaxis.major_label_orientation = np.radians(-45) + + n_plot = nx + initial_visible = n_plot // 2 + + x = self.data.T + y = col_idx.T + min_val = np.nanmin(x[initial_visible]) + max_val = np.nanmax(x[initial_visible]) + + else: + # row plots + fig = figure(plot_height=200, plot_width=main_figure.width, tools='') + fig.toolbar.logo = None + + fig.y_range = Range1d() + fig.x_range = Range1d() + match_range = fig.x_range + main_range = main_figure.x_range + value_range = fig.y_range + + fig.xaxis.axis_label = 'Column pixel (x)' + fig.yaxis.axis_label = self.signal_units + + # indexing is off by 1 for row plots for some reason + n_plot = ny + initial_visible = n_plot // 2 + + x = row_idx + 1 + y = self.data + min_val = np.nanmin(y[initial_visible]) + max_val = np.nanmax(y[initial_visible]) + + # match one of the axes to the main figure + if main_range.start is not None: + match_range.start = main_range.start + if main_range.end is not None: + match_range.end = main_range.end + main_range.js_link('start', match_range, 'start') + main_range.js_link('start', match_range, 'reset_start') + main_range.js_link('end', match_range, 'end') + main_range.js_link('end', match_range, 'reset_end') + + # initialize the other to the data + pad = 0.1 * (max_val - min_val) + value_range.start = min_val - pad + value_range.end = max_val + pad + + # plot a step line for each column and plot + # all but one are hidden to start + lines = [] + for i in range(n_plot): + line = fig.step(x=x[i], y=y[i], + mode='before', + visible=(i == 
initial_visible), + name=f'Data at {index_direction}={i}') + lines.append(line) + fig.title = lines[initial_visible].name + + new_lines.append(lines) + new_plots.append(fig) + match_ranges.append(match_range) + value_ranges.append(value_range) + + # watch for tap on plot - makes a new line visible, + # matching the selected point + update_plot = CustomJS( + args={'lines': new_lines, 'figures': new_plots}, + code=""" + var x = Math.floor(cb_obj.x); + var y = Math.floor(cb_obj.y); + figures[0].title.text = ""; + for (let i=0; i < lines[0].length; i++) { + if (i == x) { + lines[0][i].visible = true; + figures[0].title.text = lines[0][i].name; + } else { + lines[0][i].visible = false; + } + } + figures[1].title.text = ""; + for (let j=0; j < lines[1].length; j++) { + if (j == y) { + lines[1][j].visible = true; + figures[1].title.text = lines[1][j].name; + } else { + lines[1][j].visible = false; + } + }""") + main_figure.js_on_event('tap', update_plot) + + # watch for changes to matched axis to reset data range on value axis + for i in range(len(directions)): + limit_reset = CustomJS( + args={'line': new_lines[i], + 'direction': directions[i], + 'value_range': value_ranges[i], + 'match_range': match_ranges[i]}, + code=""" + var timeout; + if (direction == 'x') { + timeout = window._autoscale_timeout_x; + } else { + timeout = window._autoscale_timeout_y; + } + clearTimeout(timeout); + var min_val = Infinity; + var max_val = -Infinity; + for (let i=0; i < line.length; i++) { + if (line[i].visible == true) { + var data, idx; + if (direction == 'x') { + data = line[i].data_source.data['x']; + idx = line[i].data_source.data['y']; + } else { + data = line[i].data_source.data['y']; + idx = line[i].data_source.data['x']; + } + for (let j=0; j < data.length; j++) { + if (idx[j] >= match_range.start + && idx[j] <= match_range.end) { + if (Number.isFinite(data[j])) { + min_val = Math.min(data[j], min_val); + max_val = Math.max(data[j], max_val); + } + } + } + break; + } + } + if (Number.isFinite(min_val) && Number.isFinite(max_val) && min_val != max_val) { + var pad = 0.1 * (max_val - min_val); + if (direction == 'x') { + window._autoscale_timeout_x = setTimeout(function() { + value_range.start = min_val - pad; + value_range.end = max_val + pad; + }); + } else { + window._autoscale_timeout_y = setTimeout(function() { + value_range.start = min_val - pad; + value_range.end = max_val + pad; + }); + } + } + """) + match_ranges[i].js_on_change('start', limit_reset) + match_ranges[i].js_on_change('end', limit_reset) + + # also reset the limits when the plot is tapped for a new column/row + main_figure.js_on_event('tap', limit_reset) + + return new_plots + + def add_hover_tool(self, source, images): + """ + Make a hover tool with a div to display text. + + Parameters + ---------- + source : bokeh.models.ColumnDataSource + Data source for the figure. + images : list of bokeh.models.GlyphRenderer + Images to use as renderers for the hover tool. + + Returns + ------- + hover_div : bokeh.models.Div + Div element that will contain text from hover tool. + hover_tool : bokeh.models. 
+ """ + hover_div = Div(height=300, width=300) + + is_dq = ('DQ' in self.extname) + hover_callback = CustomJS(args={'s': source, 'd': hover_div, + 'u': self.signal_units, 'dq': is_dq}, code=""" + const idx = cb_data.index.image_indices; + if (idx.length > 0) { + var x = idx[0].dim1; + var y = idx[0].dim2; + var flat = idx[0].flat_index; + var val; + var label; + if (dq === true) { + val = s.data['dq'][0][y][x]; + if (Array.isArray(val)) { + val = val.join(', '); + } + label = "Value"; + } else { + // get the data from the array of arrays + val = s.data['image'][0][y][x]; + if (val === undefined) { + // uncal images have to be addressed with the flat index + val = s.data['image'][0][flat]; + } + // report any non-number as NaN + if (!Number.isFinite(val)) { + val = 'NaN'; + } else { + val = val.toPrecision(5); + } + label = "Value (" + u + ")"; + } + d.text = "
<table><tbody>" +
+                 "<tr><th colspan='2'>Pixel Value</th></tr>" +
+                 "<tr><td>(x, y) =</td>" +
+                 "<td>(" + x + ", " + y + ")</td></tr>";
+            if ('ra' in s.data && 'dec' in s.data) {
+                var ra = s.data['ra'][0][flat].toPrecision(8);
+                var dec = s.data['dec'][0][flat].toPrecision(8);
+                d.text += "<tr><td>RA (deg)=</td>" +
+                          "<td>" + ra + "</td></tr>" +
+                          "<tr><td>Dec (deg)=</td>" +
+                          "<td>" + dec + "</td></tr>";
+            }
+            d.text += "<tr><td>" + label + "=</td>" +
+                      "<td>" + val + "</td></tr></tbody></table>
"; + } else { + d.text = ""; + } + """) + hover_tool = HoverTool(tooltips=None, mode='mouse', renderers=images, + callback=hover_callback) + + return hover_div, hover_tool + + def add_interactive_controls(self, images, color_bars): + """ + Add client-side controls for images. + + Currently includes image scaling and limit setting controls. + + Parameters + ---------- + images : list of bokeh.models.Image + 2-element list of images. The first is linear scale, second is log scale. + Only one should be visible at any time. + color_bars : list of bokeh.models.ColorBar + 2-element list of color bars, matching the images. + + Returns + ------- + widgets: list of bokeh.Widget + Widgets to add to the page layout. + """ + # active scaling (0=linear, 1=log) + active = int(self.scaling == 'log') + + tools_label = Div(text="
<h4>Image Settings</h4>
") + + scale_label = Div(text="Scaling:") + scale_group = RadioGroup(labels=["linear", "log"], + inline=True, active=active) + scale_set = Row(scale_label, scale_group, + css_classes=['mb-4']) + + current_low = images[active].glyph.color_mapper.low + current_high = images[active].glyph.color_mapper.high + preset_limits = {'ZScale': (current_low, current_high), + 'Min/Max': MinMaxInterval().get_limits(self.data), + '99.5%': PercentileInterval(99.5).get_limits(self.data), + '99%': PercentileInterval(99).get_limits(self.data), + '95%': PercentileInterval(95).get_limits(self.data), + '90%': PercentileInterval(90).get_limits(self.data)} + options = [*preset_limits.keys(), 'Custom'] + preset_label = Div(text="Percentile presets:") + preset_select = Select(value='ZScale', options=options, width=120) + preset_set = Row(preset_label, preset_select) + + limit_label = Div(text="Limits:") + limit_low = Spinner(title="Low", value=current_low) + limit_high = Spinner(title="High", value=current_high) + reset = Button(label='Reset', button_type='primary') + limit_set = Row(limit_label, limit_low, limit_high, + css_classes=['mb-4']) + + # JS callbacks for client side controls + + # set alternate image visibility when scale selection changes + scale_group.js_on_click(CustomJS(args={'i1': images[0], 'c1': color_bars[0], + 'i2': images[1], 'c2': color_bars[1]}, + code=""" + if (i1.visible == true) { + i1.visible = false; + c1.visible = false; + i2.visible = true; + c2.visible = true; + } else { + i1.visible = true; + c1.visible = true; + i2.visible = false; + c2.visible = false; + } + """)) + + # set scaling limits from select box on change + limit_reset = CustomJS( + args={'setting': preset_select, 'limits': preset_limits, 'low': limit_low, + 'high': limit_high, 'scale': scale_group}, + code=""" + if (setting.value != "Custom") { + if (scale.active == 1 && limits[setting.value][0] <= 0) { + low.value = 0.0001; + } else { + low.value = limits[setting.value][0]; + } + high.value = limits[setting.value][1]; + } + """) + preset_select.js_on_change('value', limit_reset) + + # set scaling limits from text boxes on change + for i in range(len(images)): + limit_low.js_link('value', images[i].glyph.color_mapper, 'low') + limit_low.js_link('value', color_bars[i].color_mapper, 'low') + limit_high.js_link('value', images[i].glyph.color_mapper, 'high') + limit_high.js_link('value', color_bars[i].color_mapper, 'high') + + # reset boxes to preset range on button click + reset.js_on_click(limit_reset) + + # also reset when swapping limit style + scale_group.js_on_click(limit_reset) + + # return widgets + spacer = Spacer(height=20) + return [tools_label, scale_set, preset_set, limit_set, reset, spacer] + + def create_figure_title(self): + """Create title for the image""" + self.title = f'{os.path.basename(self.filename)}, {self.extname}' + if isinstance(self.group, list) and isinstance(self.integ, list): + self.title += f', Int {self.integ[0]}, Group {self.group[0]} - Int {self.integ[1]}, Group {self.group[1]}' + else: + if isinstance(self.integ, int): + self.title += f', Int {self.integ}' + elif isinstance(self.integ, list): + self.title += f', Int ({self.integ[0]}-{self.integ[1]})' + + if isinstance(self.group, int): + self.title += f', Group {self.group}' + elif isinstance(self.group, list): + self.title += f', Group ({self.group[0]}-{self.group[1]})' + + def get_bits(self): + """Translate the numerical DQ values in a 2D array into a 2D array where each entry is + a list of the DQ mnemonics that apply to that pixel. 
+ """ + self.bit_list = np.empty(self.data.shape, dtype=object) + goodpix = np.where(self.data == 0) + self.bit_list[goodpix] = ['GOOD'] + badpix = np.where(self.data != 0) + for i in range(len(badpix[0])): + self.bit_list[badpix[0][i], badpix[1][i]] = list(dqflags.dqflags_to_mnemonics( + self.data[badpix[0][i], badpix[1][i]], mnemonic_map=dqflags.pixel)) + + def get_data(self): + """Read in the data from the given fits file and extension name + """ + with fits.open(self.filename) as hdulist: + header = hdulist[self.extname].header + data_shape = hdulist[self.extname].data.shape + self.index_check(data_shape) + if len(data_shape) == 4: + self.data = hdulist[self.extname].data[self.integ, + self.group, :, :] + elif len(data_shape) == 3: + self.data = hdulist[self.extname].data[self.integ, :, :] + self.group = None + elif len(data_shape) == 2: + self.data = hdulist[self.extname].data + self.group = None + self.integ = None + + # If a difference image is requested, create the difference image here + if len(self.data.shape) == 3 and (isinstance(self.group, list) or isinstance(self.integ, list)): + diff_img = self.data[0, :, :] * 1. - self.data[1, :, :] + self.data = diff_img + + # Get the units of the data. This will be reported as the title of the colorbar + try: + self.signal_units = header['BUNIT'] + except KeyError: + self.signal_units = '' + + ny, nx = self.data.shape + col_idx, row_idx = np.indices((ny, nx)) + try: + wcs = WCS(header) + if wcs.has_celestial: + self.wcs_coord = wcs.pixel_to_world_values(col_idx, row_idx) + else: + self.wcs_coord = None + except (ValueError, TypeError): + self.wcs_coord = None + + def get_scale(self): + """Calculate the limits for the display, following the ZScale function + originally created or IRAF. + """ + z = ZScaleInterval(contrast=self.contrast) + if self.mask is None: + limits = z.get_limits(self.data) + else: + goodpix = self.mask == 0 + limits = z.get_limits(self.data[goodpix]) + return limits + + def index_check(self, shapes): + """Check that the group and integ indexes are compatible with the data shape. If the + input data are 3D (e.g. from a calints or rateints file), then self.group is ignored. + Similarly, if the input data are 2D, both self.group and self.integ are ignored. + Parameters + ---------- + shapes : tuple + Tuple of the dimensions of the data in ``self.filename`` + """ + checks = [True] + + # Put groups and ints into lists in all cases, to make comparisons easier + if isinstance(self.group, int): + group = [self.group] + conv_group_to_int = True + else: + group = deepcopy(self.group) + conv_group_to_int = False + if isinstance(self.integ, int): + integ = [self.integ] + conv_integ_to_int = True + else: + integ = deepcopy(self.integ) + conv_integ_to_int = False + + # Check groups and integs vs data shape. If the indexes are negative, translate to + # the appropriate positive value. This is more for the title of the figure than the check here. 
+ if len(shapes) == 4: + group = [shapes[1] + g if g < 0 else g for g in group] + checks.append(np.all(np.array(group) < shapes[1])) + integ = [shapes[0] + i if i < 0 else i for i in integ] + checks.append(np.all(np.array(integ) < shapes[0])) + elif len(shapes) == 3: + integ = [shapes[0] + i if i < 0 else i for i in integ] + checks.append(np.all(np.array(integ) < shapes[0])) + + if not np.all(checks): + raise ValueError( + f'Requested groups {group} or integs {integ} are larger than the input data size of {shapes}.') + + # Return the updated values to the same object type as they were input + if conv_group_to_int: + self.group = group[0] + else: + self.group = group + if conv_integ_to_int: + self.integ = integ[0] + else: + self.integ = integ diff --git a/jwql/utils/logging_functions.py b/jwql/utils/logging_functions.py index 230854cd5..abd82a45e 100644 --- a/jwql/utils/logging_functions.py +++ b/jwql/utils/logging_functions.py @@ -65,6 +65,11 @@ def my_main_function(): import time import traceback +if sys.version_info < (3, 11): + import tomli as tomllib +else: + import tomllib + from functools import wraps from jwql.utils.permissions import set_permissions @@ -159,7 +164,8 @@ def make_log_file(module): # Build filename timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') - filename = '{0}_{1}.log'.format(module, timestamp) + hostname = socket.gethostname() + filename = '{0}_{1}_{2}.log'.format(module, hostname, timestamp) # Determine save location user = pwd.getpwuid(os.getuid()).pw_name @@ -214,17 +220,14 @@ def wrapped(*args, **kwargs): logging.info('System: ' + socket.gethostname()) logging.info('Python Version: ' + sys.version.replace('\n', '')) logging.info('Python Executable Path: ' + sys.executable) + logging.info('Running as PID {}'.format(os.getpid())) # Read in setup.py file to build list of required modules - with open(get_config()['setup_file']) as f: - data = f.readlines() + toml_file = os.path.join(os.path.dirname(get_config()['setup_file']), 'pyproject.toml') + with open(toml_file, "rb") as f: + data = tomllib.load(f) - for i, line in enumerate(data): - if 'REQUIRES = [' in line: - begin = i + 1 - elif 'setup(' in line: - end = i - 2 - required_modules = data[begin:end] + required_modules = data['project']['dependencies'] # Clean up the module list module_list = [item.strip().replace("'", "").replace(",", "").split("=")[0].split(">")[0].split("<")[0] for item in required_modules] @@ -233,16 +236,19 @@ def wrapped(*args, **kwargs): for module in module_list: try: mod = importlib.import_module(module) - logging.info(module + ' Version: ' + mod.__version__) + logging.info(module + ' Version: ' + importlib.metadata.version(module)) logging.info(module + ' Path: ' + mod.__path__[0]) except (ImportError, AttributeError) as err: logging.warning(err) # nosec comment added to ignore bandit security check - environment = subprocess.check_output(['conda', 'env', 'export'], universal_newlines=True) # nosec - logging.info('Environment:') - for line in environment.split('\n'): - logging.info(line) + try: + environment = subprocess.check_output('conda env export', universal_newlines=True, shell=True) # nosec + logging.info('Environment:') + for line in environment.split('\n'): + logging.info(line) + except Exception as err: # catch any exception and report the entire traceback + logging.exception(err) # Call the function and time it t1_cpu = time.perf_counter() diff --git a/jwql/utils/mast_utils.py b/jwql/utils/mast_utils.py index d9e2c58bb..6524a8c6a 100644 --- 
a/jwql/utils/mast_utils.py +++ b/jwql/utils/mast_utils.py @@ -15,8 +15,243 @@ """ -from jwql.jwql_monitors import monitor_mast -from jwql.utils.constants import JWST_DATAPRODUCTS, JWST_INSTRUMENT_NAMES_MIXEDCASE +import logging +import os + +from astroquery.mast import Mast +from bokeh.embed import components +from bokeh.io import save, output_file +import pandas as pd + +from jwql.utils.constants import JWST_DATAPRODUCTS, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE, MAST_QUERY_LIMIT +from jwql.utils.permissions import set_permissions +from jwql.utils.utils import ensure_dir_exists, get_config +from jwql.utils.plotting import bar_chart + + +ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +if not ON_GITHUB_ACTIONS: + Mast._portal_api_connection.MAST_REQUEST_URL = get_config()['mast_request_url'] + +# Increase the limit on the number of entries that can be returned by +# a MAST query. +Mast._portal_api_connection.PAGESIZE = MAST_QUERY_LIMIT + + +def instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS, + add_filters=None, add_requests=None, + caom=False, return_data=False): + """Get the counts for a given instrument and data product + + Parameters + ---------- + instrument: str + The instrument name, i.e. one of ['niriss','nircam','nirspec', + 'miri','fgs'] + dataproduct: sequence, str + The type of data product to search + add_filters: dict + The ('paramName':'values') pairs to include in the 'filters' + argument of the request e.g. add_filters = {'filter':'GR150R'} + add_requests: dict + The ('request':'value') pairs to include in the request + e.g. add_requests = {'pagesize':1, 'page':1} + caom: bool + Query CAOM service + return_data: bool + Return the actual data instead of counts only + + Returns + ------- + int, dict + The number of database records that satisfy the search criteria + or a dictionary of the data if `return_data=True` + """ + filters = [] + + # Make sure the dataproduct is a list + if isinstance(dataproduct, str): + dataproduct = [dataproduct] + + # Make sure the instrument is supported + if instrument.lower() not in [ins.lower() for ins in JWST_INSTRUMENT_NAMES]: + raise TypeError('Supported instruments include:', JWST_INSTRUMENT_NAMES) + + # CAOM service + if caom: + + # Declare the service + service = 'Mast.Caom.Filtered' + + # Set the filters + filters += [{'paramName': 'obs_collection', 'values': ['JWST']}, + {'paramName': 'instrument_name', 'values': [instrument]}, + {'paramName': 'dataproduct_type', 'values': dataproduct}] + + # Instruent filtered service + else: + + # Declare the service + service = 'Mast.Jwst.Filtered.{}'.format(instrument.title()) + + # Include additonal filters + if isinstance(add_filters, dict): + filters += [{"paramName": name, "values": [val]} + for name, val in add_filters.items()] + + # Assemble the request + params = {'columns': 'COUNT_BIG(*)', + 'filters': filters, + 'removenullcolumns': True} + + # Just get the counts + if return_data: + params['columns'] = '*' + + # Add requests + if isinstance(add_requests, dict): + params.update(add_requests) + + response = Mast.service_request_async(service, params) + result = response[0].json() + + # Return all the data + if return_data: + return result + + # Or just the counts + else: + return result['data'][0]['Column1'] + + +def instrument_keywords(instrument, caom=False): + """Get the keywords for a given instrument service + + Parameters + ---------- + instrument: str + The instrument name, i.e. 
one of ['niriss','nircam','nirspec', + 'miri','fgs'] + caom: bool + Query CAOM service + + Returns + ------- + pd.DataFrame + A DataFrame of the keywords + """ + # Retrieve one dataset to get header keywords + if not caom: + filter_to_add = {'program': '01440'} + else: + filter_to_add = {'proposal_id': '01440'} + sample = instrument_inventory(instrument, return_data=True, caom=caom, + add_requests={'pagesize': 1, 'page': 1}, + add_filters=filter_to_add) + data = [[i['name'], i['type']] for i in sample['fields']] + keywords = pd.DataFrame(data, columns=('keyword', 'dtype')) + + return keywords + + +def jwst_inventory(instruments=JWST_INSTRUMENT_NAMES, + dataproducts=['image', 'spectrum', 'cube'], + caom=False, plot=False, output_dir=None): + """Gather a full inventory of all JWST data in each instrument + service by instrument/dtype + + Parameters + ---------- + instruments: sequence + The list of instruments to count + dataproducts: sequence + The types of dataproducts to count + caom: bool + Query CAOM service + plot: bool + Return a pie chart of the data + output_dir: str + Directory into which plots are saved + + Returns + ------- + astropy.table.table.Table + The table of record counts for each instrument and mode + """ + if output_dir is None: + output_dir = os.path.join(get_config()['outputs'], 'mast_utils') + ensure_dir_exists(output_dir) + + logging.info('Searching database...') + # Iterate through instruments + inventory, keywords = [], {} + for instrument in instruments: + ins = [instrument] + for dp in dataproducts: + count = instrument_inventory(instrument, dataproduct=dp, caom=caom) + ins.append(count) + + # Get the total + ins.append(sum(ins[-3:])) + + # Add it to the list + inventory.append(ins) + + # Add the keywords to the dict + keywords[instrument] = instrument_keywords(instrument, caom=caom) + + logging.info('Completed database search for {} instruments and {} data products.'. 
+ format(instruments, dataproducts)) + + # Make the table + all_cols = ['instrument'] + dataproducts + ['total'] + table = pd.DataFrame(inventory, columns=all_cols) + + # Plot it + if plot: + if caom: + output_filename = 'database_monitor_caom' + else: + output_filename = 'database_monitor_jwst' + + # Make the plot + plt = bar_chart(table, 'instrument', dataproducts, + title="JWST Inventory") + + # Save the plot as full html + html_filename = output_filename + '.html' + outfile = os.path.join(output_dir, html_filename) + output_file(outfile) + save(plt) + set_permissions(outfile) + + logging.info('Saved Bokeh plots as HTML file: {}'.format(html_filename)) + + # Save the plot as components + plt.sizing_mode = 'stretch_both' + script, div = components(plt) + + div_outfile = os.path.join(output_dir, output_filename + "_component.html") + with open(div_outfile, 'w') as f: + f.write(div) + f.close() + set_permissions(div_outfile) + + script_outfile = os.path.join(output_dir, output_filename + "_component.js") + with open(script_outfile, 'w') as f: + f.write(script) + f.close() + set_permissions(script_outfile) + + logging.info('Saved Bokeh components files: {}_component.html and {}_component.js'.format( + output_filename, output_filename)) + + # Melt the table + table = pd.melt(table, id_vars=['instrument'], + value_vars=dataproducts, + value_name='files', var_name='dataproduct') + + return table, keywords def mast_query(instrument, templates, start_date, end_date, aperture=None, detector=None, filter_name=None, @@ -73,7 +308,7 @@ def mast_query(instrument, templates, start_date, end_date, aperture=None, detec # Make sure instrument is correct case instrument = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()] - # monitor_mast.instrument_inventory does not allow list inputs to + # instrument_inventory does not allow list inputs to # the added_filters input (or at least if you do provide a list, then # it becomes a nested list when it sends the query to MAST. The # nested list is subsequently ignored by MAST.) @@ -101,8 +336,8 @@ def mast_query(instrument, templates, start_date, end_date, aperture=None, detec if lamp is not None: parameters["lamp"] = lamp - query = monitor_mast.instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS, - add_filters=parameters, return_data=True, caom=False) + query = instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS, + add_filters=parameters, return_data=True, caom=False) if len(query['data']) > 0: query_results.extend(query['data']) @@ -147,7 +382,7 @@ def mast_query_miri(detector, aperture, templates, start_date, end_date): instrument = 'MIRI' - # monitor_mast.instrument_inventory does not allow list inputs to + # instrument_inventory does not allow list inputs to # the added_filters input (or at least if you do provide a list, then # it becomes a nested list when it sends the query to MAST. The # nested list is subsequently ignored by MAST.) 
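For reference, the relocated ``instrument_inventory`` helper keeps the calling pattern the query functions above depend on: a single filter dictionary per exposure type, with ``return_data=True`` to fetch records rather than counts. A minimal usage sketch follows; the MJD window and ``exp_type`` value are illustrative placeholders, and a populated ``config.json`` with MAST settings is assumed::

    from jwql.utils.constants import JWST_DATAPRODUCTS
    from jwql.utils.mast_utils import instrument_inventory

    # Filters are passed as one flat dictionary; list values are not supported,
    # so callers query once per exposure type and combine the results.
    filters = {'date_obs_mjd': {'min': 59000.0, 'max': 59030.0},  # placeholder dates
               'exp_type': 'NRC_DARK'}

    # Count the matching records
    n_records = instrument_inventory('nircam', dataproduct=JWST_DATAPRODUCTS,
                                     add_filters=filters, caom=False)

    # Or retrieve the records themselves
    results = instrument_inventory('nircam', dataproduct=JWST_DATAPRODUCTS,
                                   add_filters=filters, return_data=True, caom=False)
    print(n_records, len(results['data']))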
@@ -164,8 +399,8 @@ def mast_query_miri(detector, aperture, templates, start_date, end_date): parameters = {"date_obs_mjd": {"min": start_date, "max": end_date}, "detector": detector, "exp_type": template_name} - query = monitor_mast.instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS, - add_filters=parameters, return_data=True, caom=False) + query = instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS, + add_filters=parameters, return_data=True, caom=False) if len(query['data']) > 0: query_results.extend(query['data']) diff --git a/jwql/utils/monitor_utils.py b/jwql/utils/monitor_utils.py index 533eea0e4..f5c673cbd 100644 --- a/jwql/utils/monitor_utils.py +++ b/jwql/utils/monitor_utils.py @@ -5,6 +5,7 @@ - Matthew Bourque - Bryan Hilbert + - Maria Pena-Guerrero Use --- @@ -17,12 +18,35 @@ """ import datetime import os +from astroquery.mast import Mast, Observations +from django import setup -from jwql.database.database_interface import Monitor -from jwql.jwql_monitors import monitor_mast -from jwql.utils.constants import ASIC_TEMPLATES, JWST_DATAPRODUCTS +from jwql.database.database_interface import Monitor, engine +from jwql.utils.constants import ASIC_TEMPLATES, JWST_DATAPRODUCTS, MAST_QUERY_LIMIT from jwql.utils.logging_functions import configure_logging, get_log_status +from jwql.utils import mast_utils +from jwql.utils.utils import filename_parser + + +# Increase the limit on the number of entries that can be returned by +# a MAST query. +Mast._portal_api_connection.PAGESIZE = MAST_QUERY_LIMIT + +# Determine if the code is being run as part of a github action or Readthedocs build +ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +ON_READTHEDOCS = False +if 'READTHEDOCS' in os.environ: # pragma: no cover + ON_READTHEDOCS = os.environ['READTHEDOCS'] + +if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: + # These lines are needed in order to use the Django models in a standalone + # script (as opposed to code run as a result of a webpage request). If these + # lines are not run, the script will crash when attempting to import the + # Django models in the line below. + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") + setup() + from jwql.website.apps.jwql.models import RootFileInfo def exclude_asic_tuning(mast_results): @@ -113,7 +137,7 @@ def mast_query_darks(instrument, aperture, start_date, end_date, readpatt=None): instrument = 'MIRI' dark_template = ['MIR_DARKALL', 'MIR_DARKIMG', 'MIR_DARKMRS'] - # monitor_mast.instrument_inventory does not allow list inputs to + # instrument_inventory does not allow list inputs to # the added_filters input (or at least if you do provide a list, then # it becomes a nested list when it sends the query to MAST. The # nested list is subsequently ignored by MAST.) 
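A usage sketch of the reworked dark query is shown below; the aperture name and MJD window are placeholders rather than values taken from this patch, and a configured ``jwql`` environment is assumed::

    from jwql.utils.monitor_utils import mast_query_darks

    # One MAST query is issued per dark template and the results are combined
    entries = mast_query_darks('nircam', 'NRCA1_FULL', 59000.0, 59030.0, readpatt='RAPID')
    print('Found {} dark exposures'.format(len(entries)))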
@@ -124,14 +148,13 @@ def mast_query_darks(instrument, aperture, start_date, end_date, readpatt=None): # Create dictionary of parameters to add parameters = {"date_obs_mjd": {"min": start_date, "max": end_date}, - "apername": aperture, "exp_type": template_name, - } + "apername": aperture, "exp_type": template_name, } if readpatt is not None: parameters["readpatt"] = readpatt - query = monitor_mast.instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS, - add_filters=parameters, return_data=True, caom=False) + query = mast_utils.instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS, + add_filters=parameters, return_data=True, caom=False) if 'data' in query.keys(): if len(query['data']) > 0: query_results.extend(query['data']) @@ -139,6 +162,110 @@ def mast_query_darks(instrument, aperture, start_date, end_date, readpatt=None): return query_results +def mast_query_ta(instrument, aperture, start_date, end_date, readpatt=None): + """Use ``astroquery`` to search MAST for TA current data + + Parameters + ---------- + instrument : str + Instrument name (e.g. ``nirspec``) + + aperture : str + Detector aperture to search for (e.g. ``NRS_S1600A1_SLIT``) + + start_date : float + Starting date for the search in MJD + + end_date : float + Ending date for the search in MJD + + readpatt : str + Readout pattern to search for (e.g. ``RAPID``). If None, + readout pattern will not be added to the query parameters. + + Returns + ------- + query_results : list + List of dictionaries containing the query results + """ + + # Make sure instrument is correct case + if instrument.lower() == 'nirspec': + instrument = 'Nirspec' + if aperture == 'NRS_S1600A1_SLIT': + exp_types = ['NRS_TASLIT', 'NRS_BOTA', 'NRS_WATA'] + else: + exp_types = ['NRS_TACQ', 'NRS_MSATA'] + + # instrument_inventory does not allow list inputs to + # the added_filters input (or at least if you do provide a list, then + # it becomes a nested list when it sends the query to MAST. The + # nested list is subsequently ignored by MAST.) + # So query once for each exp_type, and combine outputs into a + # single list. + query_results = [] + for template_name in exp_types: + + # Create dictionary of parameters to add + parameters = {"date_obs_mjd": {"min": start_date, "max": end_date}, + "apername": aperture, "exp_type": template_name} + + if readpatt is not None: + parameters["readpatt"] = readpatt + + query = mast_utils.instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS, + add_filters=parameters, return_data=True, caom=False) + if 'data' in query.keys(): + if len(query['data']) > 0: + query_results.extend(query['data']) + + return query_results + + +def model_query_ta(instrument, aperture, start_date, end_date, readpatt=None): + """Use local Django model to search for TA data. + + Parameters + ---------- + instrument : str + Instrument name (e.g. ``nirspec``) + aperture : str + Detector aperture to search for (e.g. ``NRS_S1600A1_SLIT``) + start_date : float + Starting date for the search in MJD + end_date : float + Ending date for the search in MJD + readpatt : str + Readout pattern to search for (e.g. ``RAPID``). If None, + readout pattern will not be added to the query parameters. 
+ + Returns + ------- + query_results : list + List of dictionaries containing the query results + """ + if aperture == 'NRS_S1600A1_SLIT': + exp_types = ['NRS_TASLIT', 'NRS_BOTA', 'NRS_WATA'] + else: + exp_types = ['NRS_TACQ', 'NRS_MSATA'] + + filter_kwargs = { + 'instrument__iexact': instrument, + 'aperture__iexact': aperture, + 'exp_type__in': exp_types, + 'expstart__gte': start_date, + 'expstart__lte': end_date + } + + if readpatt is not None: + filter_kwargs['readpatt'] = readpatt + + # get file info by instrument from local model + root_file_info = RootFileInfo.objects.filter(**filter_kwargs) + + return root_file_info.values() + + def update_monitor_table(module, start_time, log_file): """Update the ``monitor`` database table with information about the instrument monitor run @@ -159,4 +286,5 @@ def update_monitor_table(module, start_time, log_file): new_entry['status'] = get_log_status(log_file) new_entry['log_file'] = os.path.basename(log_file) - Monitor.__table__.insert().execute(new_entry) + with engine.begin() as connection: + connection.execute(Monitor.__table__.insert(), new_entry) diff --git a/jwql/utils/organize_filesystem.py b/jwql/utils/organize_filesystem.py index 62b59a8de..b4dcf483e 100644 --- a/jwql/utils/organize_filesystem.py +++ b/jwql/utils/organize_filesystem.py @@ -64,10 +64,10 @@ def organize_filesystem(): dst = os.path.join(destination_directory, os.path.basename(src)) # Create parent directories if necessary - #ensure_dir_exists(destination_directory) + # ensure_dir_exists(destination_directory) # Move the file over - #shutil.move(src, dst) + # shutil.move(src, dst) print('\tMoved {} to {}'.format(src, dst)) @@ -103,10 +103,10 @@ def revert_filesystem(): dst = os.path.join(destination_directory, os.path.basename(src)) # Create parent directories if necessary - #ensure_dir_exists(destination_directory) + # ensure_dir_exists(destination_directory) # Move the file over - #shutil.move(src, dst) + # shutil.move(src, dst) print('\tMoved {} to {}'.format(src, dst)) diff --git a/jwql/utils/permissions.py b/jwql/utils/permissions.py index 5c6c3c33e..2524b1c7c 100644 --- a/jwql/utils/permissions.py +++ b/jwql/utils/permissions.py @@ -82,7 +82,49 @@ DEFAULT_GROUP = 'jwql_dev' # set the default mode for DEFAULT_OWNER -DEFAULT_MODE = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP # equivalent to '?rwxr-x---' +DEFAULT_MODE = stat.S_IREAD | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH # equivalent to '?rw-r--r--' +LOCAL_MODE = stat.S_IREAD | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH # equivalent to '?rw-rw-r--' +DEFAULT_MODES = {DEFAULT_OWNER: DEFAULT_MODE, + 'other': LOCAL_MODE} + + +def find_mode_to_use(pathname, owner, mode): + """Select the appropriate mode to use for the input pathname, + depending on who the owner is, as well as whether the pathname + is a file or directory. + + Parameters + ---------- + pathname : str + Directory or file to be inspected + owner : str + String representation of the owner + mode : dict + Dictionary of integer representation of the permission mode, compatible with + ``os.stat`` output. 
Keys are (with a default of DEFAULT_OWNER), + and 'other', so we can differentiate between files created by the server + accounts and those created by local users + + Returns + ------- + mode_value : int + Integer representation of the permission mode, compatible + with ``os.stat`` output + """ + if owner in get_owner_string(pathname): + if DEFAULT_OWNER in owner: + mode_value = mode[DEFAULT_OWNER] + else: + mode_value = mode['other'] + else: + mode_value = mode['other'] + + # Default permissions are for a file. If pathname is a directory, then + # make it owner and group executable as well + if os.path.isdir(pathname): + mode_value = mode_value | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH + + return mode_value def get_group_string(pathname): @@ -123,7 +165,7 @@ def get_owner_string(pathname): return owner_name -def has_permissions(pathname, owner=DEFAULT_OWNER, mode=DEFAULT_MODE, group=DEFAULT_GROUP): +def has_permissions(pathname, owner=DEFAULT_OWNER, mode=DEFAULT_MODES, group=DEFAULT_GROUP): """Return boolean indicating whether ``pathname`` has the specified owner, permission, and group scheme. @@ -147,20 +189,24 @@ def has_permissions(pathname, owner=DEFAULT_OWNER, mode=DEFAULT_MODE, group=DEFA file_statinfo = os.stat(pathname) groupinfo = grp.getgrgid(file_statinfo.st_gid) + mode_to_use = find_mode_to_use(pathname, owner, mode) + # complement mode depending on whether input is file or directory if os.path.isfile(pathname): - mode = mode | stat.S_IFREG + mode_to_use = mode_to_use | stat.S_IFREG elif os.path.isdir(pathname): - mode = mode | stat.S_IFDIR + mode_to_use = mode_to_use | stat.S_IFDIR - if (get_owner_string(pathname) != owner) or (file_statinfo.st_mode != mode)\ + # This will be False in all cases when running on the servers, since the DEFAULT_OWNER + # does not match the full owner names + if (get_owner_string(pathname) != owner) or (file_statinfo.st_mode != mode_to_use)\ or (groupinfo.gr_name != group): return False return True -def set_permissions(pathname, owner=DEFAULT_OWNER, mode=DEFAULT_MODE, group=DEFAULT_GROUP, verbose=False): +def set_permissions(pathname, owner=DEFAULT_OWNER, mode=DEFAULT_MODES, group=DEFAULT_GROUP, verbose=False): """Set mode and group of the file/directory identfied by ``pathname``, if and only if it is owned by ``owner``. @@ -170,23 +216,32 @@ def set_permissions(pathname, owner=DEFAULT_OWNER, mode=DEFAULT_MODE, group=DEFA Directory or file to be inspected owner : str String representation of the owner - mode : int - Integer representation of the permission mode, compatible with - ``os.stat`` output + mode : dict + Dictionary of integer representation of the permission mode, compatible with + ``os.stat`` output. Keys are (with a default of DEFAULT_OWNER), + and 'other', so we can differentiate between files created by the server + accounts and those created by local users group : str String representation of the group verbose : bool Boolean indicating whether verbose output is requested """ + # When using the defaults, if the file is owned by one of the server + # accounts, then set the permissions using the DEFAULT_OWNER's default + # permissions. Otherwise fall back to the 'other' default permissions. 
+ mode_to_use = find_mode_to_use(pathname, owner, mode) + if verbose: print('\nBefore:') show_permissions(pathname) if not has_permissions(pathname): - if get_owner_string(pathname) == owner: - os.chmod(pathname, mode) + try: + os.chmod(pathname, mode_to_use) # change group but not owner os.chown(pathname, -1, grp.getgrnam(group).gr_gid) + except (PermissionError, KeyError): + pass if verbose: print('After:') diff --git a/jwql/utils/preview_image.py b/jwql/utils/preview_image.py index dd29da687..f86c10272 100755 --- a/jwql/utils/preview_image.py +++ b/jwql/utils/preview_image.py @@ -37,22 +37,35 @@ import logging import os import socket +import warnings from astropy.io import fits import numpy as np from jwql.utils import permissions +from jwql.utils.utils import get_config # Use the 'Agg' backend to avoid invoking $DISPLAY import matplotlib matplotlib.use('Agg') -import matplotlib.pyplot as plt -import matplotlib.colors as colors +import matplotlib.pyplot as plt # noqa +import matplotlib.colors as colors # noqa +from matplotlib.ticker import AutoMinorLocator # noqa # Only import jwst if not running from readthedocs -if 'build' and 'project' not in socket.gethostname(): +# Determine if the code is being run as part of a Readthedocs build +ON_READTHEDOCS = False +if 'READTHEDOCS' in os.environ: + ON_READTHEDOCS = os.environ['READTHEDOCS'] + +if not ON_READTHEDOCS: from jwst.datamodels import dqflags +ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') + +if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: + CONFIGS = get_config() + class PreviewImage(): """An object for generating and saving preview images, used by @@ -122,6 +135,44 @@ def __init__(self, filename, extension): # Read in file self.data, self.dq = self.get_data(self.file, extension) + def determine_map_file(self, header): + """Determine which file contains the map of non-science pixels given a + file header + + Parameters + ---------- + header : astropy.io.fits.header + Header object from an HDU object + """ + if header['INSTRUME'] == 'MIRI': + # MIRI imaging files use the external MIRI non-science map. Note that MIRI_CORONCAL and + # MIRI_LYOT observations also have 'mirimage' in the filename. We deal with this in + # crop_to_subarray() + if 'CORONMSK' not in header: + self.nonsci_map_file = (os.path.join(CONFIGS['outputs'], 'non_science_maps', 'mirimage_non_science_map.fits')) + elif header['CORONMSK'] == '4QPM_1065': + self.nonsci_map_file = (os.path.join(CONFIGS['outputs'], 'non_science_maps', 'miri4qpm_1065_non_science_map.fits')) + elif header['CORONMSK'] == '4QPM_1140': + self.nonsci_map_file = (os.path.join(CONFIGS['outputs'], 'non_science_maps', 'miri4qpm_1140_non_science_map.fits')) + elif header['CORONMSK'] == '4QPM_1550': + self.nonsci_map_file = (os.path.join(CONFIGS['outputs'], 'non_science_maps', 'miri4qpm_1550_non_science_map.fits')) + elif header['CORONMSK'] in ['LYOT', 'LYOT_2300']: + self.nonsci_map_file = (os.path.join(CONFIGS['outputs'], 'non_science_maps', 'mirilyot_non_science_map.fits')) + + elif header['INSTRUME'] == 'NIRSPEC': + if 'NRSIRS2' in header['READPATT']: + # IRS2 mode arrays are very different sizes between uncal and i2d files. For the uncal, + # use the external non-science map. The i2d files we can treat like i2d files from the + # other NIR detectors. 
+ if header['DETECTOR'] == 'NRS1': + self.nonsci_map_file = (os.path.join(CONFIGS['outputs'], 'non_science_maps', 'nrs1_irs2_non_science_map.fits')) + elif header['DETECTOR'] == 'NRS2': + self.nonsci_map_file = (os.path.join(CONFIGS['outputs'], 'non_science_maps', 'nrs2_irs2_non_science_map.fits')) + else: + self.nonsci_map_file = None + else: + self.nonsci_map_file = None + def difference_image(self, data): """ Create a difference image from the data. Use last group minus @@ -141,7 +192,7 @@ def difference_image(self, data): """ return data[:, -1, :, :] - data[:, 0, :, :] - def find_limits(self, data, pixmap, clipperc): + def find_limits(self, data): """ Find the minimum and maximum signal levels after clipping the top and bottom ``clipperc`` of the pixels. @@ -150,24 +201,39 @@ def find_limits(self, data, pixmap, clipperc): ---------- data : obj 2D numpy ndarray of floats - pixmap : obj - 2D numpy ndarray boolean array of science pixel locations - (``True`` for science pixels, ``False`` for non-science - pixels) - clipperc : float - Fraction of top and bottom signal levels to clip (e.g. 0.01 - means to clip brightest and dimmest 1% of pixels) Returns ------- results : tuple Tuple of floats, minimum and maximum signal levels """ + # Ignore any pixels that are NaN + finite = np.isfinite(data) + + # If all non-science pixels are NaN then we're sunk. Scale + # from 0 to 1. + if not np.any(finite): + logging.info('No pixels with finite signal. Scaling from 0 to 1') + return (0., 1.) + + # Combine maps of science pixels and finite pixels + pixmap = self.dq & finite + + # If all non-science pixels are NaN then we're sunk. Scale + # from 0 to 1. + if not np.any(pixmap): + logging.info('No pixels with finite signal. Scaling from 0 to 1') + return (0., 1.) + + sorted_pix = np.sort(data[pixmap], axis=None) + + # Determine how many pixels to clip off of the high and low ends nelem = np.sum(pixmap) - numclip = np.int(clipperc * nelem) - sorted = np.sort(data[pixmap], axis=None) - minval = sorted[numclip] - maxval = sorted[-numclip - 1] + numclip = np.int32(self.clip_percent * nelem) + + # Determine min and max scaling levels + minval = sorted_pix[numclip] + maxval = sorted_pix[-numclip - 1] return (minval, maxval) def get_data(self, filename, ext): @@ -202,18 +268,75 @@ def get_data(self, filename, ext): if ext in extnames: dimensions = len(hdulist[ext].data.shape) if dimensions == 4: - data = hdulist[ext].data[:, [0, -1], :, :].astype(np.float) + data = hdulist[ext].data[:, [0, -1], :, :].astype(float) else: - data = hdulist[ext].data.astype(np.float) + data = hdulist[ext].data.astype(float) + yd, xd = data.shape[-2:] + try: + self.units = f"{hdulist[ext].header['BUNIT']} " + except KeyError: + self.units = '' else: raise ValueError('WARNING: no {} extension in {}!'.format(ext, filename)) - if 'PIXELDQ' in extnames: - dq = hdulist['PIXELDQ'].data - dq = (dq & dqflags.pixel['NON_SCIENCE'] == 0) + # For files that have no DQ extension, we get a map of the non-science + # pixels from a dedicated map file. Getting this info from the DQ extension + # doesn't work for uncal and i2d files, nor MIRI rate files. + self.determine_map_file(hdulist[0].header) + + if (('uncal' in filename) or ('i2d' in filename)): + # uncal files have no DQ extensions, so we can't get a map of non-science pixels from the + # data itself. 
+ if 'miri' in filename: + if 'mirimage' in filename: + dq = self.nonsci_from_file() + dq = crop_to_subarray(dq, hdulist[0].header, xd, yd) + dq = expand_for_i2d(dq, xd, yd) + else: + # For MIRI MRS/LRS data, we don't worry about non-science pixels, so create a map where all + # pixels are good. + dq = np.ones((yd, xd), dtype="bool").astype(bool) + elif 'nrs' in filename: + if 'NRSIRS2' in hdulist[0].header['READPATT']: + # IRS2 mode arrays are very different sizes between uncal and i2d files. For the uncal, + # use the external non-science map. The i2d files we can treat like i2d files from the + # other NIR detectors. + if 'uncal' in filename: + dq = self.nonsci_from_file() + # ISR2 data are always full frame, so no need to crop to subarray + # and since we are guaranteed to have an uncal file, no need to expand for i2d + elif 'i2d' in filename: + dq = create_nir_nonsci_map() + dq = crop_to_subarray(dq, hdulist[0].header, xd, yd) + dq = expand_for_i2d(dq, xd, yd) + else: + # NIRSpec observations that do not use IRS2 use the "standard" NIR detector non-science map. + # i.e. 4 outer rows and columns are refernece pixels + dq = create_nir_nonsci_map() + dq = crop_to_subarray(dq, hdulist[0].header, xd, yd) + dq = expand_for_i2d(dq, xd, yd) + else: + # All NIRCam, NIRISS, and FGS observations also use the "standard" NIR detector non-science map. + dq = create_nir_nonsci_map() + dq = crop_to_subarray(dq, hdulist[0].header, xd, yd) + dq = expand_for_i2d(dq, xd, yd) + elif 'rate' in filename: + # For rate/rateints images all we need to worry about is MIRI imaging files. For those we use + # the external non-science map, because the pipeline does not add the NON_SCIENCE flags + # to the MIRI DQ extensions until the data are flat fielded, which is after the rate + # files have been created. + if 'mirimage' in filename: + dq = self.nonsci_from_file() + dq = crop_to_subarray(dq, hdulist[0].header, xd, yd) + dq = expand_for_i2d(dq, xd, yd) + else: + # For everything other than MIRI imaging, we get the non-science map from the + # DQ array in the file. + dq = self.get_nonsci_map(hdulist, extnames, xd, yd) else: - yd, xd = data.shape[-2:] - dq = np.ones((yd, xd), dtype="bool") + # For all file suffixes other than uncal and rate/rateints, we get the non-science map + # from the DQ array in the file. + dq = self.get_nonsci_map(hdulist, extnames, xd, yd) # Collect information on aperture location within the # full detector. This is needed for mosaicking NIRCam @@ -229,8 +352,52 @@ def get_data(self, filename, ext): else: raise FileNotFoundError('WARNING: {} does not exist!'.format(filename)) + if dq.shape != data.shape[-2:]: + raise ValueError(f'DQ array does not have the same shape as the data in {filename}') + return data, dq + def get_nonsci_map(self, hdulist, extensions, xdim, ydim): + """Create a map of non-science pixels for a given HDUList. If there is no DQ + extension in the HDUList, assume all pixels are science pixels. + + Parameters + ---------- + hdulist : astropy.io.fits.HDUList + HDUList object from a fits file + + extensions : list + List of extension names in the HDUList + + xdim : int + Number of columns in data array. Only used if there is no DQ extension + + ydim : int + Number of rows in the data array. 
Only used if there is no DQ extension + + Returns + ------- + dq : numpy.ndarray + 2D boolean array giving locations of non-science pixels + """ + if 'DQ' in extensions: + dq = hdulist['DQ'].data + + # For files with multiple integrations (rateints, calints), chop down the + # DQ array to a single frame, since the non-science pixels will be the same + # in all integrations + if len(dq.shape) == 3: + dq = dq[0, :, :] + elif len(dq.shape) == 4: + dq = dq[0, 0, :, :] + + dq = (dq & (dqflags.pixel['NON_SCIENCE'] | dqflags.pixel['REFERENCE_PIXEL']) == 0) + else: + # If there is no DQ extension in the HDUList, then we create a dq map where we assume + # that all of the pixels are science pixels + dq = np.ones((ydim, xdim), dtype=bool) + return dq + def make_figure(self, image, integration_number, min_value, max_value, scale, maxsize=8, thumbnail=False): """ @@ -289,20 +456,21 @@ def make_figure(self, image, integration_number, min_value, max_value, # If making a thumbnail, make a figure with no axes if thumbnail: - fig = plt.imshow(shiftdata, - norm=colors.LogNorm(vmin=shiftmin, - vmax=shiftmax), - cmap=self.cmap) + self.fig, ax = plt.subplots(figsize=(3, 3)) + cax = ax.imshow(shiftdata, + norm=colors.LogNorm(vmin=shiftmin, + vmax=shiftmax), + cmap=self.cmap) # Invert y axis plt.gca().invert_yaxis() plt.axis('off') - fig.axes.get_xaxis().set_visible(False) - fig.axes.get_yaxis().set_visible(False) + cax.axes.get_xaxis().set_visible(False) + cax.axes.get_yaxis().set_visible(False) # If preview image, add axes and colorbars else: - fig, ax = plt.subplots(figsize=(xsize, ysize)) + self.fig, ax = plt.subplots(figsize=(xsize, ysize)) cax = ax.imshow(shiftdata, norm=colors.LogNorm(vmin=shiftmin, vmax=shiftmax), @@ -329,9 +497,16 @@ def make_figure(self, image, integration_number, min_value, max_value, dig = 2 format_string = "%.{}f".format(dig) tlabelstr = [format_string % number for number in tlabelflt] - cbar = fig.colorbar(cax, ticks=tickvals) + cbar = self.fig.colorbar(cax, ticks=tickvals) + + # This seems to correctly remove the ticks and labels we want to remove. It gives a warning that + # it doesn't work on log scales, which we don't care about. So let's ignore that warning. + warnings.filterwarnings("ignore", message="AutoMinorLocator does not work with logarithmic scale") + cbar.ax.yaxis.set_minor_locator(AutoMinorLocator(n=0)) + cbar.ax.set_yticklabels(tlabelstr) cbar.ax.tick_params(labelsize=maxsize * 5. / 4) + cbar.ax.set_ylabel(self.units, labelpad=10, rotation=270) ax.set_xlabel('Pixels', fontsize=maxsize * 5. / 4) ax.set_ylabel('Pixels', fontsize=maxsize * 5. / 4) ax.tick_params(labelsize=maxsize) @@ -342,9 +517,12 @@ def make_figure(self, image, integration_number, min_value, max_value, plt.rcParams.update({'xtick.labelsize': maxsize * 5. 
/ 4}) elif scale == 'linear': - fig, ax = plt.subplots(figsize=(xsize, ysize)) + self.fig, ax = plt.subplots(figsize=(xsize, ysize)) cax = ax.imshow(image, clim=(min_value, max_value), cmap=self.cmap) + # Invert y axis + plt.gca().invert_yaxis() + if not thumbnail: cbar = fig.colorbar(cax) ax.set_xlabel('Pixels') @@ -353,10 +531,19 @@ def make_figure(self, image, integration_number, min_value, max_value, # If preview image, set a title if not thumbnail: filename = os.path.split(self.file)[-1] - ax.set_title(filename + ' Int: {}'.format(np.int(integration_number))) + ax.set_title(filename + ' Int: {}'.format(int(integration_number))) - def make_image(self, max_img_size=8): - """The main function of the ``PreviewImage`` class.""" + def make_image(self, max_img_size=8.0, create_thumbnail=False): + """The main function of the ``PreviewImage`` class. + + Parameters + ---------- + max_img_size : float + Image size in the largest dimension + + create_thumbnail : bool + If True, a thumbnail image is created and saved. + """ shape = self.data.shape @@ -387,8 +574,12 @@ def make_image(self, max_img_size=8): frame = diff_img[i, :, :] # Find signal limits for the display - minval, maxval = self.find_limits(frame, self.dq, - self.clip_percent) + minval, maxval = self.find_limits(frame) + + # Set NaN values to zero, so that those pixels + # do not appear as big white splotches in the jpgs + # after matplotlib downsamples/averages + frame = nan_to_zero(frame) # Create preview image matplotlib object indir, infile = os.path.split(self.file) @@ -401,12 +592,12 @@ def make_image(self, max_img_size=8): self.make_figure(frame, i, minval, maxval, self.scaling.lower(), maxsize=max_img_size, thumbnail=False) self.save_image(outfile, thumbnail=False) - plt.close() + plt.close(self.fig) self.preview_images.append(outfile) # Create thumbnail image matplotlib object, only for the # first integration - if i == 0: + if i == 0 and create_thumbnail: if self.thumbnail_output_directory is None: outdir = indir else: @@ -415,8 +606,24 @@ def make_image(self, max_img_size=8): self.make_figure(frame, i, minval, maxval, self.scaling.lower(), maxsize=max_img_size, thumbnail=True) self.save_image(outfile, thumbnail=True) - plt.close() - self.thumbnail_images.append(outfile) + plt.close(self.fig) + self.thumbnail_images.append(self.thumbnail_filename) + + def nonsci_from_file(self): + """Read in a map of non-science/reference pixels from a fits file + + Parameters + ---------- + filename : str + Name of fits file to be read in. + + Returns + ------- + map : numpy.ndarray + 2D boolean array of pixel values + """ + map = fits.getdata(self.nonsci_map_file) + return map.astype(bool) def save_image(self, fname, thumbnail=False): """ @@ -435,14 +642,140 @@ def save_image(self, fname, thumbnail=False): True if saving a thumbnail image, false for the full preview image. 
""" - plt.savefig(fname, bbox_inches='tight', pad_inches=0) permissions.set_permissions(fname) # If the image is a thumbnail, rename to '.thumb' if thumbnail: - thumb_fname = fname.replace('.jpg', '.thumb') - os.rename(fname, thumb_fname) - logging.info('\tSaved image to {}'.format(thumb_fname)) + self.thumbnail_filename = fname.replace('.jpg', '.thumb') + os.rename(fname, self.thumbnail_filename) + logging.info('\tSaved image to {}'.format(self.thumbnail_filename)) else: logging.info('\tSaved image to {}'.format(fname)) + self.thumbnail_filename = None + + +def create_nir_nonsci_map(): + """Create a map of non-science pixels for a near-IR detector + + Returns + ------- + arr : numpy.ndarray + 2D boolean array. Science pixels have a value of 1 and non-science pixels + (reference pixels) have a value of 0. + """ + arr = np.ones((2048, 2048), dtype=int) + arr[0:4, :] = 0 + arr[:, 0:4] = 0 + arr[2044:, :] = 0 + arr[:, 2044:] = 0 + return arr.astype(bool) + + +def crop_to_subarray(arr, header, xdim, ydim): + """Given a full frame array, along with a fits HDU header containing subarray + information, crop the array down to the indicated subarray. + + Parameters + ---------- + arr : numpy.ndarray + 2D array of data. Assumed to be full frame (2048 x 2048) + + header : astropy.io.fits.header + Header from a single extension of a fits file + + xdim : int + Number of columns in the corresponding data (not dq) array, in pixels + + ydim : int + Number of rows in the corresponding data (not dq) array, in pixels + + Returns + ------- + arr : numpy.ndarray + arr, cropped down to the size specified in the header + """ + # Pixel coordinates in the headers are 1-indexed. Subtract 1 to get them into + # python's 0-indexed system + try: + xstart = header['SUBSTRT1'] - 1 + xlen = header['SUBSIZE1'] + ystart = header['SUBSTRT2'] - 1 + ylen = header['SUBSIZE2'] + except KeyError: + # If subarray info is missing from the header, then we don't know which + # part of the dq array to extract. Rather than raising an exception, let's + # extract a portion of the dq array that is centered on the full frame + # array, so that we can still create a preview image later. + logging.info(f"No subarray location information in {header['FILENAME']}. Extracting a portion of the DQ array centered on the full frame.") + arr_ydim, arr_xdim = arr.shape + ystart = (arr_ydim // 2) - (ydim // 2) + xstart = (arr_xdim // 2) - (xdim // 2) + xlen = xdim + ylen = ydim + return arr[ystart: (ystart + ylen), xstart: (xstart + xlen)] + + +def expand_for_i2d(array, xdim, ydim): + """Some file types, like i2d files, contain arrays with sizes that are different than + those specified in the SUBSIZE header keywords. In those cases, we need to expand the + input array from the official size to the actual size. + + Parameters + ---------- + array : numpy.ndarray + 2D DQ array of booleans + + xdim : int + Number of columns in the data whose dimensions we want ``array`` to have. + (e.g. the dimensions of the i2d file) + + ydim : int + Number of rows in the data whose dimensions we want ``array`` to have. + (e.g. 
the dimensions of the i2d file) + + Returns + ------- + new_array : numpy.ndarray + 2D array with dimensions of (ydim x xdim) + """ + ydim_array, xdim_array = array.shape + if ((ydim_array != ydim) or (xdim_array != xdim)): + if (ydim_array != ydim): + new_array_y = np.zeros((ydim, xdim_array), dtype=bool) # Added rows/cols will be all zeros + y_offset = abs((ydim - ydim_array) // 2) + if (ydim_array < ydim): + new_array_y[y_offset: (y_offset + ydim_array), :] = array + elif (ydim_array > ydim): + new_array_y = array[y_offset: (y_offset + ydim), :] + else: + new_array_y = array + if (xdim_array != xdim): + new_array_x = np.zeros((ydim, xdim), dtype=bool) # Added rows/cols will be all zeros + x_offset = abs((xdim - xdim_array) // 2) + if (xdim_array < xdim): + new_array_x[:, x_offset: (x_offset + xdim_array)] = new_array_y + elif (xdim_array > xdim): + new_array_x = new_array_y[:, x_offset: (x_offset + xdim)] + else: + new_array_x = new_array_y + return new_array_x + else: + return array + +def nan_to_zero(image): + """Set any pixels with a value of NaN to zero + + Parameters + ---------- + image : numpy.ndarray + Array from which NaNs will be removed + + Returns + ------- + image : numpy.ndarray + Input array with NaNs changed to zero + """ + nan = np.isnan(image) + image[nan] = 0 + return image diff --git a/jwql/utils/protect_module.py b/jwql/utils/protect_module.py new file mode 100644 index 000000000..acaaaa365 --- /dev/null +++ b/jwql/utils/protect_module.py @@ -0,0 +1,195 @@ +#! /usr/bin/env python + +""" Protect_module wrapper for the ``jwql`` automation platform. + +This module provides a decorator to protect against the execution of multiple instances of a module. +Intended for when only ONE instance of a module should run at any given time. +Using this decorator, When a module is run, a Lock file is written. The Lock file is removed upon completion of the module. +If there is already a lock file created for that module, the decorator will exit before running module specific code. + +The file will also contain the process id for reference, in case a lock file exists and +the user does not think it should (i.e. module exited unexpectedly without proper closure) +If this scenario arises and the locked module is run again, it will verify that the PID in the current lock file is not actively running. +If the PID is not actively running the module will delete the file, alert the dev team, and carry on with a new lock file/PID. + +This decorator is designed for use with JWQL Monitors and Generate functions. +It should decorate a function called "protected_code" which contains the main functionality where locking is required. + + +Authors +------- + + - Bradley Sappington + +Use +--- + + To protect a module to ensure it is not run multiple times + :: + + import os + from jwql.utils.protect_module import lock_module + + @lock_module + def protected_code(): + # Protected code ensures only 1 instance of module will run at any given time + + # Example code normally in __name == '__main__' check + initialize_code() + my_main_function() + logging_code() + ... 
+ + if __name__ == '__main__': + protected_code() + + +Dependencies +------------ + + None + +References +---------- + + None +""" +import inspect +import getpass +import os +import smtplib +import socket + +from email.mime.text import MIMEText +from functools import wraps +from psutil import pid_exists + +_ALERT_ADDRESS = "jwql@stsci.edu" +_PID_LOCKFILE_KEY = "Process Id = " +ALERT_EMAIL = True # Global to be turned off in tests + + +def _clean_lock_file(filename, module): + locked = True + try: + pid = _retreive_pid_from_lock_file(filename) + notify_str = "" + if not pid_exists(pid): + # if PID associated with the lock file is no longer running, then lock file should not exist, delete it. + if os.path.exists(filename): + try: + os.remove(filename) + notify_str = (f"DELETED FILE `{filename}`\nThis file's associated PID was no longer running.\n\n" + f"This implies the previous instance of {module} may not have completed successfully.\n\n" + f"New instance starting now.") + locked = False + except Exception as e: + notify_str = f"Exception {e} \n {type(e).__name__}\n{e.args}\n\n" + notify_str = notify_str + filename + " delete failed, Please Manually Delete" + return notify_str, locked + except SyntaxError as e: + return str(e), locked + + +def _retreive_pid_from_lock_file(filename): + '''This function retrieves a process ID from a lock file. + + Parameters + ---------- + filename + The filename parameter is a string that represents the name of the lock file from which the process + ID (PID) needs to be retrieved. + + Returns + ------- + int: The process ID (PID) + or + SyntaxError: Indicating that the file should be manually investigated and deleted if appropriate. + + ''' + # Lock file format is established in `jwql/utils/protect_module.py::lock_module` + with open(filename, 'r') as file: + for line in file: + if _PID_LOCKFILE_KEY in line: + number_index = line.index(_PID_LOCKFILE_KEY) + len(_PID_LOCKFILE_KEY) + number = line[number_index:].strip() + return int(number) + + raise SyntaxError(f"No PID found in {filename} - Please manually investigate and delete if appropriate") + + +def _send_notification(message): + '''Sends an email notification to JWQL team alerting them of script actually solving issue (or not) + + Parameters + ---------- + message + The message to be included in the email notification that will be sent out. + + ''' + if ALERT_EMAIL: + user = getpass.getuser() + hostname = socket.gethostname() + deliverer = '{}@stsci.edu'.format(user) + + message = MIMEText(message) + message['Subject'] = f'JWQL ALERT FOR LOCK_MODULE ON {hostname}' + message['From'] = deliverer + message['To'] = _ALERT_ADDRESS + + s = smtplib.SMTP('smtp.stsci.edu') + s.send_message(message) + s.quit() + + +def lock_module(func): + """Decorator to prevent more than 1 instance of a module. + + This function can be used as a decorator to create lock files on python + modules where we only want one instance running at any given time. + More info at top of module + + Parameters + ---------- + func : func + The function to decorate. + + Returns + ------- + wrapped : func + The wrapped function. 
+ """ + + @wraps(func) + def wrapped(*args, **kwargs): + + # Get the module name of the calling method + existing_lock = False + notify_str = "" + frame = inspect.stack()[1] + mod = inspect.getmodule(frame[0]) + module = mod.__file__ + + # remove python suffix if it exists, then append to make testing work properly for instances where .py may not exist + module_lock = module.replace('.py', '.lock') + + if os.path.exists(module_lock): + notify_str, existing_lock = _clean_lock_file(module_lock, module) + if not existing_lock: + if notify_str: + _send_notification(notify_str) + notify_str = "" + + try: + with open(module_lock, "w") as lock_file: + lock_file.write(f"{_PID_LOCKFILE_KEY}{os.getpid()}\n") + return func(*args, **kwargs) + finally: + try: + os.remove(module_lock) + except Exception as e: + notify_str = f"Exception {e} \n {type(e).__name__}\n{e.args}\n" + notify_str = notify_str + module_lock + " delete failed, please investigate cause" + if len(notify_str): + _send_notification(notify_str) + return wrapped diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py index 51c36ea67..113dd66c9 100644 --- a/jwql/utils/utils.py +++ b/jwql/utils/utils.py @@ -27,18 +27,33 @@ - JWST TR JWST-STScI-004800, SM-12 """ -import datetime import getpass import glob +import itertools import json +import pyvo as vo import os import re import shutil - +import http import jsonschema +from astropy.io import fits +from astropy.stats import sigma_clipped_stats +from bokeh.io import export_png +from bokeh.models import LinearColorMapper, LogColorMapper +from bokeh.plotting import figure +import numpy as np +from PIL import Image +from selenium import webdriver + from jwql.utils import permissions -from jwql.utils.constants import FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES_SHORTHAND +from jwql.utils.constants import FILE_AC_CAR_ID_LEN, FILE_AC_O_ID_LEN, FILE_ACT_LEN, \ + FILE_DATETIME_LEN, FILE_EPOCH_LEN, FILE_GUIDESTAR_ATTMPT_LEN_MIN, \ + FILE_GUIDESTAR_ATTMPT_LEN_MAX, FILE_OBS_LEN, FILE_PARALLEL_SEQ_ID_LEN, \ + FILE_PROG_ID_LEN, FILE_SEG_LEN, FILE_SOURCE_ID_LEN, FILE_SUFFIX_TYPES, \ + FILE_TARG_ID_LEN, FILE_VISIT_GRP_LEN, FILE_VISIT_LEN, FILETYPE_WO_STANDARD_SUFFIX, \ + JWST_INSTRUMENT_NAMES_SHORTHAND __location__ = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) @@ -65,8 +80,6 @@ def _validate_config(config_file_dict): "properties": { # List all the possible entries and their types "admin_account": {"type": "string"}, "auth_mast": {"type": "string"}, - "client_id": {"type": "string"}, - "client_secret": {"type": "string"}, "connection_string": {"type": "string"}, "database": { "type": "object", @@ -100,7 +113,7 @@ def _validate_config(config_file_dict): "preview_image_filesystem", "thumbnail_filesystem", "outputs", "jwql_dir", "admin_account", "log_dir", "test_dir", "test_data", "setup_file", "auth_mast", - "client_id", "client_secret", "mast_token"] + "mast_token"] } # Test that the provided config file dict matches the schema @@ -113,6 +126,57 @@ def _validate_config(config_file_dict): ) +def create_png_from_fits(filename, outdir): + """Create and save a png file of the provided file. 
The file + will be saved with the same filename as the input file, but + with fits replaced by png + + Parameters + ---------- + filename : str + Fits file to be opened and saved as a png + + outdir : str + Output directory to save the png file to + + Returns + ------- + png_file : str + Name of the saved png file + """ + if os.path.isfile(filename): + image = fits.getdata(filename) + ny, nx = image.shape + img_mn, img_med, img_dev = sigma_clipped_stats(image[4: ny - 4, 4: nx - 4]) + + plot = figure(tools='') + plot.x_range.range_padding = plot.y_range.range_padding = 0 + plot.toolbar.logo = None + plot.toolbar_location = None + plot.min_border = 0 + plot.xgrid.visible = False + plot.ygrid.visible = False + + # Create the color mapper that will be used to scale the image + mapper = LogColorMapper(palette='Greys256', low=(img_med - (5 * img_dev)), high=(img_med + (5 * img_dev))) + + # Plot image + imgplot = plot.image(image=[image], x=0, y=0, dw=nx, dh=ny, + color_mapper=mapper, level="image") + + # Turn off the axes, in order to make embedding in another figure easier + plot.xaxis.visible = False + plot.yaxis.visible = False + + # Save the plot in a png + output_filename = os.path.join(outdir, os.path.basename(filename).replace('fits', 'png')) + save_png(plot, filename=output_filename) + permissions.set_permissions(output_filename) + return output_filename + else: + return None + + def get_config(): """Return a dictionary that holds the contents of the ``jwql`` config file. @@ -210,7 +274,7 @@ def download_mast_data(query_results, output_dir): # Set up the https connection server = 'mast.stsci.edu' - conn = httplib.HTTPSConnection(server) + conn = http.client.HTTPSConnection(server) # Dowload the products print('Number of query results: {}'.format(len(query_results))) @@ -275,18 +339,23 @@ def filename_parser(filename): """ filename = os.path.basename(filename) - file_root_name = (len(filename.split('.')) < 2) + split_filename = filename.split('.') + file_root_name = (len(split_filename) < 2) + if file_root_name: + root_name = filename + else: + root_name = split_filename[0] # Stage 1 and 2 filenames # e.g. "jw80500012009_01101_00012_nrcalong_uncal.fits" stage_1_and_2 = \ r"jw" \ - r"(?P\d{5})"\ - r"(?P\d{3})"\ - r"(?P\d{3})"\ - r"_(?P\d{2})"\ - r"(?P\d{1})"\ - r"(?P\w{2})"\ + r"(?P\d{" + f"{FILE_PROG_ID_LEN}" + "})"\ + r"(?P\d{" + f"{FILE_OBS_LEN}" + "})"\ + r"(?P\d{" + f"{FILE_VISIT_LEN}" + "})"\ + r"_(?P\d{" + f"{FILE_VISIT_GRP_LEN}" + "})"\ + r"(?P\d{" + f"{FILE_PARALLEL_SEQ_ID_LEN}" + "})"\ + r"(?P\w{" f"{FILE_ACT_LEN}" + "})"\ r"_(?P\d+)"\ r"_(?P((?!_)[\w])+)" @@ -294,23 +363,32 @@ def filename_parser(filename): # e.g. "jw94015002002_02108_00001_mirimage_o002_crf.fits" stage_2c = \ r"jw" \ - r"(?P\d{5})" \ - r"(?P\d{3})" \ - r"(?P\d{3})" \ - r"_(?P\d{2})" \ - r"(?P\d{1})" \ - r"(?P\w{2})" \ + r"(?P\d{" + f"{FILE_PROG_ID_LEN}" + "})" \ + r"(?P\d{" + f"{FILE_OBS_LEN}" + "})" \ + r"(?P\d{" + f"{FILE_VISIT_LEN}" + "})" \ + r"_(?P\d{" + f"{FILE_VISIT_GRP_LEN}" + "})" \ + r"(?P\d{" + f"{FILE_PARALLEL_SEQ_ID_LEN}" + "})" \ + r"(?P\w{" + f"{FILE_ACT_LEN}" + "})" \ r"_(?P\d+)" \ r"_(?P((?!_)[\w])+)"\ - r"_(?P(o\d{3}|(c|a|r)\d{4}))" + r"_(?P(o\d{" + f"{FILE_AC_O_ID_LEN}" + r"}|(c|a|r)\d{" + f"{FILE_AC_CAR_ID_LEN}" + "}))" + + # Stage 2 MSA metadata file. Created by APT and loaded in + # assign_wcs. e.g. 
"jw01118008001_01_msa.fits" + stage_2_msa = \ + r"jw" \ + r"(?P\d{" + f"{FILE_PROG_ID_LEN}" + "})"\ + r"(?P\d{" + f"{FILE_OBS_LEN}" + "})"\ + r"(?P\d{" + f"{FILE_VISIT_LEN}" + "})"\ + r"(_.._msa.fits)" # Stage 3 filenames with target ID # e.g. "jw80600-o009_t001_miri_f1130w_i2d.fits" stage_3_target_id = \ r"jw" \ - r"(?P\d{5})"\ - r"-(?P(o\d{3}|(c|a|r)\d{4}))"\ - r"_(?P(t)\d{3})"\ + r"(?P\d{" + f"{FILE_PROG_ID_LEN}" + "})"\ + r"-(?P(o\d{" + f"{FILE_AC_O_ID_LEN}" + r"}|(c|a|r)\d{" + f"{FILE_AC_CAR_ID_LEN}" + "}))"\ + r"_(?P(t)\d{" + f"{FILE_TARG_ID_LEN}" + "})"\ r"_(?P(nircam|niriss|nirspec|miri|fgs))"\ r"_(?P((?!_)[\w-])+)" @@ -318,9 +396,9 @@ def filename_parser(filename): # e.g. "jw80600-o009_s00001_miri_f1130w_i2d.fits" stage_3_source_id = \ r"jw" \ - r"(?P\d{5})"\ - r"-(?P(o\d{3}|(c|a|r)\d{4}))"\ - r"_(?P(s)\d{5})"\ + r"(?P\d{" + f"{FILE_PROG_ID_LEN}" + "})"\ + r"-(?P(o\d{" + f"{FILE_AC_O_ID_LEN}" + r"}|(c|a|r)\d{" + f"{FILE_AC_CAR_ID_LEN}" + "}))"\ + r"_(?P(s)\d{" + f"{FILE_SOURCE_ID_LEN}" + "})"\ r"_(?P(nircam|niriss|nirspec|miri|fgs))"\ r"_(?P((?!_)[\w-])+)" @@ -328,10 +406,10 @@ def filename_parser(filename): # e.g. "jw80600-o009_t001-epoch1_miri_f1130w_i2d.fits" stage_3_target_id_epoch = \ r"jw" \ - r"(?P\d{5})"\ - r"-(?P(o\d{3}|(c|a|r)\d{4}))"\ - r"_(?P(t)\d{3})"\ - r"-epoch(?P\d{1})"\ + r"(?P\d{" + f"{FILE_PROG_ID_LEN}" + "})"\ + r"-(?P(o\d{" + f"{FILE_AC_O_ID_LEN}" + r"}|(c|a|r)\d{" + f"{FILE_AC_CAR_ID_LEN}" + "}))"\ + r"_(?P(t)\d{" + f"{FILE_TARG_ID_LEN}" + "})"\ + r"-epoch(?P\d{" + f"{FILE_EPOCH_LEN}" + "})"\ r"_(?P(nircam|niriss|nirspec|miri|fgs))"\ r"_(?P((?!_)[\w-])+)" @@ -339,10 +417,10 @@ def filename_parser(filename): # e.g. "jw80600-o009_s00001-epoch1_miri_f1130w_i2d.fits" stage_3_source_id_epoch = \ r"jw" \ - r"(?P\d{5})"\ - r"-(?P(o\d{3}|(c|a|r)\d{4}))"\ - r"_(?P(s)\d{5})"\ - r"-epoch(?P\d{1})"\ + r"(?P\d{" + f"{FILE_PROG_ID_LEN}" + "})"\ + r"-(?P(o\d{" + f"{FILE_AC_O_ID_LEN}" + r"}|(c|a|r)\d{" + f"{FILE_AC_CAR_ID_LEN}" + "}))"\ + r"_(?P(s)\d{" + f"{FILE_SOURCE_ID_LEN}" + "})"\ + r"-epoch(?P\d{" + f"{FILE_EPOCH_LEN}" + "})"\ r"_(?P(nircam|niriss|nirspec|miri|fgs))"\ r"_(?P((?!_)[\w-])+)" @@ -350,54 +428,86 @@ def filename_parser(filename): # e.g. "jw00733003001_02101_00002-seg001_nrs1_rate.fits" time_series = \ r"jw" \ - r"(?P\d{5})"\ - r"(?P\d{3})"\ - r"(?P\d{3})"\ - r"_(?P\d{2})"\ - r"(?P\d{1})"\ - r"(?P\w{2})"\ + r"(?P\d{" + f"{FILE_PROG_ID_LEN}" + "})"\ + r"(?P\d{" + f"{FILE_OBS_LEN}" + "})"\ + r"(?P\d{" + f"{FILE_VISIT_LEN}" + "})"\ + r"_(?P\d{" + f"{FILE_VISIT_GRP_LEN}" + "})"\ + r"(?P\d{" + f"{FILE_PARALLEL_SEQ_ID_LEN}" + "})"\ + r"(?P\w{" + f"{FILE_ACT_LEN}" + "})"\ r"_(?P\d+)"\ - r"-seg(?P\d{3})"\ - r"_(?P\w+)" + r"-seg(?P\d{" + f"{FILE_SEG_LEN}" + "})"\ + r"_(?P((?!_)[\w])+)" + + # Time series filenames for stage 2c + # e.g. "jw00733003001_02101_00002-seg001_nrs1_o001_crfints.fits" + time_series_2c = \ + r"jw" \ + r"(?P\d{" + f"{FILE_PROG_ID_LEN}" + "})"\ + r"(?P\d{" + f"{FILE_OBS_LEN}" + "})"\ + r"(?P\d{" + f"{FILE_VISIT_LEN}" + "})"\ + r"_(?P\d{" + f"{FILE_VISIT_GRP_LEN}" + "})"\ + r"(?P\d{" + f"{FILE_PARALLEL_SEQ_ID_LEN}" + "})"\ + r"(?P\w{" + f"{FILE_ACT_LEN}" + "})"\ + r"_(?P\d+)"\ + r"-seg(?P\d{" + f"{FILE_SEG_LEN}" + "})"\ + r"_(?P((?!_)[\w])+)"\ + r"_(?P(o\d{" + f"{FILE_AC_O_ID_LEN}" + r"}|(c|a|r)\d{" + f"{FILE_AC_CAR_ID_LEN}" + "}))" # Guider filenames # e.g. 
"jw00729011001_gs-id_1_image_cal.fits" or # "jw00799003001_gs-acq1_2019154181705_stream.fits" guider = \ r"jw" \ - r"(?P\d{5})" \ - r"(?P\d{3})" \ - r"(?P\d{3})" \ + r"(?P\d{" + f"{FILE_PROG_ID_LEN}" + "})" \ + r"(?P\d{" + f"{FILE_OBS_LEN}" + "})" \ + r"(?P\d{" + f"{FILE_VISIT_LEN}" + "})" \ r"_gs-(?P(id|acq1|acq2|track|fg))" \ - r"_((?P\d{13})|(?P\d{1}))" + r"_((?P\d{" + f"{FILE_DATETIME_LEN}" + r"})|(?P\d{" + f"{FILE_GUIDESTAR_ATTMPT_LEN_MIN},{FILE_GUIDESTAR_ATTMPT_LEN_MAX}" + "}))" + + # Segment guider filenames + # e.g. "jw01118005001_gs-fg_2022150070312-seg002_uncal.fits" + guider_segment = \ + r"jw" \ + r"(?P\d{" + f"{FILE_PROG_ID_LEN}" + "})" \ + r"(?P\d{" + f"{FILE_OBS_LEN}" + "})" \ + r"(?P\d{" + f"{FILE_VISIT_LEN}" + "})" \ + r"_gs-(?P(id|acq1|acq2|track|fg))" \ + r"_((?P\d{" + f"{FILE_DATETIME_LEN}" + r"})|(?P\d{" + f"{FILE_GUIDESTAR_ATTMPT_LEN_MIN},{FILE_GUIDESTAR_ATTMPT_LEN_MAX}" + "}))" \ + r"-seg(?P\d{" + f"{FILE_SEG_LEN}" + "})" # Build list of filename types filename_types = [ stage_1_and_2, stage_2c, + stage_2_msa, stage_3_target_id, stage_3_source_id, stage_3_target_id_epoch, stage_3_source_id_epoch, time_series, - guider] + time_series_2c, + guider, + guider_segment] filename_type_names = [ 'stage_1_and_2', 'stage_2c', + 'stage_2_msa', 'stage_3_target_id', 'stage_3_source_id', 'stage_3_target_id_epoch', 'stage_3_source_id_epoch', 'time_series', - 'guider' + 'time_series_2c', + 'guider', + 'guider_segment' ] # Try to parse the filename for filename_type, filename_type_name in zip(filename_types, filename_type_names): - # If full filename, try using suffix - if not file_root_name: + # If full filename, try using suffix, except for *msa.fits files + if not file_root_name and FILETYPE_WO_STANDARD_SUFFIX not in filename: filename_type += r"_(?P{}).*".format('|'.join(FILE_SUFFIX_TYPES)) # If not, make sure the provided regex matches the entire filename root else: @@ -420,12 +530,25 @@ def filename_parser(filename): # Also, add the instrument if not already there if 'instrument' not in filename_dict.keys(): - if name_match == 'guider': + if name_match in ['guider', 'guider_segment']: filename_dict['instrument'] = 'fgs' elif 'detector' in filename_dict.keys(): filename_dict['instrument'] = JWST_INSTRUMENT_NAMES_SHORTHAND[ filename_dict['detector'][:3].lower() ] + elif name_match == 'stage_2_msa': + filename_dict['instrument'] = 'nirspec' + + # Also add detector, root name, and group root name + root_name = re.sub(rf"_{filename_dict.get('suffix', '')}$", '', root_name) + root_name = re.sub(rf"_{filename_dict.get('ac_id', '')}$", '', root_name) + filename_dict['file_root'] = root_name + if 'detector' not in filename_dict.keys(): + filename_dict['detector'] = 'Unknown' + filename_dict['group_root'] = root_name + else: + group_root = re.sub(rf"_{filename_dict['detector']}$", '', root_name) + filename_dict['group_root'] = group_root # Raise error if unable to parse the filename except AttributeError: @@ -473,7 +596,7 @@ def filesystem_path(filename, check_existence=True, search=None): if len(filenames_found) > 0: filename = os.path.basename(filenames_found[0]) else: - raise FileNotFoundError('{} did not yeild any files in predicted location {}'.format(search, full_subdir)) + raise FileNotFoundError('{} did not yield any files in predicted location {}'.format(search, full_subdir)) full_path = os.path.join(subdir1, subdir2, filename) @@ -514,6 +637,30 @@ def get_base_url(): return base_url +def get_rootnames_for_instrument_proposal(instrument, proposal): + """Return a list of 
rootnames for the given instrument and proposal + + Parameters + ---------- + instrument : str + Name of the JWST instrument, with first letter capitalized + (e.g. ``Fgs``) + + proposal : int or str + Proposal ID number + + Returns + ------- + rootnames : list + List of rootnames for the given instrument and proposal number + """ + tap_service = vo.dal.TAPService("https://vao.stsci.edu/caomtap/tapservice.aspx") + tap_results = tap_service.search(f"select observationID from dbo.CaomObservation where collection='JWST' and maxLevel=2 and insName like '{instrument.lower()}%' and prpID='{int(proposal)}'") + prop_table = tap_results.to_table() + rootnames = prop_table['observationID'].data + return rootnames.compressed() + + def check_config_for_key(key): """Check that the config.json file contains the specified key and that the entry is not empty @@ -526,18 +673,36 @@ def check_config_for_key(key): try: get_config()[key] except KeyError: - raise KeyError( - 'The key `{}` is not present in config.json. Please add it.'.format(key) + - ' See the relevant wiki page (https://github.com/spacetelescope/' + - 'jwql/wiki/Config-file) for more information.' - ) + msg = 'The key `{}` is not present in config.json. Please add it.'.format(key) + msg += ' See the relevant wiki page (https://github.com/spacetelescope/' + msg += 'jwql/wiki/Config-file) for more information.' + raise KeyError(msg) if get_config()[key] == "": - raise ValueError( - 'Please complete the `{}` field in your config.json. '.format(key) + - ' See the relevant wiki page (https://github.com/spacetelescope/' + - 'jwql/wiki/Config-file) for more information.' - ) + msg = 'Please complete the `{}` field in your config.json. '.format(key) + msg += ' See the relevant wiki page (https://github.com/spacetelescope/' + msg += 'jwql/wiki/Config-file) for more information.' + raise ValueError(msg) + + +def delete_non_rate_thumbnails(extensions=['_rate_', '_dark']): + """This script will go through all the thumbnail directories and delete all + thumbnails that do not contain the given extensions. We currently create thumbnails + using only rate.fits and dark.fits files, so the default is to keep only those. + + Parameters + ---------- + extension : list + If a thumbnail filename contains any of these strings, it will not be deleted + """ + base_dir = get_config()["thumbnail_filesystem"] + dir_list = sorted(glob.glob(os.path.join(base_dir, 'jw*'))) + + for dirname in dir_list: + files = glob.glob(os.path.join(dirname, '*.thumb')) + for file in files: + if not any([x in file for x in extensions]): + os.remove(file) def query_format(string): @@ -553,3 +718,88 @@ def query_unformat(string): unsplit_string = string.replace(" ", "_") return unsplit_string + + +def read_png(filename): + """Open the given png file and return as a 3D numpy array + + Parameters + ---------- + filename : str + png file to be opened + + Returns + ------- + data : numpy.ndarray + 3D array representation of the data in the png file + """ + if os.path.isfile(filename): + rgba_img = Image.open(filename).convert('RGBA') + xdim, ydim = rgba_img.size + + # Create an array representation for the image, filled with + # dummy data to begin with + img = np.empty((ydim, xdim), dtype=np.uint32) + + # Create a layer/RGBA" version with a set of 4, 8-bit layers. + # We will work with the data using 'view', and our changes + # will propagate back into the 2D 'img' version, which is + # what we will end up returning. 
+ view = img.view(dtype=np.uint8).reshape((ydim, xdim, 4)) + + # Copy the RGBA image into view, flipping it so it comes right-side up + # with a lower-left origin + view[:, :, :] = np.flipud(np.asarray(rgba_img)) + else: + view = None + # Return the 2D version + return img + + +def save_png(fig, filename=''): + """Starting with selenium version 4.10.0, our testing has shown that on the JWQL + servers, we need to specify an instance of a web driver when exporting a Bokeh + figure as a png. This is a wrapper function that creates the web driver instance + and calls Bokeh's export_png function. + + Parameters + ---------- + fig : bokeh.plotting.figure + Bokeh figure to be saved as a png + + filename : str + Filename to use for the png file + """ + options = webdriver.FirefoxOptions() + options.add_argument('-headless') + driver = webdriver.Firefox(options=options) + export_png(fig, filename=filename, webdriver=driver) + driver.quit() + + +def grouper(iterable, chunksize): + """ + Take a list of items (iterable), and group it into chunks of chunksize, with the + last chunk being any remaining items. This allows you to batch-iterate through a + potentially very long list without missing any items, and where each individual + iteration can involve a much smaller number of files. Particularly useful for + operations that you want to execute in batches, but don't want the batches to be too + long. + + Examples + -------- + + grouper([1, 2, 3, 4, 5], 2) + produces + (1, 2), (3, 4), (5, ) + + grouper([1, 2, 3, 4, 5], 6) + produces + (1, 2, 3, 4, 5) + """ + it = iter(iterable) + while True: + chunk = tuple(itertools.islice(it, chunksize)) + if not chunk: + return + yield chunk diff --git a/jwql/website/apps/jwql/admin.py b/jwql/website/apps/jwql/admin.py index 2690dc34c..f9525cef7 100644 --- a/jwql/website/apps/jwql/admin.py +++ b/jwql/website/apps/jwql/admin.py @@ -1,7 +1,5 @@ """Customizes the ``jwql`` web app administrative page. -** CURRENTLY NOT IN USE ** - Used to customize django's admin interface, and how the data contained in specific models is portrayed. 
@@ -9,6 +7,8 @@ ------- - Lauren Chambers + - Bryan Hilbert + - Brad Sappington References ---------- @@ -19,15 +19,37 @@ from django.contrib import admin -from .models import ImageData +from .models import Archive, Observation, Proposal, RootFileInfo, Anomalies + + +@admin.register(Archive) +class ArchiveAdmin(admin.ModelAdmin): + pass + + +@admin.register(Proposal) +class ProposalAdmin(admin.ModelAdmin): + list_display = ('archive', 'prop_id', 'category') + list_filter = ('archive', 'category') + + +@admin.register(Observation) +class ObservationAdmin(admin.ModelAdmin): + list_display = ('proposal', 'obsnum') + list_filter = ('proposal', 'obsstart', 'exptypes') -class ImageDataAdmin(admin.ModelAdmin): - # fieldsets = [('Filepath', {'fields': ['filepath']}), - # ('Instrument', {'fields': ['inst']}), - # ('Date information', {'fields': ['pub_date']})] - list_display = ('filename', 'inst', 'pub_date') - list_filter = ['pub_date'] +@admin.register(RootFileInfo) +class RootFileInfoAdmin(admin.ModelAdmin): + list_display = ('root_name', 'obsnum', 'proposal', 'instrument', 'viewed', 'filter', 'aperture', 'detector', 'read_patt_num', 'read_patt', 'grating', 'subarray', 'pupil', 'exp_type', 'expstart') + list_filter = ('viewed', 'instrument', 'proposal') -admin.site.register(ImageData, ImageDataAdmin) +@admin.register(Anomalies) +class AnomaliesAdmin(admin.ModelAdmin): + list_display = ('root_file_info', 'flag_date', 'user', 'cosmic_ray_shower', 'diffraction_spike', 'excessive_saturation', 'guidestar_failure', 'persistence', 'crosstalk', 'data_transfer_error', + 'ghost', 'snowball', 'column_pull_up', 'column_pull_down', 'dominant_msa_leakage', 'dragons_breath', 'mrs_glow', 'mrs_zipper', 'internal_reflection', 'optical_short', 'row_pull_up', + 'row_pull_down', 'lrs_contamination', 'tree_rings', 'scattered_light', 'claws', 'wisps', 'tilt_event', 'light_saber', 'other') + list_filter = ('flag_date', 'user', 'cosmic_ray_shower', 'diffraction_spike', 'excessive_saturation', 'guidestar_failure', 'persistence', 'crosstalk', 'data_transfer_error', + 'ghost', 'snowball', 'column_pull_up', 'column_pull_down', 'dominant_msa_leakage', 'dragons_breath', 'mrs_glow', 'mrs_zipper', 'internal_reflection', 'optical_short', 'row_pull_up', + 'row_pull_down', 'lrs_contamination', 'tree_rings', 'scattered_light', 'claws', 'wisps', 'tilt_event', 'light_saber', 'other', 'root_file_info') diff --git a/jwql/website/apps/jwql/anomaly_db_transfer.py b/jwql/website/apps/jwql/anomaly_db_transfer.py new file mode 100755 index 000000000..6c68f1ceb --- /dev/null +++ b/jwql/website/apps/jwql/anomaly_db_transfer.py @@ -0,0 +1,101 @@ +#! /usr/bin/env python + +"""Script to transfer postgres anomaly data to django models + +Authors +------- + + - Bradley Sappington + +Use +--- + + This module is called as follows: + :: + $ python anomaly_db_transfer.py + + +Dependencies +------------ + The user must have a configuration file named ``config.json`` + placed in the ``jwql`` directory. +""" + +import logging +import django +import os +import datetime + +# These lines are needed in order to use the Django models in a standalone +# script (as opposed to code run as a result of a webpage request). If these +# lines are not run, the script will crash when attempting to import the +# Django models in the line below. 
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") +django.setup() +from django.conf import settings +from django.core.exceptions import ObjectDoesNotExist +from django.utils.timezone import make_aware + +from jwql.database import database_interface as di +from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE +from jwql.utils.logging_functions import log_info, log_fail +from jwql.utils.monitor_utils import initialize_instrument_monitor +from jwql.utils.utils import filesystem_path, get_config +from jwql.website.apps.jwql.models import RootFileInfo, Anomalies + + + + + +@log_info +@log_fail +def transfer_anomalies(): + """Update the Django anomalies model with all information in the existing postgres database. + + """ + instruments = ['nircam', 'miri', 'nirspec', 'niriss', 'fgs'] + for instrument in instruments: + # Get the anomalies for this instrument + table = getattr(di, '{}Anomaly'.format(JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()])) + query = di.session.query(table) + logging.info('anomalies for {} are: {}'.format(instrument, table.columns)) + table_keys = ['id', 'root_file_info', 'flag_date', 'user'] + table_keys += table.columns + table_keys = list(map(lambda x: x.lower(), table_keys)) + updated = 0 + + with di.engine.connect() as connection: + result = connection.execute(query.statement) + rows = result.fetchall() + + for rowx, row in enumerate(rows): + anomaly_dict = {} + for ix, value in enumerate(row): + if (table_keys[ix] == 'flag_date'): + #avoid warning for native DateTime when timezone is active + value = make_aware(value) + anomaly_dict[table_keys[ix]] = value + + root_file_info_name = anomaly_dict['root_file_info'] + try: + root_file_info_instance = RootFileInfo.objects.get(root_name=root_file_info_name) + except ObjectDoesNotExist: + logging.info('No root_file_info for {}'.format(root_file_info_name)) + continue + + try: + del(anomaly_dict['id']) + del(anomaly_dict['root_file_info']) + anomalies, anomaly_created = Anomalies.objects.update_or_create(root_file_info=root_file_info_instance, + defaults=anomaly_dict) + if anomaly_created: + updated += 1 + except Exception as e: + logging.warning('Failed to create {} with exception {}'.format(root_file_info_name, e)) + logging.info('Transferred {} anomalies for {}'.format(updated, instrument)) + + +if __name__ == '__main__': + module = os.path.basename(__file__).strip('.py') + start_time, log_file = initialize_instrument_monitor(module) + transfer_anomalies() diff --git a/jwql/website/apps/jwql/api_views.py b/jwql/website/apps/jwql/api_views.py index fb41c4c3b..e5303e7a0 100644 --- a/jwql/website/apps/jwql/api_views.py +++ b/jwql/website/apps/jwql/api_views.py @@ -27,6 +27,7 @@ - Matthew Bourque - Teagan King + - Melanie Clarke Use --- @@ -50,12 +51,11 @@ from .data_containers import get_filenames_by_proposal from .data_containers import get_filenames_by_rootname from .data_containers import get_instrument_proposals -from .data_containers import get_preview_images_by_instrument +from .data_containers import get_instrument_looks from .data_containers import get_preview_images_by_proposal from .data_containers import get_preview_images_by_rootname -from .data_containers import get_thumbnails_by_instrument from .data_containers import get_thumbnails_by_proposal -from .data_containers import get_thumbnails_by_rootname +from .data_containers import get_thumbnail_by_rootname def all_proposals(request): @@ -139,28 +139,44 @@ def instrument_proposals(request, inst): return 
JsonResponse({'proposals': proposals}, json_dumps_params={'indent': 2}) -def preview_images_by_instrument(request, inst): - """Return a list of available preview images in the filesystem for - the given instrument. +def instrument_looks(request, inst, status=None): + """Return a table of looks information for the given instrument. + + 'Viewed' indicates whether an observation is new or has been reviewed + for QA. In addition to 'filename', and 'viewed', observation + descriptors from the Django models may be added to the table. Keys + are specified by instrument in the REPORT_KEYS_PER_INSTRUMENT constant. Parameters ---------- request : HttpRequest object - Incoming request from the webpage + Incoming request from the webpage. inst : str - The instrument of interest. The name of the instrument must - mach one of the following: (``nircam``, ``NIRCam``, ``niriss``, - ``NIRISS``, ``nirspec``, ``NIRSpec``, ``miri``, ``MIRI``, - ``fgs``, ``FGS``). + The JWST instrument of interest. + status : str, optional + If set to None, all viewed values are returned. If set to + 'viewed', only viewed data is returned. If set to 'new', only + new data is returned. Returns ------- - JsonResponse object - Outgoing response sent to the webpage + JsonResponse + Outgoing response sent to the webpage, depending on return_type. """ + # get all observation looks from file info model + # and join with observation descriptors + keys, looks = get_instrument_looks(inst, look=status) - preview_images = get_preview_images_by_instrument(inst) - return JsonResponse({'preview_images': preview_images}, json_dumps_params={'indent': 2}) + # return results by api key + if status is None: + status = 'looks' + + response = JsonResponse({'instrument': inst, + 'keys': keys, + 'type': status, + status: looks}, + json_dumps_params={'indent': 2}) + return response def preview_images_by_proposal(request, proposal): @@ -205,30 +221,6 @@ def preview_images_by_rootname(request, rootname): return JsonResponse({'preview_images': preview_images}, json_dumps_params={'indent': 2}) -def thumbnails_by_instrument(request, inst): - """Return a list of available thumbnails in the filesystem for the - given instrument. - - Parameters - ---------- - request : HttpRequest object - Incoming request from the webpage - inst : str - The instrument of interest. The name of the instrument must - match one of the following: (``nircam``, ``NIRCam``, ``niriss``, - ``NIRISS``, ``nirspec``, ``NIRSpec``, ``miri``, ``MIRI``, - ``fgs``, ``FGS``). - - Returns - ------- - JsonResponse object - Outgoing response sent to the webpage - """ - - thumbnails = get_thumbnails_by_instrument(inst) - return JsonResponse({'thumbnails': thumbnails}, json_dumps_params={'indent': 2}) - - def thumbnails_by_proposal(request, proposal): """Return a list of available thumbnails in the filesystem for the given ``proposal``. @@ -250,8 +242,8 @@ def thumbnails_by_proposal(request, proposal): return JsonResponse({'thumbnails': thumbnails}, json_dumps_params={'indent': 2}) -def thumbnails_by_rootname(request, rootname): - """Return a list of available thumbnails in the filesystem for the +def thumbnail_by_rootname(request, rootname): + """Return the best available thumbnail in the filesystem for the given ``rootname``. 
Parameters @@ -267,5 +259,5 @@ def thumbnails_by_rootname(request, rootname): Outgoing response sent to the webpage """ - thumbnails = get_thumbnails_by_rootname(rootname) - return JsonResponse({'thumbnails': thumbnails}, json_dumps_params={'indent': 2}) + thumbnail = get_thumbnail_by_rootname(rootname) + return JsonResponse({'thumbnails': thumbnail}, json_dumps_params={'indent': 2}) diff --git a/jwql/website/apps/jwql/apps.py b/jwql/website/apps/jwql/apps.py deleted file mode 100644 index bd31e1913..000000000 --- a/jwql/website/apps/jwql/apps.py +++ /dev/null @@ -1,32 +0,0 @@ -"""Customizes the ``jwql`` app settings. - -** CURRENTLY NOT IN USE ** - -Optionally defines an ``AppConfig`` class that can be called in -``INSTALLED_APPS`` in settings.py to configure the web app. - -Authors -------- - - - Lauren Chambers - -Use ---- - - This module is called in ``settings.py`` as such: - :: - INSTALLED_APPS = ['apps.jwql.PlotsExampleConfig', - ... - ] - -References ----------- - For more information please see: - ``https://docs.djangoproject.com/en/2.0/ref/applications/`` -""" - -from django.apps import AppConfig - - -class PlotsExampleConfig(AppConfig): - name = 'jwql' diff --git a/jwql/website/apps/jwql/archive_database_update.py b/jwql/website/apps/jwql/archive_database_update.py new file mode 100755 index 000000000..071df24a8 --- /dev/null +++ b/jwql/website/apps/jwql/archive_database_update.py @@ -0,0 +1,529 @@ +#! /usr/bin/env python + +"""Script that can be used to query MAST and return basic info +about all proposals. This information is used to help populate +the instrument archive pages + +Authors +------- + + - Bryan Hilbert + - Bradley Sappington + +Use +--- + + This module is called as follows: + :: + + from jwql.websites.apps.jwql.archvie_database_update import get_updates + instrument = 'nircam' + get_updates(insturument) + + can be run from command line to add new elements to the django database + $ python archive_database_update.py + + Use the '--fill_empty' argument to provide a model and field. Updates ALL fields for any model with empty/null/0 specified field + $ python archive_database_update.py --fill_empty rootfileinfo expstart + WARNING: Not all fields will be populated by all model objects. This will result in updates that may not be necessary. + While this will not disturb the data, it has the potential to increase run time. + Select the field that is most pertient to the models you need updated minimize run time + + Use the 'update' argument to update every rootfileinfo data model with the most complete information from MAST + $ python archive_database_update.py --update + WARNING: THIS WILL TAKE A LONG TIME + + +Dependencies +------------ + The user must have a configuration file named ``config.json`` + placed in the ``jwql`` directory. +""" + +import logging +import os +import argparse + +import numpy as np +import django + +from django.apps import apps +from jwql.utils.protect_module import lock_module + +# These lines are needed in order to use the Django models in a standalone +# script (as opposed to code run as a result of a webpage request). If these +# lines are not run, the script will crash when attempting to import the +# Django models in the line below. 
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") +django.setup() + +from jwql.website.apps.jwql.models import Archive, Observation, Proposal, RootFileInfo # noqa +from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE # noqa +from jwql.utils.logging_functions import log_info, log_fail # noqa +from jwql.utils.monitor_utils import initialize_instrument_monitor # noqa +from jwql.utils.constants import MAST_QUERY_LIMIT # noqa +from jwql.utils.utils import filename_parser, filesystem_path, get_config # noqa +from jwql.website.apps.jwql.data_containers import create_archived_proposals_context # noqa +from jwql.website.apps.jwql.data_containers import get_instrument_proposals, get_filenames_by_instrument # noqa +from jwql.website.apps.jwql.data_containers import get_proposal_info, mast_query_filenames_by_instrument, mast_query_by_rootname # noqa + +FILESYSTEM = get_config()['filesystem'] + + +@log_info +@log_fail +def get_updates(update_database): + """Generate the page listing all archived proposals in the database + + Parameters + ---------- + update_database : bool + true: run updates on existing rootfilename entries + false: only create new entries + + inst : str + Name of JWST instrument + """ + instruments = ['nircam', 'miri', 'nirspec', 'niriss', 'fgs'] + for inst in instruments: + logging.info(f'Updating database for {inst} archive page.') + + # Ensure the instrument is correctly capitalized + inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] + + # Dictionary to hold summary information for all proposals + all_proposals = get_instrument_proposals(inst) + + # Get list of all files for the given instrument + for proposal in all_proposals: + # Get lists of all public and proprietary files for the program + filenames_public, metadata_public, filenames_proprietary, metadata_proprietary = get_all_possible_filenames_for_proposal(inst, proposal) + # Find the location in the filesystem for all files + filepaths_public = files_in_filesystem(filenames_public, 'public') + filepaths_proprietary = files_in_filesystem(filenames_proprietary, 'proprietary') + filenames = filepaths_public + filepaths_proprietary + + # There is one and only one category for the proposal, so just take the first. + proposal_category = '' + if len(metadata_public['category']): + proposal_category = metadata_public['category'][0] + elif len(metadata_proprietary['category']): + proposal_category = metadata_proprietary['category'][0] + + # Get set of unique rootnames + all_rootnames = set(['_'.join(f.split('/')[-1].split('_')[:-1]) for f in filenames]) + rootnames = [] + for rootname in all_rootnames: + filename_dict = filename_parser(rootname) + + # Weed out file types that are not supported by generate_preview_images + if 'stage_3' not in filename_dict['filename_type']: + rootnames.append(rootname) + + if len(filenames) > 0: + + # Gather information about the proposals for the given instrument + proposal_info = get_proposal_info(filenames) + + # Each observation number in each proposal can have a list of exp_types (e.g. 
NRC_TACQ, NRC_IMAGE) + for obsnum in set(proposal_info['observation_nums']): + # Find the public entries for the observation and get the associated exp_types + public_obs = np.array(metadata_public['observtn']) + match_pub = public_obs == int(obsnum) + public_exptypes = np.array(metadata_public['exp_type']) + exp_types = list(set(public_exptypes[match_pub])) + + # Find the proprietary entries for the observation, get the associated exp_types, and + # combine with the public values + prop_obs = np.array(metadata_proprietary['observtn']) + match_prop = prop_obs == int(obsnum) + prop_exptypes = np.array(metadata_proprietary['exp_type']) + exp_types = list(set(exp_types + list(set(prop_exptypes[match_prop])))) + + # Find the starting and ending dates for the observation + all_start_dates = np.array(metadata_public['expstart'])[match_pub] + all_start_dates = np.append(all_start_dates, np.array(metadata_proprietary['expstart'])[match_prop]) + + starting_date = np.min(all_start_dates) + all_end_dates = np.array(metadata_public['expend'])[match_pub] + all_end_dates = np.append(all_end_dates, np.array(metadata_proprietary['expend'])[match_prop]) + latest_date = np.max(all_end_dates) + + # Get the number of files in the observation + propobs = f'jw{int(proposal):05}{obsnum}' + obsfiles = [f for f in rootnames if propobs in f] + + # Update the appropriate database table + update_database_table(update_database, inst, proposal, obsnum, proposal_info['thumbnail_paths'][0], obsfiles, + exp_types, starting_date, latest_date, proposal_category) + + create_archived_proposals_context(inst) + + +def get_all_possible_filenames_for_proposal(instrument, proposal_num): + """Wrapper around a MAST query for filenames from a given instrument/proposal + + Parameters + ---------- + instrument : str + JWST instrument, mixed-case e.g. NIRCam + + proposal_num : str + Proposal number to search for + + Returns + ------- + public: list + A list of publicly-available filenames + + public_meta : dict + Dictionary of other attributes returned from MAST for public data. Keys are the attribute names + e.g. 'exptime', and values are lists of the value for each filename. e.g. ['59867.6, 59867.601'] + + proprietary list + A list of filenames from proprietary programs + + proprietary_meta : dict + Dictionary of other attributes returned from MAST for proporietary programs. Keys are the attribute names + e.g. 'exptime', and values are lists of the value for each filename. e.g. ['59867.6, 59867.601'] + """ + filename_query = mast_query_filenames_by_instrument(instrument, proposal_num, + other_columns=['exp_type', 'observtn', 'expstart', 'expend', 'category']) + + # Check the number of files returned by the MAST query. MAST has a limit of 50,000 rows in + # the returned result. If we hit that limit, then we are most likely not getting back all of + # the files. In such a case, we will have to start querying by observation or some other property. + if len(filename_query['data']) >= MAST_QUERY_LIMIT: + raise ValueError((f'WARNING! MAST query limit of {MAST_QUERY_LIMIT} entries has been reached for {instrument} PID {proposal_num}. ' + 'This means we are not getting a complete list of files. 
The query must be broken up into multuple queries.')) + + public, public_meta = get_filenames_by_instrument(instrument, proposal_num, restriction='public', + query_response=filename_query, + other_columns=['exp_type', 'observtn', 'expstart', 'expend', 'category']) + proprietary, proprietary_meta = get_filenames_by_instrument(instrument, proposal_num, restriction='proprietary', + query_response=filename_query, + other_columns=['exp_type', 'observtn', 'expstart', 'expend', 'category']) + return public, public_meta, proprietary, proprietary_meta + + +def files_in_filesystem(files, permission_type): + """Determine locations in the filesystem for the input files + + Parameters + ---------- + files : list + List of filenames from MAST query + + permission_type : str + Permission level of the input files: 'public' or 'proprietary' + + Return + ------ + filenames : list + List of full paths within the filesystem for the input files + """ + if permission_type not in ['public', 'proprietary']: + raise ValueError('permission type needs to be either "public" or "proprietary"') + + filenames = [] + for filename in files: + try: + relative_filepath = filesystem_path(filename, check_existence=False) + full_filepath = os.path.join(FILESYSTEM, permission_type, relative_filepath) + filenames.append(full_filepath) + except ValueError: + print('Unable to determine filepath for {}'.format(filename)) + return filenames + + +def update_database_table(update, instrument, prop, obs, thumbnail, obsfiles, types, startdate, enddate, proposal_category): + """Update the database tables that contain info about proposals and observations, via Django models. + + Parameters + ---------- + update : bool + true: run updates on existing rootfilename entries + false: only create new entries + + instrument : str + Instrument name + + prop : str + Proposal ID. 5-digit string + + obs : str + Observation number. 3-digit string + + thumbnail : str + Full path to the thumbnail image for the proposal + + obsfiles : list + list of file rootnames in the observation + + types : list + List of exposure types of the data in the observation + + startdate : float + Date of the beginning of the observation in MJD + + enddate : float + Date of the ending of the observation in MJD + + proposal_category : str + category name + """ + + # Check to see if the required Archive entry exists, and create it if it doesn't + archive_instance, archive_created = Archive.objects.get_or_create(instrument=instrument) + if archive_created: + logging.info(f'No existing entries for Archive: {instrument}. Creating.') + + # Check to see if the required Proposal entry exists, and create it if it doesn't + prop_instance, prop_created = Proposal.objects.get_or_create(prop_id=prop, archive=archive_instance) + if prop_created: + logging.info(f'No existing entries for Proposal: {prop}. Creating.') + + # Update the proposal instance with the thumbnail path + prop_instance.thumbnail_path = thumbnail + prop_instance.category = proposal_category + prop_instance.save(update_fields=['thumbnail_path', 'category']) + + # Now that the Archive and Proposal instances are sorted, get or create the + # Observation instance + obs_instance, obs_created = Observation.objects.get_or_create(obsnum=obs, + proposal=prop_instance, + proposal__archive=archive_instance) + + # Update the Observation info. Note that in this case, if the Observation entry + # already existed we are overwriting the old values for number of files and dates. 
+ # This is done in case new files have appeared in MAST since the last run of + # this script (i.e. the pipeline wasn't finished at the time of the last run) + obs_instance.number_of_files = len(obsfiles) + obs_instance.obsstart = startdate + obs_instance.obsend = enddate + + # If the Observation instance was just created, then set the exptype list with + # the input list. If the Observation instance already existed, then update the + # exptype list by adding the new entries to the existing ones. + if obs_created: + logging.info((f'No existing entries for Observation: {instrument}, PID {prop}, Obs {obs} found. Creating. ' + 'Updating number of files, start/end dates, and exp_type list')) + obs_instance.exptypes = ','.join(types) + else: + if obs_instance.exptypes == '': + obs_instance.exptypes = ','.join(types) + else: + existing_exps = obs_instance.exptypes.split(',') + existing_exps.extend(types) + existing_exps = sorted(list(set(existing_exps))) + new_exp_list = ','.join(existing_exps) + obs_instance.exptypes = new_exp_list + obs_instance.save(update_fields=['number_of_files', 'obsstart', 'obsend', 'exptypes']) + + # Get all unsaved root names in the Observation to store in the database + nr_files_created = 0 + for file in obsfiles: + try: + root_file_info_instance, rfi_created = RootFileInfo.objects.get_or_create(root_name=file, + instrument=instrument, + obsnum=obs_instance, + proposal=prop) + if update or rfi_created: + # Updating defaults only on update or creation to prevent call to mast_query_by_rootname on every file name. + defaults_dict = mast_query_by_rootname(instrument, file) + + defaults = dict(filter=defaults_dict.get('filter', ''), + detector=defaults_dict.get('detector', ''), + exp_type=defaults_dict.get('exp_type', ''), + read_patt=defaults_dict.get('readpatt', ''), + grating=defaults_dict.get('grating', ''), + read_patt_num=defaults_dict.get('patt_num', 0), + aperture=defaults_dict.get('apername', ''), + subarray=defaults_dict.get('subarray', ''), + pupil=defaults_dict.get('pupil', ''), + expstart=defaults_dict.get('expstart', 0.0)) + + for key, value in defaults.items(): + setattr(root_file_info_instance, key, value) + root_file_info_instance.save() + if rfi_created: + nr_files_created += 1 + except Exception as e: + logging.warning(f'\tError {e} was raised') + logging.warning(f'\tError with root_name: {file} inst: {instrument} obsnum: {obs_instance} proposal: {prop}') + if nr_files_created > 0: + logging.info(f'Created {nr_files_created} rootfileinfo entries for: {instrument} - proposal:{prop} - obs:{obs}') + + +@log_fail +def fill_empty_model(model_name, model_field): + '''`fill_empty_model` takes a model name and a model field as input, and then updates all the models in + the database that have a null, empty, or zero value for that field. 
+ + Parameters + ---------- + model_name + the name of the model to be updated + model_field + the name of the field in the model that is empty + + ''' + + model_field_null = model_field + "__isnull" + model_field_empty = model_field + "__exact" + + model = apps.get_model('jwql', model_name) + null_models = empty_models = zero_models = model.objects.none() + + # filter(field__isnull=True) + try: + null_models = model.objects.filter(**{model_field_null: True}) + except ValueError: + pass + + # filter(field__exact='') + try: + empty_models = model.objects.filter(**{model_field_empty: ''}) + except ValueError: + pass + + # filter(field=0) + try: + zero_models = model.objects.filter(**{model_field: 0}) + except ValueError: + pass + + model_set = null_models | empty_models | zero_models + if model_set.exists(): + logging.info(f'{model_set.count()} models to be updated') + if model_name == 'proposal': + fill_empty_proposals(model_set) + elif model_name == 'rootfileinfo': + fill_empty_rootfileinfo(model_set) + else: + logging.warning(f'Filling {model_name} model is not currently implemented') + print(f'Filling {model_name} model is not currently implemented') + + +def fill_empty_proposals(proposal_set): + '''It takes a list of proposal querysets, finds the thumbnail and category for each proposal, and saves + the proposal + + Parameters + ---------- + proposal_set : a queryset of Proposal objects + + ''' + + saved_proposals = 0 + for proposal_mod in proposal_set: + + filenames_public, metadata_public, filenames_proprietary, metadata_proprietary = get_all_possible_filenames_for_proposal(proposal_mod.archive.instrument, proposal_mod.prop_id) + # Find the location in the filesystem for all files + filepaths_public = files_in_filesystem(filenames_public, 'public') + filepaths_proprietary = files_in_filesystem(filenames_proprietary, 'proprietary') + filenames = filepaths_public + filepaths_proprietary + proposal_info = get_proposal_info(filenames) + + # There is one and only one category for the proposal, so just take the first. 
+ proposal_category = '' + if len(metadata_public['category']): + proposal_category = metadata_public['category'][0] + elif len(metadata_proprietary['category']): + proposal_category = metadata_proprietary['category'][0] + + proposal_mod.thumbnail_path = proposal_info['thumbnail_paths'][0] + proposal_mod.category = proposal_category + try: + proposal_mod.save(update_fields=['thumbnail_path', 'category']) + saved_proposals += 1 + except Exception as e: + logging.warning(f'\tCould not save proposal {proposal_mod.prop_id}') + logging.warning(f'\tError {e} was raised') + + logging.info(f'\tSaved {saved_proposals} proposals') + + +def fill_empty_rootfileinfo(rootfileinfo_set): + '''Takes a queryset of RootFileInfo objects and fills in the empty fields with values + from the MAST database + + Parameters + ---------- + rootfileinfo_set : a queryset of RootFileInfo objects + + ''' + + saved_rootfileinfos = 0 + for rootfileinfo_mod in rootfileinfo_set: + defaults_dict = mast_query_by_rootname(rootfileinfo_mod.instrument, rootfileinfo_mod.root_name) + + defaults = dict(filter=defaults_dict.get('filter', ''), + detector=defaults_dict.get('detector', ''), + exp_type=defaults_dict.get('exp_type', ''), + read_patt=defaults_dict.get('readpatt', ''), + grating=defaults_dict.get('grating', ''), + read_patt_num=defaults_dict.get('patt_num', 0), + aperture=defaults_dict.get('apername', ''), + subarray=defaults_dict.get('subarray', ''), + pupil=defaults_dict.get('pupil', ''), + expstart=defaults_dict.get('expstart', 0.0)) + + for key, value in defaults.items(): + setattr(rootfileinfo_mod, key, value) + try: + rootfileinfo_mod.save() + saved_rootfileinfos += 1 + except Exception as e: + logging.warning(f'\tCould not save rootfileinfo {rootfileinfo_mod.root_name}') + logging.warning(f'\tError {e} was raised') + logging.info(f'\tSaved {saved_rootfileinfos} Root File Infos') + + +@lock_module +def protected_code(update_database, fill_empty_list): + """Protected code ensures only 1 instance of module will run at any given time + + Parameters + ---------- + update_database : bool + If True, any existing rootfileinfo models are overwritten + """ + module = os.path.basename(__file__).strip('.py') + start_time, log_file = initialize_instrument_monitor(module) + + if fill_empty_list: + fill_empty_model(fill_empty_list[0], fill_empty_list[1]) + else: + get_updates(update_database) + + +if __name__ == '__main__': + + models_list = ['archive', 'observation', 'proposal', 'rootfileinfo'] + proposal_fields = ['category'] + # Initialize parser + msg = "Used to update Django Model Database from header information" + parser = argparse.ArgumentParser(description=msg) + + # Adding optional argument + parser.add_argument("--update", action='store_true', help="Update Entire Model Database") + parser.add_argument("--fill_empty", nargs=2, help="enter 2 arguments-> model_name, model_field") + + args = parser.parse_args() + continue_script = True + if args.fill_empty: + continue_script = False + args.fill_empty = list(map(lambda x: x.lower(), args.fill_empty)) + if args.fill_empty[0] not in models_list: + print('model_name incorrect, try: {}'.format(models_list)) + else: + model = apps.get_model('jwql', args.fill_empty[0]) + fields = [field.name for field in model._meta.get_fields()] + if args.fill_empty[1] not in fields: + print('Invalid field entered for model type {}, try one of the following: {}'.format(args.fill_empty[0], fields)) + else: + continue_script = True + + if continue_script: + protected_code(args.update, 
args.fill_empty) diff --git a/jwql/website/apps/jwql/bokeh_containers.py b/jwql/website/apps/jwql/bokeh_containers.py index 532401a32..dad0ad69d 100644 --- a/jwql/website/apps/jwql/bokeh_containers.py +++ b/jwql/website/apps/jwql/bokeh_containers.py @@ -8,6 +8,7 @@ ------- - Gray Kanarek + - Bryan Hilbert Use --- @@ -24,8 +25,12 @@ from bokeh.embed import components from bokeh.layouts import layout from bokeh.models.widgets import Tabs, Panel +from bokeh.plotting import figure, output_file +import numpy as np +import pysiaf -from . import monitor_pages +from jwql.website.apps.jwql import monitor_pages +from jwql.website.apps.jwql.monitor_pages.monitor_dark_bokeh import DarkMonitorPlots from jwql.utils.constants import BAD_PIXEL_TYPES, FULL_FRAME_APERTURES from jwql.utils.utils import get_config @@ -33,10 +38,59 @@ FILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem') PACKAGE_DIR = os.path.dirname(__location__.split('website')[0]) REPO_DIR = os.path.split(PACKAGE_DIR)[0] +TEMPLATE_DIR = os.path.join(PACKAGE_DIR, 'website/apps/jwql/templates') -def bad_pixel_monitor_tabs(instrument): - """Creates the various tabs of the bad pixel monitor results page. +def add_limit_boxes(fig, yellow=None, red=None): + """Add gree/yellow/red background colors + + Parameters + ---------- + fig : bokeh.plotting.figure + Bokeh figure of the telemetry values + + yellow : tup + 2-Tuple of (low, high) values. If provided, the areas of the plot less than + and greater than will be given a yellow background, to indicate an area + of concern. + + red : tup + 2-Tuple of (low, high) values. If provided, the areas of the plot less than + and greater than will be given a red background, to indicate values that + may indicate an error. It is assumed that the low value of red is less + than the low value of yellow, and that the high value of red is + greater than the high value of yellow. + """ + if yellow is not None: + green = BoxAnnotation(bottom=yellow_limits[0], top=yellow_limits[1], fill_color='chartreuse', fill_alpha=0.2) + fig.add_layout(green) + if red is not None: + yellow_high = BoxAnnotation(bottom=yellow_limits[1], top=red_limits[1], fill_color='gold', fill_alpha=0.2) + fig.add_layout(yellow_high) + yellow_low = BoxAnnotation(bottom=red_limits[0], top=yellow_limits[0], fill_color='gold', fill_alpha=0.2) + fig.add_layout(yellow_low) + red_high = BoxAnnotation(bottom=red_limits[1], top=red_limits[1] + 100, fill_color='red', fill_alpha=0.1) + fig.add_layout(red_high) + red_low = BoxAnnotation(bottom=red_limits[0] - 100, top=red_limits[0], fill_color='red', fill_alpha=0.1) + fig.add_layout(red_low) + else: + yellow_high = BoxAnnotation(bottom=yellow_limits[1], top=yellow_limits[1] + 100, fill_color='gold', fill_alpha=0.2) + fig.add_layout(yellow_high) + yellow_low = BoxAnnotation(bottom=yellow_limits[0] - 100, top=yellow_limits[0], fill_color='gold', fill_alpha=0.2) + fig.add_layout(yellow_low) + else: + if red is not None: + green = BoxAnnotation(bottom=red_limits[0], top=red_limits[1], fill_color='chartreuse', fill_alpha=0.2) + fig.add_layout(green) + red_high = BoxAnnotation(bottom=red_limits[1], top=red_limits[1] + 100, fill_color='red', fill_alpha=0.1) + fig.add_layout(red_high) + red_low = BoxAnnotation(bottom=red_limits[0] - 100, top=red_limits[0], fill_color='red', fill_alpha=0.1) + fig.add_layout(red_low) + return fig + + +def cosmic_ray_monitor_tabs(instrument): + """Creates the various tabs of the cosmic monitor results page. 
Parameters ---------- @@ -46,64 +100,74 @@ def bad_pixel_monitor_tabs(instrument): Returns ------- div : str - The HTML div to render bad pixel monitor plots + The HTML div to render cosmic ray monitor plots script : str - The JS script to render bad pixel monitor plots + The JS script to render cosmic ray monitor plots """ + full_apertures = FULL_FRAME_APERTURES[instrument.upper()] - templates_all_apertures = {} + histograms_all_apertures = [] + history_all_apertures = [] for aperture in full_apertures: # Start with default values for instrument and aperture because # BokehTemplate's __init__ method does not allow input arguments - monitor_template = monitor_pages.BadPixelMonitor() - - # Set instrument and monitor using DarkMonitor's setters - monitor_template.aperture_info = (instrument, aperture) - templates_all_apertures[aperture] = monitor_template - - # for reference - here are the bad pixel types - # badpix_types_from_flats = ['DEAD', 'LOW_QE', 'OPEN', 'ADJ_OPEN'] - # badpix_types_from_darks = ['HOT', 'RC', 'OTHER_BAD_PIXEL', 'TELEGRAPH'] - - # We loop over detectors here, and create one tab per detector, rather - # than one tab for each plot type, as is done with the dark monitor - all_tabs = [] - for aperture_name, template in templates_all_apertures.items(): - - tab_plots = [] - # Add the image of bad pixels found in darks - dark_image = template.refs["dark_position_figure"] - dark_image.sizing_mode = "scale_width" # Make sure the sizing is adjustable - tab_plots.append(dark_image) - - # Add the image of bad pixels found in flats - flat_image = template.refs["flat_position_figure"] - flat_image.sizing_mode = "scale_width" # Make sure the sizing is adjustable - tab_plots.append(flat_image) - - # Add history plots - for badpix_type in BAD_PIXEL_TYPES: - history = template.refs["{}_history_figure".format(badpix_type.lower())] - history.sizing_mode = "scale_width" # Make sure the sizing is adjustable - tab_plots.append(history) - - # Let's put two plots per line - badpix_layout = layout( - tab_plots[0:2], - tab_plots[2:4], - tab_plots[4:6], - tab_plots[6:8], - tab_plots[8:10] + monitor_template = monitor_pages.CosmicRayMonitor(instrument.lower(), aperture) + + # Set instrument and monitor using CosmicRayMonitor's setters + # monitor_template.aperture_info = (instrument, aperture) + # templates_all_apertures[aperture] = monitor_template + histograms_all_apertures.append(monitor_template.histogram_figure) + history_all_apertures.append(monitor_template.history_figure) + + if instrument.lower() == 'nircam': + # Histogram tab + a1, a2, a3, a4, a5, b1, b2, b3, b4, b5 = histograms_all_apertures + histogram_layout = layout( + [a2, a4, b3, b1], + [a1, a3, b4, b2], + [a5, b5] ) - badpix_layout.sizing_mode = "scale_width" # Make sure the sizing is adjustable - badpix_tab = Panel(child=badpix_layout, title=aperture_name) - all_tabs.append(badpix_tab) + # CR Rate History tab + a1_line, a2_line, a3_line, a4_line, a5_line, b1_line, b2_line, b3_line, b4_line, b5_line = history_all_apertures + line_layout = layout( + [a2_line, a4_line, b3_line, b1_line], + [a1_line, a3_line, b4_line, b2_line], + [a5_line, b5_line] + ) + + elif instrument.lower() in ['miri', 'niriss', 'nirspec']: + # Histogram tab + single_aperture = histograms_all_apertures[0] + histogram_layout = layout( + [single_aperture] + ) + + # CR Rate History tab + single_aperture_line = history_all_apertures[0] + line_layout = layout( + [single_aperture_line] + ) + + elif instrument.lower() == 'fgs': + # Histogram tab + g1, g2 = 
histograms_all_apertures + histogram_layout = layout([g1, g2]) + + # CR Rate History tab + g1_line, g2_line = history_all_apertures + line_layout = layout([g1_line, g2_line]) + + # Allow figure sizes to scale with window + histogram_layout.sizing_mode = "scale_width" # Make sure the sizing is adjustable + histogram_tab = Panel(child=histogram_layout, title="Histogram") + line_layout.sizing_mode = "scale_width" # Make sure the sizing is adjustable + line_tab = Panel(child=line_layout, title="Trending") # Build tabs - tabs = Tabs(tabs=all_tabs) + tabs = Tabs(tabs=[histogram_tab, line_tab]) # Return tab HTML and JavaScript to web app script, div = components(tabs) @@ -111,8 +175,8 @@ def bad_pixel_monitor_tabs(instrument): return div, script -def bias_monitor_tabs(instrument): - """Creates the various tabs of the bias monitor results page. +def dark_monitor_tabs(instrument): + """Creates the various tabs of the dark monitor results page. Parameters ---------- @@ -122,59 +186,25 @@ def bias_monitor_tabs(instrument): Returns ------- div : str - The HTML div to render bias monitor plots + The HTML div to render dark monitor plots script : str - The JS script to render bias monitor plots + The JS script to render dark monitor plots """ + # This will query for the data and produce the plots + plots = DarkMonitorPlots(instrument) - # Make a separate tab for each aperture - tabs = [] - for aperture in FULL_FRAME_APERTURES[instrument.upper()]: - monitor_template = monitor_pages.BiasMonitor() - monitor_template.input_parameters = (instrument, aperture) + # Define the layout for each plot type + histogram_layout = standard_monitor_plot_layout(instrument, plots.hist_plots) + trending_layout = standard_monitor_plot_layout(instrument, plots.trending_plots) + image_layout = standard_monitor_plot_layout(instrument, plots.dark_image_data) - # Add the mean bias vs time plots for each amp and odd/even columns - plots = [] - for amp in ['1', '2', '3', '4']: - for kind in ['even', 'odd']: - bias_plot = monitor_template.refs['mean_bias_figure_amp{}_{}'.format(amp, kind)] - bias_plot.sizing_mode = 'scale_width' # Make sure the sizing is adjustable - plots.append(bias_plot) - - # Add the calibrated 0th group image - calibrated_image = monitor_template.refs['cal_image'] - calibrated_image.sizing_mode = 'scale_width' - calibrated_image.margin = (0, 100, 0, 100) # Add space around sides of figure - plots.append(calibrated_image) - - # Add the calibrated 0th group histogram - if instrument == 'NIRISS': - calibrated_hist = monitor_template.refs['cal_hist'] - calibrated_hist.sizing_mode = 'scale_width' - calibrated_hist.margin = (0, 190, 0, 190) - plots.append(calibrated_hist) - - # Add the collapsed row/column plots - if instrument != 'NIRISS': - for direction in ['rows', 'columns']: - collapsed_plot = monitor_template.refs['collapsed_{}_figure'.format(direction)] - collapsed_plot.sizing_mode = 'scale_width' - plots.append(collapsed_plot) - - # Put the mean bias plots on the top 2 rows, the calibrated image on the - # third row, and the remaining plots on the bottom row. 
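[Editorial note: both the cosmic ray and dark monitor pages assemble their figures with the same Panel/Tabs/components recipe. A minimal self-contained sketch with placeholder figures, for reference:]

from bokeh.embed import components
from bokeh.layouts import layout
from bokeh.models.widgets import Panel, Tabs
from bokeh.plotting import figure

hist_fig = figure(title='Histogram')
trend_fig = figure(title='Trending')

hist_layout = layout([[hist_fig]], sizing_mode='scale_width')
trend_layout = layout([[trend_fig]], sizing_mode='scale_width')

tabs = Tabs(tabs=[Panel(child=hist_layout, title='Histogram'),
                  Panel(child=trend_layout, title='Trending')])

# script/div are what the web app embeds in the monitor template
script, div = components(tabs)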
- bias_layout = layout( - plots[0:8][::2], - plots[0:8][1::2], - plots[8:9], - plots[9:] - ) - bias_layout.sizing_mode = 'scale_width' - bias_tab = Panel(child=bias_layout, title=aperture) - tabs.append(bias_tab) + # Create a tab for each type of plot + histogram_tab = Panel(child=histogram_layout, title="Dark Rate Histogram") + line_tab = Panel(child=trending_layout, title="Trending") + image_tab = Panel(child=image_layout, title="Mean Dark Image") # Build tabs - tabs = Tabs(tabs=tabs) + tabs = Tabs(tabs=[histogram_tab, line_tab, image_tab]) # Return tab HTML and JavaScript to web app script, div = components(tabs) @@ -182,7 +212,7 @@ def bias_monitor_tabs(instrument): return div, script -def dark_monitor_tabs(instrument): +def edb_monitor_tabs(instrument): """Creates the various tabs of the dark monitor results page. Parameters @@ -197,99 +227,82 @@ def dark_monitor_tabs(instrument): script : str The JS script to render dark monitor plots """ + html_file_list = file_list[instrument] + print('read in html files') - full_apertures = FULL_FRAME_APERTURES[instrument.upper()] - templates_all_apertures = {} - for aperture in full_apertures: +def generic_telemetry_plot(times, values, name, nominal_value=None, yellow_limits=None, + red_limits=None, save=True): + """Create a value versus time plot of a single telemetry mnemonic. Optionally + add background colors corresponding to good (green), warning (yellow), and red + (error) values. - # Start with default values for instrument and aperture because - # BokehTemplate's __init__ method does not allow input arguments - monitor_template = monitor_pages.DarkMonitor() + Parameters + ---------- + times : list + List of datetime instances - # Set instrument and monitor using DarkMonitor's setters - monitor_template.aperture_info = (instrument, aperture) - templates_all_apertures[aperture] = monitor_template + values : list + Telemetry values - # Histogram tab - histograms_all_apertures = [] - for aperture_name, template in templates_all_apertures.items(): - histogram = template.refs["dark_full_histogram_figure"] - histogram.sizing_mode = "scale_width" # Make sure the sizing is adjustable - histograms_all_apertures.append(histogram) + name : str + Name of the telemetry mnemonic (e.g. 'SE_ZINRCICE1') - if instrument == 'NIRCam': - a1, a2, a3, a4, a5, b1, b2, b3, b4, b5 = histograms_all_apertures - histogram_layout = layout( - [a2, a4, b3, b1], - [a1, a3, b4, b2], - [a5, b5] - ) + nominal_value : float + Optional expected value for the mnemonic. If provided, a horizontal dashed line + at this value will be added to the plot. - elif instrument in ['NIRISS', 'MIRI']: - single_aperture = histograms_all_apertures[0] - histogram_layout = layout( - [single_aperture] - ) + yellow_limits : tup + Tuple of (low, high) values. If provided, the areas of the plot less than + and greater than will be given a yellow background, to indicate an area + of concern. - elif instrument in ['NIRSpec', 'FGS']: - d1, d2 = histograms_all_apertures - histogram_layout = layout( - [d1, d2] - ) + red_limits : tup + Tuple of (low, high) values. If provided, the areas of the plot less than + and greater than will be given a red background, to indicate values that + may indicate an error. It is assumed that the low value of red_limits is less + than the low value of yellow_limits, and that the high value of red_limits is + greater than the high value of yellow_limits. 
- histogram_layout.sizing_mode = "scale_width" # Make sure the sizing is adjustable - histogram_tab = Panel(child=histogram_layout, title="Histogram") + save : bool + If True, save the plot to an html file. - # Current v. time tab - lines_all_apertures = [] - for aperture_name, template in templates_all_apertures.items(): - line = template.refs["dark_current_time_figure"] - line.title.align = "center" - line.title.text_font_size = "20px" - line.sizing_mode = "scale_width" # Make sure the sizing is adjustable - lines_all_apertures.append(line) - - if instrument == 'NIRCam': - a1, a2, a3, a4, a5, b1, b2, b3, b4, b5 = lines_all_apertures - line_layout = layout( - [a2, a4, b3, b1], - [a1, a3, b4, b2], - [a5, b5] - ) + Returns + ------- + fig : bokeh.plotting.figure + Telemetry plot object + """ + if save: + output_file(f"telem_plot_{name}.html") - elif instrument in ['NIRISS', 'MIRI']: - single_aperture = lines_all_apertures[0] - line_layout = layout( - [single_aperture] - ) + fig = figure(width=400, height=400, x_axis_label='Date', y_axis_label='Voltage', + x_axis_type='datetime') + fig.circle(times, values, size=4, color='navy', alpha=0.5) - elif instrument in ['NIRSpec', 'FGS']: - d1, d2 = lines_all_apertures - line_layout = layout( - [d1, d2] - ) + if nominal_value is not None: + fig.line(times, np.repeat(nominal_value, len(times)), line_dash='dashed') - line_layout.sizing_mode = "scale_width" # Make sure the sizing is adjustable - line_tab = Panel(child=line_layout, title="Trending") + fig.xaxis.formatter = DatetimeTickFormatter(hours=["%d %b %H:%M"], + days=["%d %b %H:%M"], + months=["%d %b %Y %H:%M"], + years=["%d %b %Y"], + ) + fig.xaxis.major_label_orientation = np.pi / 4 - # Mean dark image tab + fig = add_limit_boxes(fig, yellow=yellow_limits, red=red_limits) - # The three lines below work for displaying a single image - image = templates_all_apertures[full_apertures[0]].refs["mean_dark_image_figure"] - image.sizing_mode = "scale_width" # Make sure the sizing is adjustable - image_layout = layout(image) - image.height = 250 # Not working - image_layout.sizing_mode = "scale_width" - image_tab = Panel(child=image_layout, title="Mean Dark Image") + return fig - # Build tabs - tabs = Tabs(tabs=[histogram_tab, line_tab, image_tab]) - # Return tab HTML and JavaScript to web app - script, div = components(tabs) +def identify_dark_monitor_tables(instrument): + """Determine which dark current database tables as associated with + a given instrument""" - return div, script + mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()] + query_table = eval('{}DarkQueryHistory'.format(mixed_case_name)) + pixel_table = eval('{}DarkPixelStats'.format(mixed_case_name)) + stats_table = eval('{}DarkDarkCurrent'.format(mixed_case_name)) + return query_table, pixel_table, stats_table def readnoise_monitor_tabs(instrument): @@ -351,3 +364,88 @@ def readnoise_monitor_tabs(instrument): script, div = components(tabs) return div, script + + +def standard_monitor_plot_layout(instrument, plots): + """Arrange a set of plots into a bokeh layout. The layout will + show the plots for full frame apertures in an orientation that + matches the relative detector locations within the instrument. + Any subarray aperture plots will be arranged below the full frame + plots, with 4 plots to a row, in an order matching that in pysiaf's + aperture list. This function assumes that there are plots for all full + frame apertures present. 
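[Editorial note: the eval()-based lookup in identify_dark_monitor_tables could equally be written with getattr. A sketch only, assuming the table classes follow the <Instrument>Dark* naming used in the eval() calls and live in jwql.database.database_interface; not part of this PR.]

import jwql.database.database_interface as di
from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE

def dark_monitor_tables(instrument):
    # getattr-based equivalent of the eval() lookups above
    name = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()]
    query_table = getattr(di, '{}DarkQueryHistory'.format(name))
    pixel_table = getattr(di, '{}DarkPixelStats'.format(name))
    stats_table = getattr(di, '{}DarkDarkCurrent'.format(name))
    return query_table, pixel_table, stats_table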
+ + Parameters + ---------- + instrument : str + Name of the instrument that the plots are for + + plots : dict + Dictionary containing a set of plots for an instrument. + Keys are aperture names (e.g. NRCA1_FULL) and values are the + plots (bokeh figures) + + Returns + ------- + plot_layout : bokeh.layouts.layout + """ + # Generate nested lists of the full frame apertures, which will be shown at the top + # of the tab. Note that order below is intentional. It mimics the detectors' locations + # relative to one another in the focal plane. + if instrument.lower() == 'nircam': + full_frame_lists = [ + [plots['NRCA2_FULL'], plots['NRCA4_FULL'], plots['NRCB3_FULL'], plots['NRCB1_FULL']], + [plots['NRCA1_FULL'], plots['NRCA3_FULL'], plots['NRCB4_FULL'], plots['NRCB2_FULL']], + [plots['NRCA5_FULL'], plots['NRCB5_FULL']] + ] + elif instrument.lower() == 'niriss': + full_frame_lists = [ + [plots['NIS_CEN']] + ] + elif instrument.lower() == 'miri': + full_frame_lists = [ + [plots['MIRIM_FULL']] + ] + elif instrument.lower() == 'nirspec': + full_frame_lists = [ + [plots['NRS1_FULL'], plots['NRS2_FULL']] + ] + elif instrument.lower() == 'fgs': + full_frame_lists = [ + [plots['FGS1_FULL'], plots['FGS2_FULL']] + ] + + # Next create lists of subarrays. Keep the subarrays in the order in which + # they exist in pyiaf, in order to make the page a little more readable. + # The dark monitor also populates aperture names using pysiaf. + subarrs = [p for p in plots.keys() if p not in FULL_FRAME_APERTURES[instrument.upper()]] + siaf = pysiaf.Siaf(instrument.lower()) + all_apertures = np.array(list(siaf.apernames)) + + indexes = [] + for key in subarrs: + subarr_plot_idx = np.where(all_apertures == key)[0] + if len(subarr_plot_idx) > 0: + indexes.append(subarr_plot_idx[0]) + to_sort = np.argsort(indexes) + sorted_keys = np.array(subarrs)[to_sort] + + # Place 4 subarray plots in each row. Generate a nested + # list where each sublist contains the plots to place in + # a given row + subarr_plots_per_row = 4 + first_col = np.arange(0, len(sorted_keys), 4) + + subarr_lists = [] + for idx in first_col: + row_keys = sorted_keys[idx: idx + subarr_plots_per_row] + row_list = [plots[key] for key in row_keys] + subarr_lists.append(row_list) + + # Combine full frame and subarray aperture lists + full_list = full_frame_lists + subarr_lists + + # Now create a layout that holds the lists + plot_layout = layout(full_list) + + return plot_layout diff --git a/jwql/website/apps/jwql/bokeh_dashboard.py b/jwql/website/apps/jwql/bokeh_dashboard.py index c28710729..2c282e274 100644 --- a/jwql/website/apps/jwql/bokeh_dashboard.py +++ b/jwql/website/apps/jwql/bokeh_dashboard.py @@ -11,6 +11,7 @@ ------- - Mees B. 
Fix + - Bryan Hilbert Use --- @@ -31,19 +32,97 @@ from datetime import datetime as dt from math import pi +from operator import itemgetter +import os -from bokeh.models import Axis, ColumnDataSource, DatetimeTickFormatter, OpenURL, TapTool +from bokeh.layouts import column +from bokeh.models import Axis, ColumnDataSource, DatetimeTickFormatter, HoverTool, OpenURL, TapTool from bokeh.models.widgets import Panel, Tabs from bokeh.plotting import figure from bokeh.transform import cumsum import numpy as np import pandas as pd +from sqlalchemy import func, and_ -from jwql.utils.constants import ANOMALY_CHOICES_PER_INSTRUMENT, FILTERS_PER_INSTRUMENT -from jwql.utils.utils import get_base_url +import jwql.database.database_interface as di +from jwql.database.database_interface import CentralStore +from jwql.utils.constants import ANOMALY_CHOICES_PER_INSTRUMENT, FILTERS_PER_INSTRUMENT, JWST_INSTRUMENT_NAMES_MIXEDCASE +from jwql.utils.utils import get_base_url, get_config from jwql.website.apps.jwql.data_containers import build_table +def build_table_latest_entry(tablename): + """Create Pandas dataframe from the most recent entry of a JWQLDB table. + + Parameters + ---------- + tablename : str + Name of JWQL database table name. + + Returns + ------- + table_meta_data : pandas.DataFrame + Pandas data frame version of JWQL database table. + """ + # Make dictionary of tablename : class object + # This matches what the user selects in the select element + # in the webform to the python object on the backend. + tables_of_interest = {} + for item in di.__dict__.keys(): + table = getattr(di, item) + if hasattr(table, '__tablename__'): + tables_of_interest[table.__tablename__] = table + + session, _, _, _ = di.load_connection(get_config()['connection_string']) + table_object = tables_of_interest[tablename] # Select table object + + subq = session.query(table_object.instrument, + func.max(table_object.date).label('maxdate') + ).group_by(table_object.instrument).subquery('t2') + + result = session.query(table_object).join( + subq, + and_( + table_object.instrument == subq.c.instrument, + table_object.date == subq.c.maxdate + ) + ) + + # Turn query result into list of dicts + result_dict = [row.__dict__ for row in result.all()] + column_names = table_object.__table__.columns.keys() + + # Build list of column data based on column name. + data = [] + for column in column_names: + column_data = list(map(itemgetter(column), result_dict)) + data.append(column_data) + + data = dict(zip(column_names, data)) + + # Build table. + table_meta_data = pd.DataFrame(data) + + session.close() + return table_meta_data + + +def create_filter_based_pie_chart(title, source): + """ + """ + pie = figure(height=400, title=title, toolbar_location=None, + tools="hover", tooltips="@filter: @value", x_range=(-0.5, 0.5), y_range=(0.5, 1.5)) + + pie.wedge(x=0, y=1, radius=0.3, + start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'), + line_color="white", fill_color='colors', source=source) + + pie.axis.axis_label = None + pie.axis.visible = False + pie.grid.grid_line_color = None + return pie + + def disable_scientific_notation(figure): """Disable y axis scientific notation. 
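[Editorial note: the group-wise "latest entry per instrument" query used by build_table_latest_entry is a standard SQLAlchemy pattern. A sketch, assuming a mapped table with instrument and date columns:]

from sqlalchemy import and_, func

def latest_row_per_instrument(session, table):
    # Subquery: newest date per instrument, then join back to get the full rows
    subq = (session.query(table.instrument, func.max(table.date).label('maxdate'))
            .group_by(table.instrument)
            .subquery('t2'))
    return (session.query(table)
            .join(subq, and_(table.instrument == subq.c.instrument,
                             table.date == subq.c.maxdate))
            .all())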
@@ -59,6 +138,17 @@ def disable_scientific_notation(figure): pass +""" +# Currently unused; preserved for reference when moving to bokeh 3 +def treemap(df, col, x, y, dx, dy, *, N=100): + sub_df = df.nlargest(N, col) + normed = normalize_sizes(sub_df[col], dx, dy) + blocks = squarify(normed, x, y, dx, dy) + blocks_df = pd.DataFrame.from_dict(blocks).set_index(sub_df.index) + return sub_df.join(blocks_df, how='left').reset_index() +""" + + class GeneralDashboard: def __init__(self, delta_t=None): @@ -68,6 +158,198 @@ def __init__(self, delta_t=None): now = dt.now() self.date = pd.Timestamp('{}-{}-{}'.format(now.year, now.month, now.day)) + def dashboard_disk_usage(self): + """Create trending plot of data volume for various disks. Here we are plotting + the results of a "df -hk" call for top-level directories. The results (i.e. on + central store) may contain contributions from non-JWQL files, since JWQL can + share disks with other projects. These plots are useful for tracking disk usage + and requesting more disk space if needed. + + Returns + ------- + tabs : bokeh.models.widgets.widget.Widget + Set of tabs containing plots of the used and available disk space + """ + # There are two main disks that we want to show usage for. The central store + # area, and the disk that is internal to the server. Use the logs entry to + # get the central store information, and the preview_image entry to get + # server disk information. + config = get_config() + + log_data = di.session.query(CentralStore.date, CentralStore.size, CentralStore.available) \ + .filter(CentralStore.area == 'logs') \ + .all() + + # Convert to dataframe + log_data = pd.DataFrame(log_data) + + preview_data = di.session.query(CentralStore.date, CentralStore.size, CentralStore.available) \ + .filter(CentralStore.area == 'preview_images') \ + .all() + + # Convert to dataframe + preview_data = pd.DataFrame(preview_data) + + # If the user is requesting a certain time range, cut down the entries + if not pd.isnull(self.delta_t): + log_data = log_data[(log_data['date'] >= self.date - self.delta_t) & (log_data['date'] <= self.date)] + preview_data = preview_data[(preview_data['date'] >= self.date - self.delta_t) & (preview_data['date'] <= self.date)] + + log_results = {'dirname': os.path.abspath(os.path.join(config['log_dir'], '../')), + 'results': log_data, + 'shortname': 'Central Store' + } + + preview_results = {'dirname': os.path.abspath(os.path.join(config['preview_image_filesystem'], '../')), + 'results': preview_data, + 'shortname': 'Server' + } + + # Plot total data volume and available disk space versus time + plots = {} + hover_tool = {} + tabs = [] + for data in [preview_results, log_results]: + + # Calculate the size of the data + data['results']['used'] = data['results']['size'] - data['results']['available'] + source = ColumnDataSource(data['results']) + + # Initialize plot + plots[data['shortname']] = figure(tools='pan,box_zoom,wheel_zoom,reset,save', + plot_width=800, + x_axis_type='datetime', + title=f"Available & Used Storage on {data['shortname']}", + x_axis_label='Date', + y_axis_label='Disk Space (TB)') + + plots[data['shortname']].line(x='date', y='available', source=source, legend_label='Available', line_dash='dashed', line_color='#C85108', line_width=3) + plots[data['shortname']].circle(x='date', y='available', source=source,color='#C85108', size=10) + plots[data['shortname']].line(x='date', y='used', source=source, legend_label='Used', line_dash='dashed', line_color='#355C7D', line_width=3) + 
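# [Editorial note: the storage plots below attach hover read-outs with a
# datetime formatter. The recipe in isolation, with placeholder data
# (column names 'date'/'used' match the dataframes used in these methods):]
#
#   from bokeh.models import ColumnDataSource, HoverTool
#   from bokeh.plotting import figure
#   import pandas as pd
#
#   source = ColumnDataSource(pd.DataFrame({'date': pd.to_datetime(['2023-01-01', '2023-02-01']),
#                                           'used': [10.2, 11.0]}))
#   fig = figure(x_axis_type='datetime')
#   fig.line(x='date', y='used', source=source)
#   hover = HoverTool(tooltips=[('Used:', '@used'), ('Date:', '@date{%d %b %Y}')],
#                     formatters={'@date': 'datetime'})
#   fig.add_tools(hover)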
plots[data['shortname']].circle(x='date', y='used', source=source,color='#355C7D', size=10) + + plots[data['shortname']].xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], + days=["%d %B %Y"], + months=["%d %B %Y"], + years=["%B %Y"], + ) + plots[data['shortname']].xaxis.major_label_orientation = pi / 4 + plots[data['shortname']].legend.location = 'top_left' + + hover_tool[data['shortname']] = HoverTool(tooltips=[('Available:', '@available'), + ('Used:', '@used'), + ('Date:', '@date{%d %b %Y}') + ]) + hover_tool[data['shortname']].formatters = {'@date': 'datetime'} + plots[data['shortname']].tools.append(hover_tool[data['shortname']]) + tabs.append(Panel(child=plots[data['shortname']], title=f"{data['shortname']} Storage")) + + tabs = Tabs(tabs=tabs) + + di.session.close() + return tabs + + + def dashboard_central_store_data_volume(self): + """Create trending plot of data volume for various JWQL-related areas on disk. + These plots show data volumes calculated by walking over subdirectories/files in + the JWQL-specific directories. So these plots may not include the total used + disk volume, in the cases where JWQL is sharing a disk with other projects. These + plots are useful for monitoring the total volume of e.g. our preview images. + + Returns + ------- + tabs : bokeh.models.widgets.widget.Widget + A figure with tabs for each central store area + """ + # Initialize plot + plot = figure(tools='pan,box_zoom,wheel_zoom,reset,save', + plot_width=800, + x_axis_type='datetime', + title='JWQL directory size', + x_axis_label='Date', + y_axis_label='Disk Space (TB)') + + # This part of the plot should cycle through areas and plot area used values vs. date + #arealist = ['logs', 'outputs', 'test', 'preview_images', 'thumbnails', 'all'] + arealist = ['logs', 'outputs', 'preview_images', 'thumbnails'] + colors = ['#F8B195', '#F67280', '#6C5B7B', '#355C7D'] + for area, color in zip(arealist, colors): + + # Query for used sizes + results = di.session.query(CentralStore.date, CentralStore.used).filter(CentralStore.area == area).all() + + if results: + # Convert to dataframe + results = pd.DataFrame(results) + + if not pd.isnull(self.delta_t): + results = results[(results['date'] >= self.date - self.delta_t) & (results['date'] <= self.date)] + + # Plot the results + source = ColumnDataSource(results) + plot.line(x='date', y='used', source=source, line_color=color, line_dash='dashed', legend_label=area, line_width=3) + plot.circle(x='date', y='used', source=source, color=color, size=10) + + hover_tool = HoverTool(tooltips=[('Used:', f'@used TB'), + ('Date:', '@date{%d %b %Y}') + ]) + hover_tool.formatters = {'@date': 'datetime'} + plot.tools.append(hover_tool) + + plot.xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], + days=["%d %B %Y"], + months=["%d %B %Y"], + years=["%B %Y"], + ) + plot.xaxis.major_label_orientation = pi / 4 + plot.legend.location = 'top_left' + + # Put the "all" plot in a separate figure because it will be larger than all the pieces, which would + # throw off the y range if it were in a single plot + cen_store_plot = figure(tools='pan,box_zoom,wheel_zoom,reset,save', + plot_width=800, + x_axis_type='datetime', + title='JWQL central store directory, total data volume', + x_axis_label='Date', + y_axis_label='Disk Space (TB)') + + cen_store_results = di.session.query(CentralStore.date, CentralStore.used).filter(CentralStore.area == 'all').all() + + # Group by date + if cen_store_results: + + # Convert to dataframe + cen_store_results = 
pd.DataFrame(cen_store_results) + + if not pd.isnull(self.delta_t): + cen_store_results = cen_store_results[(cen_store_results['date'] >= self.date - self.delta_t) & (cen_store_results['date'] <= self.date)] + + # Group by date + cen_store_source = ColumnDataSource(cen_store_results) + + # Plot the results + legend_str = 'File volume' + cen_store_plot.line(x='date', y='used', source=cen_store_source, legend_label=legend_str, line_dash='dashed', line_color='#355C7D', line_width=3) + cen_store_plot.circle(x='date', y='used', source=cen_store_source, color='#355C7D', size=10) + cen_store_plot.xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], + days=["%d %B %Y"], + months=["%d %B %Y"], + years=["%B %Y"], + ) + cen_store_plot.xaxis.major_label_orientation = pi / 4 + cen_store_plot.legend.location = 'top_left' + + hover_tool = HoverTool(tooltips=[('Used:', f'@used TB'), + ('Date:', '@date{%d %b %Y}') + ]) + hover_tool.formatters = {'@date': 'datetime'} + cen_store_plot.tools.append(hover_tool) + + di.session.close() + return plot, cen_store_plot + + def dashboard_filetype_bar_chart(self): """Build bar chart of files based off of type @@ -80,22 +362,22 @@ def dashboard_filetype_bar_chart(self): # Make Pandas DF for filesystem_instrument # If time delta exists, filter data based on that. data = build_table('filesystem_instrument') - if not pd.isnull(self.delta_t): - data = data[(data['date'] >= (self.date - self.delta_t)) & (data['date'] <= self.date)] + + # Keep only the rows containing the most recent timestamp + data = data[data['date'] == data['date'].max()] + date_string = data['date'].max().strftime("%d %b %Y") # Set title and figures list to make panels - title = 'File Types per Instrument' figures = [] - # Group by instrument/filetype and sum the number of files that have that specific combination - data_by_filetype = data.groupby(["instrument", "filetype"]).size().reset_index(name="count") - # For unique instrument values, loop through data # Find all entries for instrument/filetype combo # Make figure and append it to list. 
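[Editorial note: the filetype bar chart now keys off the most recent database snapshot rather than a time window. Roughly, with made-up example rows:]

import pandas as pd

data = pd.DataFrame({'date': pd.to_datetime(['2023-01-01', '2023-01-02', '2023-01-02']),
                     'instrument': ['nircam', 'nircam', 'miri'],
                     'filetype': ['uncal', 'uncal', 'rate'],
                     'count': [10, 12, 3]})

# Keep only the rows from the most recent snapshot, then slice per instrument
latest = data[data['date'] == data['date'].max()]
date_string = latest['date'].max().strftime('%d %b %Y')
nircam_only = latest[latest['instrument'] == 'nircam'].sort_values('filetype')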
for instrument in data.instrument.unique(): - index = data_by_filetype["instrument"] == instrument - figures.append(self.make_panel(data_by_filetype['filetype'][index], data_by_filetype['count'][index], instrument, title, 'File Type')) + index = data["instrument"] == instrument + inst_only = data[index].sort_values('filetype') + title = f'{JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()]} files per Filetype: {date_string}' + figures.append(self.make_panel(inst_only['filetype'], inst_only['count'], instrument, title, 'File Type')) tabs = Tabs(tabs=figures) @@ -109,18 +391,19 @@ def dashboard_instrument_pie_chart(self): plot : bokeh.plotting.figure Pie chart figure """ - # Replace with jwql.website.apps.jwql.data_containers.build_table data = build_table('filesystem_instrument') - if not pd.isnull(self.delta_t): - data = data[(data['date'] >= self.date - self.delta_t) & (data['date'] <= self.date)] + + # Keep only the rows containing the most recent timestamp + data = data[data['date'] == data['date'].max()] + date_string = data['date'].max().strftime("%d %b %Y") try: - file_counts = {'nircam': data.instrument.str.count('nircam').sum(), - 'nirspec': data.instrument.str.count('nirspec').sum(), - 'niriss': data.instrument.str.count('niriss').sum(), - 'miri': data.instrument.str.count('miri').sum(), - 'fgs': data.instrument.str.count('fgs').sum()} + file_counts = {'nircam': data[data.instrument == 'nircam']['count'].sum(), + 'nirspec': data[data.instrument == 'nirspec']['count'].sum(), + 'niriss': data[data.instrument == 'niriss']['count'].sum(), + 'miri': data[data.instrument == 'miri']['count'].sum(), + 'fgs': data[data.instrument == 'fgs']['count'].sum()} except AttributeError: file_counts = {'nircam': 0, 'nirspec': 0, @@ -131,12 +414,12 @@ def dashboard_instrument_pie_chart(self): data = pd.Series(file_counts).reset_index(name='value').rename(columns={'index': 'instrument'}) data['angle'] = data['value'] / data['value'].sum() * 2 * pi data['color'] = ['#F8B195', '#F67280', '#C06C84', '#6C5B7B', '#355C7D'] - plot = figure(title="Number of Files Per Instruments", toolbar_location=None, + plot = figure(title=f"Number of Files Per Instrument {date_string}", toolbar_location=None, tools="hover,tap", tooltips="@instrument: @value", x_range=(-0.5, 1.0)) plot.wedge(x=0, y=1, radius=0.4, start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'), - line_color="white", color='color', legend='instrument', source=data) + line_color="white", color='color', legend_label='instrument', source=data) url = "{}/@instrument".format(get_base_url()) taptool = plot.select(type=TapTool) @@ -164,30 +447,37 @@ def dashboard_files_per_day(self): date_times = [pd.to_datetime(datetime).date() for datetime in source['date'].values] source['datestr'] = [date_time.strftime("%Y-%m-%d") for date_time in date_times] - p1 = figure(title="Number of Files Added by Day", tools="reset,hover,box_zoom,wheel_zoom", tooltips="@datestr: @total_file_count", plot_width=1700, x_axis_label='Date', y_axis_label='Number of Files Added') + p1 = figure(title="Number of Files in Filesystem (MAST)", tools="reset,hover,box_zoom,wheel_zoom", tooltips="@datestr: @total_file_count", plot_width=800, x_axis_label='Date', y_axis_label='Number of Files Added') p1.line(x='date', y='total_file_count', source=source, color='#6C5B7B', line_dash='dashed', line_width=3) + p1.scatter(x='date', y='total_file_count', source=source, color='#C85108', size=10) disable_scientific_notation(p1) tab1 = Panel(child=p1, title='Files Per Day') - p2 = 
figure(title="Available & Used Storage", tools="reset,hover,box_zoom,wheel_zoom", tooltips="@datestr: @total_file_count", plot_width=1700, x_axis_label='Date', y_axis_label='Storage Space [Terabytes?]') - p2.line(x='date', y='available', source=source, color='#F8B195', line_dash='dashed', line_width=3, legend='Available Storage') - p2.line(x='date', y='used', source=source, color='#355C7D', line_dash='dashed', line_width=3, legend='Used Storage') + # Create separate tooltip for storage plot. + # Show date and used and available storage together + + p2 = figure(title="Available & Used Storage in Filesystem (MAST)", tools="reset,hover,box_zoom,wheel_zoom", tooltips="@datestr: @total_file_count", plot_width=800, x_axis_label='Date', y_axis_label='Disk Space (TB)') + p2.line(x='date', y='available', source=source, color='#C85108', line_dash='dashed', line_width=3, legend_label='Available Storage') + p2.line(x='date', y='used', source=source, color='#355C7D', line_dash='dashed', line_width=3, legend_label='Used Storage') + p2.scatter(x='date', y='available', source=source, color='#C85108', size=10) + p2.scatter(x='date', y='used', source=source, color='#355C7D', size=10) disable_scientific_notation(p2) tab2 = Panel(child=p2, title='Storage') - p1.xaxis.formatter = DatetimeTickFormatter(hours=["%d %B %Y"], + p1.xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], days=["%d %B %Y"], months=["%d %B %Y"], - years=["%d %B %Y"], + years=["%B %Y"], ) p1.xaxis.major_label_orientation = pi / 4 - p2.xaxis.formatter = DatetimeTickFormatter(hours=["%d %B %Y"], + p2.xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], days=["%d %B %Y"], months=["%d %B %Y"], - years=["%d %B %Y"], + years=["%B %Y"], ) p2.xaxis.major_label_orientation = pi / 4 + p2.legend.location = 'top_left' tabs = Tabs(tabs=[tab1, tab2]) @@ -211,13 +501,14 @@ def dashboard_monitor_tracking(self): if not pd.isnull(self.delta_t): data = data[(data['start_time'] >= self.date - self.delta_t) & (data['start_time'] <= self.date)] + # Sort the data by start_time before translating into strings + data.sort_values(by='start_time', ascending=False, inplace=True) + + # Now translate times to strings data['start_time'] = data['start_time'].map(lambda x: x.strftime('%m-%d-%Y %H:%M:%S')) data['end_time'] = data['end_time'].map(lambda x: x.strftime('%m-%d-%Y %H:%M:%S')) - # data = data.drop(columns='affected_tables') - table_values = data.sort_values(by='start_time', ascending=False).values - table_columns = data.columns.values - return table_columns, table_values + return data.columns.values, data.values def make_panel(self, x_value, top, instrument, title, x_axis_label): """Make tab panel for tablulated figure. @@ -259,14 +550,229 @@ def dashboard_exposure_count_by_filter(self): tabs : bokeh.models.widgets.widget.Widget A figure with tabs for each instrument. """ + # build_table_latest_query will return only the database entries with the latest date. 
This should + # correspond to one row/entry per instrument + data = build_table_latest_entry('filesystem_characteristics') - title = 'File Counts Per Filter' - figures = [self.make_panel(FILTERS_PER_INSTRUMENT[instrument], np.random.rand(len(FILTERS_PER_INSTRUMENT[instrument])) * 10e7, instrument, title, 'Filters') for instrument in FILTERS_PER_INSTRUMENT] + # Sort by instrument name so that the order of the tabs will always be the same + data = data.sort_values('instrument') + + figures = [] + # This is a loop over instruments + for i in range(len(data)): + instrument = data.iloc[i]['instrument'] + filterpupil = np.array(data.iloc[i]['filter_pupil']) + num_obs = np.array(data.iloc[i]['obs_per_filter_pupil']) + + # Sort by num_obs in order to make the plot more readable + idx = np.argsort(num_obs) + num_obs = num_obs[idx] + filterpupil = filterpupil[idx] + + # Normalize the number of observations using each filter by the total number of observations + total_obs = sum(num_obs) + num_obs = num_obs / total_obs * 100. + + data_dict = {} + for filt, val in zip(filterpupil, num_obs): + data_dict[filt] = val + + inst_data = pd.Series(data_dict).reset_index(name='value').rename(columns={'index': 'filter'}) + + if instrument != 'nircam': + # Calculate the angle covered by each filter + inst_data['angle'] = inst_data['value'] / inst_data['value'].sum() * 2 * np.pi + + # Keep all wedges the same color, except for those that are a very + # small fraction, and will be covered in the second pie chart. Make + # those wedges grey in the primary pie chart. + inst_data['colors'] = ['#c85108'] * len(inst_data) + inst_data.loc[inst_data['value'] < 0.5, 'colors'] = '#bec4d4' + + # Make a dataframe containing only the filters that are used in less + # than some threshold percentage of observations + small = inst_data.loc[inst_data['value'] < 0.5].copy() + + # Recompute the angles for these, and make them all the same color. + small['angle'] = small['value'] / small['value'].sum() * 2 * np.pi + small['colors'] = ['#bec4d4'] * len(small) + + # Create two pie charts + pie_fig = create_filter_based_pie_chart("Percentage of observations using filter/pupil combinations: All Filters", inst_data) + small_pie_fig = create_filter_based_pie_chart("Low Percentage Filters (gray wedges from above)", small) + + # Place the pie charts in a column/Panel, and append to the figure + colplots = column(pie_fig, small_pie_fig) + tab = Panel(child=colplots, title=f'{instrument}') + figures.append(tab) + + else: + # For NIRCam, we split the SW and LW channels and put each in its own tab. + # This will cut down on the number of entries in each and make the pie + # charts more readable. + + # Add a column designating the channel. Exclude darks. + channel = [] + for f in filterpupil: + if 'FLAT' in f: + channel.append('Dark') + elif f[0] == 'F': + wav = int(f[1:4]) + if wav < 220: + channel.append('SW') + else: + channel.append('LW') + else: + channel.append('SW') + inst_data['channel'] = channel + + # Set the colors. All wedges with a pie chart have the same color. + color_options = {'LW': '#c85108', 'SW': '#3d85c6', 'Dark': '#bec4d4'} + colors = [] + for entry in channel: + colors.append(color_options[entry]) + inst_data['colors'] = colors + + # Even though it's not quite correct, create separate charts for SW vs LW. This will + # hopefully make them much easier to read + sw_data = inst_data.loc[inst_data['channel'] == 'SW'].copy() + lw_data = inst_data.loc[inst_data['channel'] == 'LW'].copy() + + # Recalculate the angles. 
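[Editorial note: the pie charts are driven by a normalize-and-convert-to-angles step. A sketch with illustrative filter/pupil labels and counts (values chosen only to show the low-percentage split):]

import numpy as np
import pandas as pd

counts = pd.Series({'F200W/CLEAR': 900, 'F356W/CLEAR': 97, 'F480M/F405N': 3})
inst_data = counts.reset_index(name='value').rename(columns={'index': 'filter'})

# Normalize to percentages, convert to wedge angles, and pull out the
# low-percentage filters for the separate zoomed-in chart
inst_data['value'] = inst_data['value'] / inst_data['value'].sum() * 100.
inst_data['angle'] = inst_data['value'] / inst_data['value'].sum() * 2 * np.pi
small = inst_data.loc[inst_data['value'] < 0.5].copy()
small['angle'] = small['value'] / small['value'].sum() * 2 * np.pi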
These won't be strictly correct since SW and LW filters + # are not both used exactly 50% of the time, but it's close enough for now. + sw_data['angle'] = sw_data['value'] / sw_data['value'].sum() * 2 * np.pi + lw_data['angle'] = lw_data['value'] / lw_data['value'].sum() * 2 * np.pi + + # Zoomed in version of the small contributors + sw_small = sw_data.loc[sw_data['value'] <0.5].copy() + lw_small = lw_data.loc[lw_data['value'] <0.5].copy() + sw_small['angle'] = sw_small['value'] / sw_small['value'].sum() * 2 * np.pi + lw_small['angle'] = lw_small['value'] / lw_small['value'].sum() * 2 * np.pi + sw_small['colors'] = ['#bec4d4'] * len(sw_small) + lw_small['colors'] = ['#bec4d4'] * len(lw_small) + + # Set the filters that are used in less than 0.5% of observations to be grey. + # These will be plotted in a second pie chart on theor own, in order to make + # them more readable. + sw_data.loc[sw_data['value'] < 0.5, 'colors'] = '#bec4d4' + lw_data.loc[lw_data['value'] < 0.5, 'colors'] = '#bec4d4' + + """ + Would be nice to keep this treemap code somewhere, so that once we upgrade to + bokeh 3.0, we can change the pie charts to treemaps, which should be easier to read + #########treemap####################### + ####treemap needs the squarify package, which would be a new dependency######## + ####https://docs.bokeh.org/en/3.0.0/docs/examples/topics/hierarchical/treemap.html### + ####this also requires bokeh version > 3.0.0, so we need to hold off on this + d = {'filter': filterpupil, 'num_obs': num_obs} + df = pd.DataFrame(data=d) + + # only for nircam, add a column for LW/SW + + + channel = [] + for f in filterpupil: + if 'FLAT' in f: + channel.append('Dark') + elif f[0] == 'F': + wav = int(f[1:4]) + if wav < 220: + channel.append('SW') + else: + channel.append('LW') + else: + channel.append('SW') + df['channel'] = channel + + filters = [] + pupils = [] + for f in filterpupil: + if f[0:3] != 'N/A': + filt, pup = f.split('/') + filters.append(filt) + pupils.append(pup) + else: + filters.append(f[0:3]) + pupils.append(f[4:]) + df['filters'] = filters + df['pupils'] = pupils + + regions = ('SW', 'LW', 'Dark') + + # Group by pupil value + obs_by_pupil = df.groupby(["channel", "pupil"]).sum("num_obs") + obs_by_pupil = obs_by_pupil.sort_values(by="num_obs").reset_index() + + # Get a total area for each channel + obs_by_channel = df.groupby(["channel"]).sum("num_obs") + + # Figure size + x, y, w, h = 0, 0, 800, 450 + + blocks_by_channel= treemap(obs_by_channel, "num_obs", x, y, w, h) + dfs = [] + for index, (channel, num_obs, x, y, dx, dy) in blocks_by_channel.iterrows(): + df = obs_by_pupil[obs_by_pupil.channel==channel] + dfs.append(treemap(df, "num_obs", x, y, dx, dy, N=10)) + blocks = pd.concat(dfs) + + p = figure(width=w, height=h, tooltips="@pupil", toolbar_location=None, + x_axis_location=None, y_axis_location=None) + p.x_range.range_padding = p.y_range.range_padding = 0 + p.grid.grid_line_color = None + + p.block('x', 'y', 'dx', 'dy', source=blocks, line_width=1, line_color="white", + fill_alpha=0.8, fill_color=factor_cmap("channel", "MediumContrast4", regions)) + + p.text('x', 'y', x_offset=2, text="Channel", source=blocks_by_channel, + text_font_size="18pt", text_color="white") + + blocks["ytop"] = blocks.y + blocks.dy + p.text('x', 'ytop', x_offset=2, y_offset=2, text="City", source=blocks, + text_font_size="6pt", text_baseline="top", + text_color=factor_cmap("Region", ("black", "white", "black", "white"), regions)) + + show(p) + """ + + + # Create pie charts for SW/LW, the main set of 
filters, and those that aren't used + # as much. + sw_pie_fig = create_filter_based_pie_chart("Percentage of observations using filter/pupil combinations: All Filters", sw_data) + sw_small_pie_fig = create_filter_based_pie_chart("Low Percentage Filters (gray wedges from above)", sw_small) + lw_pie_fig = create_filter_based_pie_chart("Percentage of observations using filter/pupil combinations: All Filters", lw_data) + lw_small_pie_fig = create_filter_based_pie_chart("Low Percentage Filters (gray wedges from above)", lw_small) + + # Create columns and Panels + sw_colplots = column(sw_pie_fig, sw_small_pie_fig) + lw_colplots = column(lw_pie_fig, lw_small_pie_fig) + + tab_sw = Panel(child=sw_colplots, title=f'{instrument} SW') + tab_lw = Panel(child=lw_colplots, title=f'{instrument} LW') + figures.append(tab_sw) + figures.append(tab_lw) + + # Add in a placeholder plot for FGS, in order to keep the page looking consistent + # from instrument to instrument + instrument = 'fgs' + data_dict = {} + data_dict['None'] = 100. + inst_data = pd.Series(data_dict).reset_index(name='value').rename(columns={'index': 'filter'}) + inst_data['angle'] = 2 * np.pi + inst_data['colors'] = ['#c85108'] + pie_fig = create_filter_based_pie_chart("FGS has no filters", inst_data) + small_pie_fig = create_filter_based_pie_chart("FGS has no filters", inst_data) + + # Place the pie charts in a column/Panel, and append to the figure + colplots = column(pie_fig, small_pie_fig) + tab = Panel(child=colplots, title=f'{instrument}') + figures.append(tab) tabs = Tabs(tabs=figures) return tabs + def dashboard_anomaly_per_instrument(self): """Create figure for number of anamolies for each JWST instrument. @@ -277,7 +783,7 @@ def dashboard_anomaly_per_instrument(self): """ # Set title and figures list to make panels - title = 'Anamoly Types per Instrument' + title = 'Anomaly Types per Instrument' figures = [] # For unique instrument values, loop through data @@ -288,8 +794,8 @@ def dashboard_anomaly_per_instrument(self): data = data.drop(columns=['id', 'rootname', 'user']) if not pd.isnull(self.delta_t) and not data.empty: data = data[(data['flag_date'] >= (self.date - self.delta_t)) & (data['flag_date'] <= self.date)] - summed_anamoly_columns = data.sum(axis=0).to_frame(name='counts') - figures.append(self.make_panel(summed_anamoly_columns.index.values, summed_anamoly_columns['counts'], instrument, title, 'Anomaly Type')) + summed_anomaly_columns = data.sum(axis=0, numeric_only=True).to_frame(name='counts') + figures.append(self.make_panel(summed_anomaly_columns.index.values, summed_anomaly_columns['counts'], instrument, title, 'Anomaly Type')) tabs = Tabs(tabs=figures) diff --git a/jwql/website/apps/jwql/bokeh_utils.py b/jwql/website/apps/jwql/bokeh_utils.py new file mode 100644 index 000000000..96eb497e4 --- /dev/null +++ b/jwql/website/apps/jwql/bokeh_utils.py @@ -0,0 +1,32 @@ +"""Various Bokeh-related utility functions for the ``jwql`` project. 
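[Editorial note: the per-instrument anomaly tally relies on pandas summing only the numeric flag columns. A sketch with made-up flags:]

import pandas as pd

data = pd.DataFrame({'snowball': [1, 0, 1],
                     'crosstalk': [0, 1, 0],
                     'flag_date': pd.to_datetime(['2023-01-01', '2023-01-02', '2023-01-03'])})

# numeric_only=True skips the datetime column so only the flags are counted
summed_anomaly_columns = data.sum(axis=0, numeric_only=True).to_frame(name='counts')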
+Authors +------- + - Bryan Hilbert +Use +--- + This module can be imported as such: + >>> from jwql.website.apps.jwql.bokeh_utils import PlaceholderPlot + """ +from bokeh.models import ColumnDataSource, Text +from bokeh.plotting import figure + + +class PlaceholderPlot(): + def __init__(self, title, x_label, y_label): + self.title = title + self.x_label = x_label + self.y_label = y_label + self.create() + + def create(self): + self.plot = figure(title=self.title, tools='', background_fill_color="#fafafa") + self.plot.x_range.start = 0 + self.plot.x_range.end = 1 + self.plot.y_range.start = 0 + self.plot.y_range.end = 1 + + source = ColumnDataSource(data=dict(x=[0.5], y=[0.5], text=['No data'])) + glyph = Text(x="x", y="y", text="text", angle=0., text_color="navy", text_font_size={'value':'20px'}) + self.plot.add_glyph(source, glyph) + self.plot.xaxis.axis_label = self.x_label + self.plot.yaxis.axis_label = self.y_label diff --git a/jwql/website/apps/jwql/data_containers.py b/jwql/website/apps/jwql/data_containers.py index faaf5f894..296e29249 100644 --- a/jwql/website/apps/jwql/data_containers.py +++ b/jwql/website/apps/jwql/data_containers.py @@ -11,6 +11,10 @@ - Lauren Chambers - Matthew Bourque - Teagan King + - Bryan Hilbert + - Maria Pena-Guerrero + - Bradley Sappington + - Melanie Clarke Use --- @@ -25,32 +29,44 @@ import copy from collections import OrderedDict import glob +import json from operator import getitem import os import re import tempfile +import logging from astropy.io import fits -from astropy.table import Table from astropy.time import Time from bs4 import BeautifulSoup +from django import setup from django.conf import settings +from django.contrib import messages +from django.core.exceptions import ObjectDoesNotExist import numpy as np from operator import itemgetter import pandas as pd +import pyvo as vo import requests +from datetime import datetime from jwql.database import database_interface as di from jwql.database.database_interface import load_connection -from jwql.edb.engineering_database import get_mnemonic, get_mnemonic_info -from jwql.instrument_monitors.miri_monitors.data_trending import dashboard as miri_dash -from jwql.instrument_monitors.nirspec_monitors.data_trending import dashboard as nirspec_dash +from jwql.edb.engineering_database import get_mnemonic, get_mnemonic_info, mnemonic_inventory from jwql.utils.utils import check_config_for_key, ensure_dir_exists, filesystem_path, filename_parser, get_config -from jwql.utils.constants import MONITORS, PREVIEW_IMAGE_LISTFILE, THUMBNAIL_LISTFILE -from jwql.utils.constants import IGNORED_SUFFIXES, INSTRUMENT_SERVICE_MATCH, JWST_INSTRUMENT_NAMES_MIXEDCASE, \ - JWST_INSTRUMENT_NAMES_SHORTHAND -from jwql.utils.preview_image import PreviewImage +from jwql.utils.constants import MAST_QUERY_LIMIT, MONITORS, THUMBNAIL_LISTFILE, THUMBNAIL_FILTER_LOOK +from jwql.utils.constants import EXPOSURE_PAGE_SUFFIX_ORDER, IGNORED_SUFFIXES, INSTRUMENT_SERVICE_MATCH +from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE, JWST_INSTRUMENT_NAMES +from jwql.utils.constants import REPORT_KEYS_PER_INSTRUMENT +from jwql.utils.constants import SUFFIXES_TO_ADD_ASSOCIATION, SUFFIXES_WITH_AVERAGED_INTS, QueryConfigKeys from jwql.utils.credentials import get_mast_token +from jwql.utils.permissions import set_permissions +from jwql.utils.utils import get_rootnames_for_instrument_proposal +from astroquery.mast import Mast + +# Increase the limit on the number of entries that can be returned by +# a MAST query. 
+Mast._portal_api_connection.PAGESIZE = MAST_QUERY_LIMIT # astroquery.mast import that depends on value of auth_mast # this import has to be made before any other import of astroquery.mast @@ -58,11 +74,20 @@ # Determine if the code is being run as part of a Readthedocs build ON_READTHEDOCS = False -if 'READTHEDOCS' in os.environ: +if 'READTHEDOCS' in os.environ: # pragma: no cover ON_READTHEDOCS = os.environ['READTHEDOCS'] + if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: - from .forms import MnemonicSearchForm, MnemonicQueryForm, MnemonicExplorationForm + # These lines are needed in order to use the Django models in a standalone + # script (as opposed to code run as a result of a webpage request). If these + # lines are not run, the script will crash when attempting to import the + # Django models in the line below. + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") + setup() + + from .forms import MnemonicSearchForm, MnemonicQueryForm, MnemonicExplorationForm, InstrumentAnomalySubmitForm + from jwql.website.apps.jwql.models import Observation, Proposal, RootFileInfo, Anomalies check_config_for_key('auth_mast') configs = get_config() auth_mast = configs['auth_mast'] @@ -70,19 +95,15 @@ from astropy import config conf = config.get_config('astroquery') conf['mast'] = {'server': 'https://{}'.format(mast_flavour)} -from astroquery.mast import Mast -from jwedb.edb_interface import mnemonic_inventory - -__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) -if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: FILESYSTEM_DIR = configs['filesystem'] PREVIEW_IMAGE_FILESYSTEM = configs['preview_image_filesystem'] THUMBNAIL_FILESYSTEM = configs['thumbnail_filesystem'] + OUTPUT_DIR = configs['outputs'] +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) PACKAGE_DIR = os.path.dirname(__location__.split('website')[0]) REPO_DIR = os.path.split(PACKAGE_DIR)[0] -# Temporary until JWST operations: switch to test string for MAST request URL if not ON_GITHUB_ACTIONS: Mast._portal_api_connection.MAST_REQUEST_URL = get_config()['mast_request_url'] @@ -133,36 +154,178 @@ def build_table(tablename): return table_meta_data -def data_trending(): - """Container for Miri datatrending dashboard and components +def filter_root_files(instrument=None, proposal=None, obsnum=None, sort_as=None, + look=None, exp_type=None, cat_type=None, detector=None): + """Retrieve and filter root file table entries. + + Parameters + ---------- + instrument : str, optional + Name of the JWST instrument. + proposal : str, optional + Proposal to match. Used as a 'starts with' filter. + obsnum : str, optional + Observation number to match. + sort_as : {'ascending', 'descending', 'recent', 'oldest'}, optional + Sorting method for output table. Ascending and descending + options refer to root file name; recent and oldest sort by exposure + start time. + look : {'new', 'viewed'}, optional + If set to None, all viewed values are returned. If set to + 'viewed', only viewed data is returned. If set to 'new', only + new data is returned. + exp_type : str, optional + Set to filter by exposure type. + cat_type : str, optional + Set to filter by proposal category. + detector : str, optional + Set to filter by detector name. Returns ------- - variables : int - nonsense - dashboard : list - A list containing the JavaScript and HTML content for the - dashboard + root_file_info : QuerySet + List of RootFileInfo entries matching input criteria. 
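[Editorial note: the standalone-Django bootstrapping added above is the standard pattern for using the web app's models outside a request. A minimal sketch, using the settings module path from this repository:]

import os
import django

# Point Django at the project settings and initialize the app registry
# before importing any models
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'jwql.website.jwql_proj.settings')
django.setup()

from jwql.website.apps.jwql.models import RootFileInfo  # noqa: E402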
""" - dashboard, variables = miri_dash.data_trending_dashboard() + # standardize input + + # get desired filters + filter_kwargs = dict() + if instrument is not None and str(instrument).strip().lower() != 'all': + inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()] + filter_kwargs['instrument'] = inst + if proposal is not None and str(proposal).strip().lower() != 'all': + filter_kwargs['proposal__startswith'] = proposal.lstrip('0') + if obsnum is not None and str(obsnum).strip().lower() != 'all': + filter_kwargs['obsnum__obsnum'] = obsnum + if look is not None and str(look).strip().lower() != 'all': + filter_kwargs['viewed'] = (str(look).lower() == 'viewed') + if exp_type is not None and str(exp_type).strip().lower() != 'all': + filter_kwargs['exp_type__iexact'] = exp_type + if cat_type is not None and str(cat_type).strip().lower() != 'all': + filter_kwargs['obsnum__proposal__category__iexact'] = cat_type + if detector is not None and str(detector).strip().lower() != 'all': + filter_kwargs['detector__iexact'] = detector + + # get file info by instrument from local model + root_file_info = RootFileInfo.objects.filter(**filter_kwargs) + + # descending by root file is default; + # for other options, sort as desired + sort_as = str(sort_as).strip().lower() + if sort_as == 'ascending': + root_file_info = root_file_info.order_by('root_name') + elif sort_as == 'recent': + root_file_info = root_file_info.order_by('-expstart', 'root_name') + elif sort_as == 'oldest': + root_file_info = root_file_info.order_by('expstart', 'root_name') + + return root_file_info.values() + + +def create_archived_proposals_context(inst): + """Generate and save a json file containing the information needed + to create an instrument's archive page. - return variables, dashboard + Parameters + ---------- + inst : str + Name of JWST instrument + """ + # Ensure the instrument is correctly capitalized + inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] + # Get a list of Observation entries for the given instrument + all_entries = Observation.objects.filter(proposal__archive__instrument=inst) -def nirspec_trending(): - """Container for Miri datatrending dashboard and components + # Get a list of proposal numbers. + prop_objects = Proposal.objects.filter(archive__instrument=inst) + proposal_nums = [entry.prop_id for entry in prop_objects] - Returns - ------- - variables : int - nonsense - dashboard : list - A list containing the JavaScript and HTML content for the - dashboard - """ - dashboard, variables = nirspec_dash.data_trending_dashboard() + # Put proposals into descending order + proposal_nums.sort(reverse=True) - return variables, dashboard + # Total number of proposals for the instrument + num_proposals = len(proposal_nums) + + thumbnail_paths = [] + min_obsnums = [] + total_files = [] + proposal_viewed = [] + proposal_exp_types = [] + thumb_exp_types = [] + proposal_obs_times = [] + thumb_obs_time = [] + cat_types = [] + + # Get a set of all exposure types used in the observations associated with this proposal + exp_types = [exposure_type for observation in all_entries for exposure_type in observation.exptypes.split(',')] + exp_types = sorted(set(exp_types)) + + # Get all proposals based on category type + proposals_by_category = get_proposals_by_category(inst) + unique_cat_types = sorted(set(proposals_by_category.values())) + + # The naming conventions for dropdown_menus are tightly coupled with the code, this should be changed down the line. 
+ dropdown_menus = {'look': THUMBNAIL_FILTER_LOOK, + 'exp_type': exp_types, + 'cat_type': unique_cat_types} + thumbnails_dict = {} + + for proposal_num in proposal_nums: + # For each proposal number, get all entries + prop_entries = all_entries.filter(proposal__prop_id=proposal_num) + + # All entries will have the same thumbnail_path, so just grab the first + thumbnail_paths.append(prop_entries[0].proposal.thumbnail_path) + + # Extract the observation numbers from each entry and find the minimum + prop_obsnums = [entry.obsnum for entry in prop_entries] + min_obsnums.append(min(prop_obsnums)) + + # Sum the file count from all observations to get the total file count for + # the proposal + prop_filecount = [entry.number_of_files for entry in prop_entries] + total_files.append(sum(prop_filecount)) + + # In order to know if a proposal contains all observations that + # are entirely viewed, check for at least one existing + # viewed=False in RootFileInfo + unviewed_root_file_infos = RootFileInfo.objects.filter(instrument=inst, proposal=proposal_num, viewed=False) + proposal_viewed.append("Viewed" if unviewed_root_file_infos.count() == 0 else "New") + + # Store comma separated list of exp_types associated with each proposal + proposal_exp_types = [exposure_type for observation in prop_entries for exposure_type in observation.exptypes.split(',')] + proposal_exp_types = list(set(proposal_exp_types)) + thumb_exp_types.append(','.join(proposal_exp_types)) + + # Get Most recent observation start time + proposal_obs_times = [observation.obsstart for observation in prop_entries] + thumb_obs_time.append(max(proposal_obs_times)) + + # Add category type to list based on proposal number + cat_types.append(proposals_by_category[int(proposal_num)]) + + thumbnails_dict['proposals'] = proposal_nums + thumbnails_dict['thumbnail_paths'] = thumbnail_paths + thumbnails_dict['num_files'] = total_files + thumbnails_dict['viewed'] = proposal_viewed + thumbnails_dict['exp_types'] = thumb_exp_types + thumbnails_dict['obs_time'] = thumb_obs_time + thumbnails_dict['cat_types'] = cat_types + + context = {'inst': inst, + 'num_proposals': num_proposals, + 'min_obsnum': min_obsnums, + 'thumbnails': thumbnails_dict, + 'dropdown_menus': dropdown_menus} + + json_object = json.dumps(context, indent=4) + + # Writing to json file + outfilename = os.path.join(OUTPUT_DIR, 'archive_page', f'{inst}_archive_context.json') + with open(outfilename, "w") as outfile: + outfile.write(json_object) + set_permissions(outfilename) def get_acknowledgements(): @@ -208,58 +371,168 @@ def get_all_proposals(): A list of proposal numbers for all proposals that exist in the filesystem """ + proprietary_proposals = os.listdir(os.path.join(FILESYSTEM_DIR, 'proprietary')) + public_proposals = os.listdir(os.path.join(FILESYSTEM_DIR, 'public')) + all_proposals = [prop[2:] for prop in proprietary_proposals + public_proposals if 'jw' in prop] + proposals = sorted(list(set(all_proposals)), reverse=True) + return proposals - proposals = glob.glob(os.path.join(FILESYSTEM_DIR, 'public', '*')) - proposals.extend(glob.glob(os.path.join(FILESYSTEM_DIR, 'proprietary', '*'))) - proposals = sorted(list(set(proposals))) - proposals = [proposal.split('jw')[-1] for proposal in proposals] - proposals = [proposal for proposal in proposals if len(proposal) == 5] - return proposals +def get_available_suffixes(all_suffixes, return_untracked=True): + """ + Put available suffixes in a consistent order. 
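[Editorial note: get_all_proposals now derives the proposal list directly from the public and proprietary directory names. The string handling reduces to this sketch, with example directory names standing in for os.listdir():]

proprietary_proposals = ['jw01022', 'jw01059']
public_proposals = ['jw01022', 'jw02733', 'README.txt']

# Strip the leading 'jw', drop non-proposal entries, and de-duplicate
all_proposals = [prop[2:] for prop in proprietary_proposals + public_proposals if 'jw' in prop]
proposals = sorted(set(all_proposals), reverse=True)
print(proposals)  # ['02733', '01059', '01022']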
+ + Any suffixes not recognized are returned at the end of the suffix + list, in random order. + Parameters + ---------- + all_suffixes : list of str + List of all data product suffixes found for a given file root. + return_untracked : bool, optional + If set, a set of untracked suffixes is also returned, for + logging or diagnostic purposes. + + Returns + ------- + suffixes : list of str + All available unique suffixes in standard order. + untracked_suffixes : set of str, optional + Any suffixes that were not recognized. + """ + # Check if any of the + # suffixes are not in the list that specifies order. + suffixes = [] + untracked_suffixes = set(all_suffixes) + for poss_suffix in EXPOSURE_PAGE_SUFFIX_ORDER: + if 'crf' not in poss_suffix: + if (poss_suffix in all_suffixes + and poss_suffix not in suffixes): + suffixes.append(poss_suffix) + untracked_suffixes.remove(poss_suffix) + else: + # EXPOSURE_PAGE_SUFFIX_ORDER contains crf and crfints, + # but the actual suffixes in the data will be e.g. o001_crf, + # and there may be more than one crf file in the list of suffixes. + # So in this case, we strip the e.g. o001 from the + # suffixes and check which list elements match. + for image_suffix in all_suffixes: + if (image_suffix.endswith(poss_suffix) and + image_suffix not in suffixes): + suffixes.append(image_suffix) + untracked_suffixes.remove(image_suffix) + + # If the data contain any suffixes that are not in the list + # that specifies the order to use, add them to the end of the + # suffixes list. Their order will be random since they are not in + # EXPOSURE_PAGE_SUFFIX_ORDER. + if len(untracked_suffixes) > 0: + suffixes.extend(untracked_suffixes) + + if return_untracked: + return suffixes, untracked_suffixes + else: + return suffixes -def get_current_flagged_anomalies(rootname, instrument): + +def get_current_flagged_anomalies(rootfileinfo_set): """Return a list of currently flagged anomalies for the given ``rootname`` + This function may be used to retrieve the current anomalies + for single rootfileinfo or sets of rootfileinfos in an exposure group. Group + anomalies are returned if they are true in every rootfileinfo in the set. + For single files, any anomaly present for the file is a current anomaly. + + Parameters ---------- - rootname : str - The rootname of interest (e.g. - ``jw86600008001_02101_00001_guider2/``) + rootfileinfo_set : RootFileInfo Queryset + A query set of 1 or more RootFileInfos of interest + Must be iterable, even if only one RootFileInfo. Returns ------- - current_anomalies : list - A list of currently flagged anomalies for the given ``rootname`` + current_anomalies : list of str + A list of currently flagged anomalies for the given rootfileinfo_set (e.g. 
``['snowball', 'crosstalk']``) """ + all_anomalies = Anomalies.get_all_anomalies() + anomalies_set = [] + current_anomalies = [] + empty_anomaly_found = False + for rootfileinfo in rootfileinfo_set: + try: + anomalies_set.append(rootfileinfo.anomalies.get_marked_anomalies()) + except (ObjectDoesNotExist, AttributeError): + empty_anomaly_found = True + break + + if not empty_anomaly_found: + # If all RootFileInfos have anomalies, calculate which anomalies exist in every RootFileInfo + flat_list = [anomaly for sublist in anomalies_set for anomaly in sublist] + for anomaly in all_anomalies: + if flat_list.count(anomaly) == len(rootfileinfo_set): + current_anomalies.append(anomaly) - table_dict = {} - table_dict[instrument.lower()] = getattr(di, '{}Anomaly'.format(JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()])) + return current_anomalies - table = table_dict[instrument.lower()] - query = di.session.query(table).filter(table.rootname == rootname).order_by(table.flag_date.desc()).limit(1) - all_records = query.data_frame - if not all_records.empty: - current_anomalies = [col for col, val in np.sum(all_records, axis=0).items() if val] - else: - current_anomalies = [] +def get_anomaly_form(request, inst, file_root): + """Generate form data for context - return current_anomalies + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + inst : str + Name of JWST instrument + file_root : str + FITS filename of selected image in filesystem. May be a + file or group root name. + + Returns + ------- + InstrumentAnomalySubmitForm object + form object to be sent with context to template + """ + # Check for group root name + rootfileinfo_set = RootFileInfo.objects.filter(root_name__startswith=file_root) + # Determine current flagged anomalies + current_anomalies = get_current_flagged_anomalies(rootfileinfo_set) + # Create a form instance + form = InstrumentAnomalySubmitForm(request.POST or None, instrument=inst.lower(), initial={'anomaly_choices': current_anomalies}) + + # If this is a POST request and the form is filled out, process the form data + if request.method == 'POST': + anomaly_choices = dict(request.POST).get('anomaly_choices', []) + if form.is_valid(): + for rootfileinfo in rootfileinfo_set: + # for a group form submit, add any individual anomalies + # not in the original group set + if len(rootfileinfo_set) > 1: + file_current = get_current_flagged_anomalies([rootfileinfo]) + choices = anomaly_choices.copy() + for choice in file_current: + if choice not in current_anomalies: + choices.append(choice) + else: + choices = anomaly_choices + form.update_anomaly_table(rootfileinfo, 'unknown', choices) # TODO do we actually want usernames? + messages.success(request, "Anomaly submitted successfully") + else: + messages.error(request, "Failed to submit anomaly") + + return form def get_dashboard_components(request): - """Build and return dictionaries containing components and html - needed for the dashboard. + """Build and return a Dashboard class. Returns ------- - dashboard_components : dict - A dictionary containing components needed for the dashboard. - dashboard_html : dict - A dictionary containing full HTML needed for the dashboard. + dashboard_components : GeneralDashboard + The dashboard. 
""" from jwql.website.apps.jwql.bokeh_dashboard import GeneralDashboard @@ -344,7 +617,8 @@ def get_edb_components(request): mnemonic_query_result = get_mnemonic(mnemonic_identifier, start_time, end_time) if len(mnemonic_query_result.data) == 0: - mnemonic_query_status = "QUERY RESULT RETURNED NO DATA FOR {} ON DATES {} - {}".format(mnemonic_identifier, start_time, end_time) + mnemonic_query_status = "QUERY RESULT RETURNED NO DATA FOR {} ON DATES {} - {}".format(mnemonic_identifier, + start_time, end_time) else: mnemonic_query_status = 'SUCCESS' @@ -380,7 +654,7 @@ def get_edb_components(request): comments.append(' ') result_table.write(path_for_download, format='ascii.fixed_width', overwrite=True, delimiter=',', bookend=False) - mnemonic_query_result.file_for_download = file_for_download + mnemonic_query_result.file_for_download = path_for_download # create forms for search fields not clicked mnemonic_name_search_form = MnemonicSearchForm(prefix='mnemonic_name_search') @@ -429,7 +703,7 @@ def get_edb_components(request): path_for_download = os.path.join(static_dir, file_for_download) display_table.write(path_for_download, format='ascii.fixed_width', overwrite=True, delimiter=',', bookend=False) - mnemonic_exploration_result.file_for_download = file_for_download + mnemonic_exploration_result.file_for_download = path_for_download if mnemonic_exploration_result.n_rows == 0: mnemonic_exploration_result = 'empty' @@ -498,13 +772,19 @@ def get_expstart(instrument, rootname): return expstart -def get_filenames_by_instrument(instrument, restriction='all', query_file=None, query_response=None): +def get_filenames_by_instrument(instrument, proposal, observation_id=None, + restriction='all', query_file=None, + query_response=None, other_columns=None): """Returns a list of filenames that match the given ``instrument``. Parameters ---------- instrument : str The instrument of interest (e.g. `FGS`). + proposal : str + Proposal number to filter the results + observation_id : str + Observation number to filter the results restriction : str If ``all``, all filenames will be returned. If ``public``, only publicly-available filenames will be returned. If @@ -514,18 +794,25 @@ def get_filenames_by_instrument(instrument, restriction='all', query_file=None, filenames in this file will be used rather than calling mask_query_filenames_by_instrument. This can save a significant amount of time when the number of files is large. query_response : dict - Dictionary with "data" key ontaining a list of filenames. This is assumed to + Dictionary with "data" key containing a list of filenames. This is assumed to essentially be the returned value from a call to mast_query_filenames_by_instrument. If this is provided, the call to that function is skipped, which can save a significant amount of time. + other_columns : list + List of other columns to retrieve from the MAST query Returns ------- filenames : list A list of files that match the given instrument. + col_data : dict + Dictionary of other attributes returned from MAST. Keys are the attribute names + e.g. 'exptime', and values are lists of the value for each filename. e.g. 
['59867.6, 59867.601'] """ if not query_file and not query_response: - result = mast_query_filenames_by_instrument(instrument) + result = mast_query_filenames_by_instrument( + instrument, proposal, observation_id=observation_id, + other_columns=other_columns) elif query_response: result = query_response @@ -533,20 +820,85 @@ def get_filenames_by_instrument(instrument, restriction='all', query_file=None, with open(query_file) as fobj: result = fobj.readlines() + if other_columns is not None: + col_data = {} + for element in other_columns: + col_data[element] = [] + # Determine filenames to return based on restriction parameter if restriction == 'all': filenames = [item['filename'] for item in result['data']] + if other_columns is not None: + for keyword in other_columns: + col_data[keyword] = [item[keyword] for item in result['data']] elif restriction == 'public': filenames = [item['filename'] for item in result['data'] if item['isRestricted'] is False] + if other_columns is not None: + for keyword in other_columns: + col_data[keyword] = [item[keyword] for item in result['data'] if item['isRestricted'] is False] elif restriction == 'proprietary': filenames = [item['filename'] for item in result['data'] if item['isRestricted'] is True] + if other_columns is not None: + for keyword in other_columns: + col_data[keyword] = [item[keyword] for item in result['data'] if item['isRestricted'] is True] else: raise KeyError('{} is not a valid restriction level. Use "all", "public", or "proprietary".'.format(restriction)) + if other_columns is not None: + return (filenames, col_data) + return filenames -def mast_query_filenames_by_instrument(instrument): +def mast_query_by_rootname(instrument, rootname): + """Query MAST for all columns given an instrument and rootname. Return the dict of the 'data' column + + Parameters + ---------- + instrument : str + The instrument of interest (e.g. `FGS`). + rootname : str + The Rootname of Interest + + Returns + ------- + result : dict + Dictionary of rootname data + """ + + query_filters = [] + if '-seg' in rootname: + root_split = rootname.split('-') + file_set_name = root_split[0] + root_split = rootname.split('_') + detector = root_split[-1] + else: + root_split = rootname.split('_') + file_set_name = '_'.join(root_split[:-1]) + detector = root_split[-1] + + service = INSTRUMENT_SERVICE_MATCH[instrument] + + query_filters.append({'paramName': 'fileSetName', 'values': [file_set_name]}) + query_filters.append({'paramName': 'detector', 'values': [detector.upper()]}) + params = {'columns': '*', + 'filters': query_filters} + try: + response = Mast.service_request_async(service, params) + result = response[0].json() + except Exception as e: + logging.error("Mast.service_request_async- {} - {}".format(file_set_name, e)) + result = {'data': []} + + retval = {} + if result['data'] == []: + print("WARNING: no data for {}".format(rootname)) + else: + retval = result['data'][0] + return retval + + +def mast_query_filenames_by_instrument(instrument, proposal_id, observation_id=None, other_columns=None): """Query MAST for filenames for the given instrument. Return the json response from MAST. @@ -554,14 +906,110 @@ def mast_query_filenames_by_instrument(instrument): ---------- instrument : str The instrument of interest (e.g. `FGS`). + proposal_id : str + Proposal ID number to use to filter the results + observation_id : str + Observation ID number to use to filter the results. 
If None, all files for the ``proposal_id`` are + retrieved + other_columns : list + List of other columns to return from the MAST query + + Returns + ------- + result : dict + Dictionary of file information """ + # Be sure the instrument name is properly capitalized + instrument = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()] + + if other_columns is None: + columns = "filename, isRestricted" + else: + columns = "filename, isRestricted, " + ", ".join(other_columns) + service = INSTRUMENT_SERVICE_MATCH[instrument] - params = {"columns": "filename, isRestricted", "filters": []} + filters = [{'paramName': 'program', "values": [proposal_id]}] + if observation_id is not None: + filters.append({'paramName': 'observtn', 'values': [observation_id]}) + params = {"columns": columns, "filters": filters} response = Mast.service_request_async(service, params) result = response[0].json() return result +def get_filesystem_filenames(proposal=None, rootname=None, + file_types=None, full_path=False, + sort_names=True): + """Return a list of filenames on the filesystem. + + One of proposal or rootname must be specified. If both are + specified, only proposal is used. + + Parameters + ---------- + proposal : str, optional + The one- to five-digit proposal number (e.g. ``88600``). + rootname : str, optional + The rootname of interest (e.g. + ``jw86600008001_02101_00007_guider2``). + file_types : list of str, optional + If provided, only matching file extension types will be + returned (e.g. ['fits', 'jpg']). + full_path : bool, optional + If set, the full path to the file will be returned instead + of the basename. + sort_names : bool, optional + If set, the returned files are sorted. + + Returns + ------- + filenames : list + A list of filenames associated with the given ``rootname``. + """ + if proposal is not None: + proposal_string = '{:05d}'.format(int(proposal)) + filenames = glob.glob( + os.path.join(FILESYSTEM_DIR, 'public', + 'jw{}'.format(proposal_string), '*/*')) + filenames.extend(glob.glob( + os.path.join(FILESYSTEM_DIR, 'proprietary', + 'jw{}'.format(proposal_string), '*/*'))) + elif rootname is not None: + proposal_dir = rootname[0:7] + observation_dir = rootname.split('_')[0] + filenames = glob.glob( + os.path.join(FILESYSTEM_DIR, 'public', proposal_dir, + observation_dir, '{}*'.format(rootname))) + filenames.extend(glob.glob( + os.path.join(FILESYSTEM_DIR, 'proprietary', proposal_dir, + observation_dir, '{}*'.format(rootname)))) + else: + logging.warning("Must provide either proposal or rootname; " + "no files returned.") + filenames = [] + + # check suffix and file type + good_filenames = [] + for filename in filenames: + split_file = os.path.splitext(filename) + + # certain suffixes are always ignored + test_suffix = split_file[0].split('_')[-1] + if test_suffix not in IGNORED_SUFFIXES: + + # check against additional file type requirement + test_type = split_file[-1].lstrip('.') + if file_types is None or test_type in file_types: + if full_path: + good_filenames.append(filename) + else: + good_filenames.append(os.path.basename(filename)) + + if sort_names: + good_filenames.sort() + return good_filenames + + def get_filenames_by_proposal(proposal): """Return a list of filenames that are available in the filesystem for the given ``proposal``. @@ -576,16 +1024,7 @@ def get_filenames_by_proposal(proposal): filenames : list A list of filenames associated with the given ``proposal``. 
""" - - proposal_string = '{:05d}'.format(int(proposal)) - filenames = glob.glob(os.path.join(FILESYSTEM_DIR, 'public', 'jw{}'.format(proposal_string), '*/*')) - filenames.extend(glob.glob(os.path.join(FILESYSTEM_DIR, 'proprietary', 'jw{}'.format(proposal_string), '*/*'))) - - # Certain suffixes are always ignored - filenames = [filename for filename in filenames if os.path.splitext(filename).split('_')[-1] not in IGNORED_SUFFIXES] - filenames = sorted([os.path.basename(filename) for filename in filenames]) - - return filenames + return get_filesystem_filenames(proposal=proposal) def get_filenames_by_rootname(rootname): @@ -603,18 +1042,7 @@ def get_filenames_by_rootname(rootname): filenames : list A list of filenames associated with the given ``rootname``. """ - - proposal_dir = rootname[0:7] - observation_dir = rootname.split('_')[0] - - filenames = glob.glob(os.path.join(FILESYSTEM_DIR, 'public', proposal_dir, observation_dir, '{}*'.format(rootname))) - filenames.extend(glob.glob(os.path.join(FILESYSTEM_DIR, 'proprietary', proposal_dir, observation_dir, '{}*'.format(rootname)))) - - # Certain suffixes are always ignored - filenames = [filename for filename in filenames if os.path.splitext(filename).split('_')[-1] not in IGNORED_SUFFIXES] - filenames = sorted([os.path.basename(filename) for filename in filenames]) - - return filenames + return get_filesystem_filenames(rootname=rootname) def get_header_info(filename, filetype): @@ -650,11 +1078,13 @@ def get_header_info(filename, filetype): # Get header header = hdulist[ext].header - # Determine the extension name + # Determine the extension name and type if ext == 0: header_info[ext]['EXTNAME'] = 'PRIMARY' + header_info[ext]['XTENSION'] = 'PRIMARY' else: header_info[ext]['EXTNAME'] = header['EXTNAME'] + header_info[ext]['XTENSION'] = header['XTENSION'] # Get list of keywords and values exclude_list = ['', 'COMMENT'] @@ -678,7 +1108,7 @@ def get_header_info(filename, filetype): return header_info -def get_image_info(file_root, rewrite): +def get_image_info(file_root): """Build and return a dictionary containing information for a given ``file_root``. @@ -687,9 +1117,6 @@ def get_image_info(file_root, rewrite): file_root : str The rootname of the file of interest (e.g. ``jw86600008001_02101_00007_guider2``). - rewrite : bool - ``True`` if the corresponding JPEG needs to be rewritten, - ``False`` if not. 
Returns ------- @@ -704,15 +1131,23 @@ def get_image_info(file_root, rewrite): image_info['suffixes'] = [] image_info['num_ints'] = {} image_info['available_ints'] = {} + image_info['total_ints'] = {} + image_info['detectors'] = set() - # Find all of the matching files + # Find all the matching files proposal_dir = file_root[:7] observation_dir = file_root[:13] - filenames = glob.glob(os.path.join(FILESYSTEM_DIR, 'public', proposal_dir, observation_dir, '{}*.fits'.format(file_root))) - filenames.extend(glob.glob(os.path.join(FILESYSTEM_DIR, 'proprietary', proposal_dir, observation_dir, '{}*.fits'.format(file_root)))) + filenames = glob.glob( + os.path.join(FILESYSTEM_DIR, 'public', proposal_dir, + observation_dir, '{}*.fits'.format(file_root))) + filenames.extend(glob.glob( + os.path.join(FILESYSTEM_DIR, 'proprietary', proposal_dir, + observation_dir, '{}*.fits'.format(file_root)))) # Certain suffixes are always ignored - filenames = [filename for filename in filenames if os.path.splitext(filename)[0].split('_')[-1] not in IGNORED_SUFFIXES] + filenames = [filename for filename in filenames + if os.path.splitext(filename)[0].split('_')[-1] + not in IGNORED_SUFFIXES] image_info['all_files'] = filenames # Determine the jpg directory @@ -721,28 +1156,85 @@ def get_image_info(file_root, rewrite): for filename in image_info['all_files']: + parsed_fn = filename_parser(filename) + # Get suffix information - suffix = os.path.basename(filename).split('_')[4].split('.')[0] + suffix = parsed_fn['suffix'] + + # For crf or crfints suffixes, we need to also include the association value + # in the suffix, so that preview images can be found later. + if suffix in SUFFIXES_TO_ADD_ASSOCIATION: + assn = filename.split('_')[-2] + suffix = f'{assn}_{suffix}' + image_info['suffixes'].append(suffix) # Determine JPEG file location jpg_filename = os.path.basename(os.path.splitext(filename)[0] + '_integ0.jpg') jpg_filepath = os.path.join(jpg_dir, jpg_filename) - # Check that a jpg does not already exist. If it does (and rewrite=False), - # just call the existing jpg file - if os.path.exists(jpg_filepath) and not rewrite: - pass - - # Record how many integrations there are per filetype - jpgs = glob.glob(os.path.join(prev_img_filesys, observation_dir, '{}_{}_integ*.jpg'.format(file_root, suffix))) - image_info['num_ints'][suffix] = len(jpgs) - image_info['available_ints'][suffix] = sorted([int(jpg.split('_')[-1].replace('.jpg', '').replace('integ', '')) for jpg in jpgs]) + # Record how many integrations have been saved as preview images per filetype + jpgs = glob.glob(os.path.join(prev_img_filesys, proposal_dir, '{}*_{}_integ*.jpg'.format(file_root, suffix))) + image_info['available_ints'][suffix] = sorted(set([int(jpg.split('_')[-1].replace('.jpg', '').replace('integ', '')) for jpg in jpgs])) + image_info['num_ints'][suffix] = len(image_info['available_ints'][suffix]) image_info['all_jpegs'].append(jpg_filepath) + # Record how many integrations exist per filetype. 
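For reference, the available-integration counting above relies on the `*_integ<N>.jpg` naming convention for preview images: the integration index is recovered from the end of each JPEG filename and de-duplicated. A standalone sketch of that parsing step (the filenames below are invented examples, not from the filesystem):

```python
# Illustrative sketch only -- example JPEG names follow the *_integ<N>.jpg
# convention assumed by get_image_info above.
jpgs = ['jw01022001001_02101_00001_nrca1_rate_integ0.jpg',
        'jw01022001001_02101_00001_nrca1_rate_integ2.jpg',
        'jw01022001001_02101_00001_nrca1_rate_integ0.jpg']  # duplicate on purpose

# Strip '.jpg' and the 'integ' prefix from the last underscore-separated token,
# then sort the unique integration indices.
available_ints = sorted(set(int(jpg.split('_')[-1].replace('.jpg', '').replace('integ', ''))
                            for jpg in jpgs))
num_ints = len(available_ints)
print(available_ints, num_ints)  # [0, 2] 2
```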
+ if suffix not in SUFFIXES_WITH_AVERAGED_INTS: + header = fits.getheader(filename) + nint = header['NINTS'] + if 'time_series' in parsed_fn['filename_type']: + # time series segments need special handling + intstart = header.get('INTSTART', 1) + intend = header.get('INTEND', nint) + image_info['total_ints'][suffix] = intend - intstart + 1 + elif image_info['num_ints'][suffix] > nint: + # so do data cubes: + # get max ints from data shape in first extension + sci_header = fits.getheader(filename, ext=1) + n_frame = sci_header.get('NAXIS3', nint) + + # for groups with multiple cubes (e.g. miri with ifu + # short and long), make sure we keep the highest total + if 'suffix' in image_info['total_ints']: + if n_frame > image_info['total_ints'][suffix]: + image_info['total_ints'][suffix] = n_frame + else: + image_info['total_ints'][suffix] = n_frame + else: + image_info['total_ints'][suffix] = nint + else: + image_info['total_ints'][suffix] = 1 + + # Record the detector used + image_info['detectors'].add(parsed_fn.get('detector', 'Unknown')) + return image_info +def get_explorer_extension_names(fits_file, filetype): + """ Return a list of Extensions that can be explored interactively + + Parameters + ---------- + filename : str + The name of the file of interest, without the extension + (e.g. ``'jw86600008001_02101_00007_guider2_uncal'``). + filetype : str + The type of the file of interest, (e.g. ``'uncal'``) + + Returns + ------- + extensions : list + List of Extensions found in header and allowed to be Explored (extension type "IMAGE") + """ + + header_info = get_header_info(fits_file, filetype) + + extensions = [header_info[extension]['EXTNAME'] for extension in header_info if header_info[extension]['XTENSION'] == 'IMAGE'] + return extensions + + def get_instrument_proposals(instrument): """Return a list of proposals for the given instrument @@ -754,60 +1246,90 @@ def get_instrument_proposals(instrument): Returns ------- - proposals : list + inst_proposals : list List of proposals for the given instrument """ - - service = "Mast.Jwst.Filtered.{}".format(instrument) - params = {"columns": "program", - "filters": []} - response = Mast.service_request_async(service, params) - results = response[0].json()['data'] - proposals = list(set(result['program'] for result in results)) - - return proposals + tap_service = vo.dal.TAPService("https://vao.stsci.edu/caomtap/tapservice.aspx") + tap_results = tap_service.search(f"""select distinct prpID from CaomObservation where collection='JWST' + and maxLevel>0 and insName like '{instrument.lower()}%'""") + prop_table = tap_results.to_table() + proposals = prop_table['prpID'].data + inst_proposals = sorted(proposals.compressed(), reverse=True) + return inst_proposals -def get_preview_images_by_instrument(inst): - """Return a list of preview images available in the filesystem for - the given instrument. +def get_instrument_looks(instrument, sort_as=None, proposal=None, + look=None, exp_type=None, cat_type=None, + additional_keys=None): + """Return a table of looks information for the given instrument. Parameters ---------- - inst : str - The instrument of interest (e.g. ``NIRCam``). + instrument : str + Name of the JWST instrument. + sort_as : {'ascending', 'descending', 'recent'} + Sorting method for output table. Ascending and descending + options refer to root file name; recent sorts by observation + start. + proposal : str, optional + Proposal to match. Used as a 'starts with' filter. 
+ look : {'new', 'viewed'}, optional + If set to None, all viewed values are returned. If set to + 'viewed', only viewed data is returned. If set to 'new', only + new data is returned. + exp_type : str, optional + Set to filter by exposure type. + cat_type : str, optional + Set to filter by proposal category. + additional_keys : list of str, optional + Additional model attribute names for information to return. Returns ------- - preview_images : list - A list of preview images available in the filesystem for the - given instrument. + keys : list of str + Report values returned for the given instrument. + looks : list of dict + List of looks information by root file for the given instrument. """ + # standardize input + inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()] + + # required keys + keys = ['root_name'] + + # optional keys by instrument + keys += REPORT_KEYS_PER_INSTRUMENT[inst.lower()] + + # add any additional keys + key_set = set(keys) + if additional_keys is not None: + for key in additional_keys: + if key not in key_set: + keys.append(key) + + # get filtered file info + root_file_info = filter_root_files( + instrument=instrument, sort_as=sort_as, look=look, + exp_type=exp_type, cat_type=cat_type, proposal=proposal) + + looks = [] + for root_file in root_file_info: + result = dict() + for key in keys: + try: + # try the root file table + value = root_file[key] + except KeyError: + value = '' - # Make sure the instrument is of the proper format (e.g. "Nircam") - instrument = inst[0].upper() + inst[1:].lower() - - # Query MAST for all rootnames for the instrument - service = "Mast.Jwst.Filtered.{}".format(instrument) - params = {"columns": "filename", - "filters": []} - response = Mast.service_request_async(service, params) - results = response[0].json()['data'] - - # Parse the results to get the rootnames - filenames = [result['filename'].split('.')[0] for result in results] - - # Get list of all preview_images. Text file contains only preview - # images for a single instrument. 
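The report rows returned by `get_instrument_looks` above are plain dicts keyed by the requested attribute names, with an empty-string fallback for missing keys and a string coercion so every value stays serializable. A standalone sketch of that row-building step (the sample record is fabricated, not real RootFileInfo data):

```python
# Illustrative sketch only -- 'root_file' is a fabricated stand-in for one entry
# of the RootFileInfo values() queryset used in get_instrument_looks.
from datetime import datetime

keys = ['root_name', 'expstart', 'obsnum']               # requested report columns
root_file = {'root_name': 'jw01022001001_02101_00001_nrca1',
             'expstart': datetime(2022, 7, 12)}          # 'obsnum' intentionally missing

result = {}
for key in keys:
    try:
        value = root_file[key]          # try the root file record first
    except KeyError:
        value = ''                      # missing attributes become empty strings
    # make sure the value can be serialized (e.g. datetimes become strings)
    if type(value) not in [str, float, int, bool]:
        value = str(value)
    result[key] = value

print(result)
# {'root_name': 'jw01022001001_02101_00001_nrca1',
#  'expstart': '2022-07-12 00:00:00', 'obsnum': ''}
```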
- preview_list_file = f"{PREVIEW_IMAGE_LISTFILE}_{inst.lower()}.txt" - preview_images = retrieve_filelist(os.path.join(PREVIEW_IMAGE_FILESYSTEM, preview_list_file)) + # make sure value can be serialized + if type(value) not in [str, float, int, bool]: + value = str(value) - # Get subset of preview images that match the filenames - preview_images = [os.path.basename(item) for item in preview_images if - os.path.basename(item).split('_integ')[0] in filenames] + result[key] = value + looks.append(result) - # Return only - return preview_images + return keys, looks def get_preview_images_by_proposal(proposal): @@ -829,7 +1351,7 @@ def get_preview_images_by_proposal(proposal): proposal_string = '{:05d}'.format(int(proposal)) preview_images = glob.glob(os.path.join(PREVIEW_IMAGE_FILESYSTEM, 'jw{}'.format(proposal_string), '*')) preview_images = [os.path.basename(preview_image) for preview_image in preview_images] - preview_images = [item for item in preview_images if os.path.splitext(item).split('_')[-1] not in IGNORED_SUFFIXES] + preview_images = [item for item in preview_images if os.path.splitext(item)[0].split('_')[-1] not in IGNORED_SUFFIXES] return preview_images @@ -857,11 +1379,39 @@ def get_preview_images_by_rootname(rootname): 'jw{}'.format(proposal), '{}*'.format(rootname)))) preview_images = [os.path.basename(preview_image) for preview_image in preview_images] - preview_images = [item for item in preview_images if os.path.splitext(item).split('_')[-1] not in IGNORED_SUFFIXES] + preview_images = [item for item in preview_images if os.path.splitext(item)[0].split('_')[-1] not in IGNORED_SUFFIXES] return preview_images +def get_proposals_by_category(instrument): + """Return a dictionary of program numbers based on category type + Parameters + ---------- + instrument : str + Name of the JWST instrument, with first letter capitalized + (e.g. ``Fgs``) + Returns + ------- + category_sorted_dict : dict + Dictionary with category as the key and a list of program id's as the value + """ + + service = "Mast.Jwst.Filtered.{}".format(instrument) + params = {"columns": "program, category", + "filters": [{'paramName':'instrume', 'values':[instrument]}]} + response = Mast.service_request_async(service, params) + results = response[0].json()['data'] + + # Get all unique dictionaries + unique_results = list(map(dict, set(tuple(sorted(sub.items())) for sub in results))) + + # Make a dictionary of {program: category} to pull from + proposals_by_category = {d['program']: d['category'] for d in unique_results} + + return proposals_by_category + + def get_proposal_info(filepaths): """Builds and returns a dictionary containing various information about the proposal(s) that correspond to the given ``filepaths``. 
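The de-duplication step in `get_proposals_by_category` above can look opaque: each result dict is converted to a sorted tuple of items (which is hashable), collapsed through a `set`, and then rebuilt as a dict. A small self-contained illustration with made-up MAST-style rows (not real query output):

```python
# Illustrative sketch only -- the rows below are invented stand-ins for the
# 'data' entries returned by the MAST query in get_proposals_by_category.
results = [{'program': 1022, 'category': 'COM'},
           {'program': 1022, 'category': 'COM'},   # duplicate row
           {'program': 1409, 'category': 'CAL'}]

# Dicts are unhashable, so convert each to a sorted tuple of items,
# deduplicate via set, then rebuild the dicts.
unique_results = list(map(dict, set(tuple(sorted(sub.items())) for sub in results)))

proposals_by_category = {d['program']: d['category'] for d in unique_results}
print(proposals_by_category)  # {1022: 'COM', 1409: 'CAL'} (order may vary)
```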
@@ -887,12 +1437,22 @@ def get_proposal_info(filepaths): num_files = [] # Gather thumbnails and counts for proposals - proposals, thumbnail_paths, num_files = [], [], [] + proposals, thumbnail_paths, num_files, observations = [], [], [], [] for filepath in filepaths: proposal = filepath.split('/')[-1][2:7] if proposal not in proposals: thumbnail_paths.append(os.path.join('jw{}'.format(proposal), 'jw{}.thumb'.format(proposal))) files_for_proposal = [item for item in filepaths if 'jw{}'.format(proposal) in item] + + obsnums = [] + for fname in files_for_proposal: + try: + obs = filename_parser(fname)['observation'] + obsnums.append(obs) + except KeyError: + pass + obsnums = sorted(obsnums) + observations.extend(obsnums) num_files.append(len(files_for_proposal)) proposals.append(proposal) @@ -902,109 +1462,132 @@ def get_proposal_info(filepaths): proposal_info['proposals'] = proposals proposal_info['thumbnail_paths'] = thumbnail_paths proposal_info['num_files'] = num_files + proposal_info['observation_nums'] = observations return proposal_info -def get_thumbnails_all_instruments(parameters): - """Return a list of thumbnails available in the filesystem for all - instruments given requested MAST parameters and queried anomalies. +def get_rootnames_for_proposal(proposal): + """Return a list of rootnames for the given proposal (all instruments) Parameters ---------- - parameters: dict - A dictionary containing the following keys, some of which are dictionaries: - instruments - apertures - filters - detector - effexptm_min - effexptm_max - anomalies + proposal : int or str + Proposal ID number Returns ------- - thumbnails : list - A list of thumbnails available in the filesystem for the - given instrument. + rootnames : list + List of rootnames for the given instrument and proposal number """ + tap_service = vo.dal.TAPService("https://vao.stsci.edu/caomtap/tapservice.aspx") + tap_results = tap_service.search(f"""select observationID from dbo.CaomObservation where + collection='JWST' and maxLevel=2 and prpID='{int(proposal)}'""") + prop_table = tap_results.to_table() + rootnames = prop_table['observationID'].data + return rootnames.compressed() - anomalies = parameters['anomalies'] - - thumbnails_subset = [] - - for inst in parameters['instruments']: - # Make sure instruments are of the proper format (e.g. 
"Nircam") - instrument = inst[0].upper() + inst[1:].lower() - - # Query MAST for all rootnames for the instrument - service = "Mast.Jwst.Filtered.{}".format(instrument) - - if ((parameters['apertures'][inst.lower()] == []) - and (parameters['detectors'][inst.lower()] == []) - and (parameters['filters'][inst.lower()] == []) - and (parameters['exposure_types'][inst.lower()] == []) - and (parameters['read_patterns'][inst.lower()] == [])): - params = {"columns": "*", "filters": []} - else: - query_filters = [] - if (parameters['apertures'][inst.lower()] != []): - if instrument != "Nircam": - query_filters.append({"paramName": "pps_aper", "values": parameters['apertures'][inst.lower()]}) - if instrument == "Nircam": - query_filters.append({"paramName": "apername", "values": parameters['apertures'][inst.lower()]}) - if (parameters['detectors'][inst.lower()] != []): - query_filters.append({"paramName": "detector", "values": parameters['detectors'][inst.lower()]}) - if (parameters['filters'][inst.lower()] != []): - query_filters.append({"paramName": "filter", "values": parameters['filters'][inst.lower()]}) - if (parameters['exposure_types'][inst.lower()] != []): - query_filters.append({"paramName": "exp_type", "values": parameters['exposure_types'][inst.lower()]}) - if (parameters['read_patterns'][inst.lower()] != []): - query_filters.append({"paramName": "readpatt", "values": parameters['read_patterns'][inst.lower()]}) - params = {"columns": "*", - "filters": query_filters} - - response = Mast.service_request_async(service, params) - results = response[0].json()['data'] - inst_filenames = [result['filename'].split('.')[0] for result in results] - inst_filenames = [filename for filename in inst_filenames if os.path.splitext(filename).split('_')[-1] not in IGNORED_SUFFIXES] - filenames.extend(inst_filenames) +def get_rootnames_from_query(parameters): + """Return a query_set of RootFileInfo given requested filter parameters. 
- # Get list of all thumbnails - thumbnail_list_file = f"{THUMBNAIL_LISTFILE}_{inst.lower()}.txt" - thumbnail_inst_list = retrieve_filelist(os.path.join(THUMBNAIL_FILESYSTEM, THUMBNAIL_LISTFILE)) + Parameters + ---------- + parameters: dict + A dictionary containing keys of QUERY_CONFIG_KEYS, some of which are dictionaries: - # Get subset of thumbnail images that match the filenames - thumbnails_inst_subset = [os.path.basename(item) for item in thumbnail_inst_list if - os.path.basename(item).split('_integ')[0] in inst_filenames] - # Eliminate any duplicates - thumbnails_inst_subset = list(set(thumbnails_inst_subset)) - thumbnails_subset.extend(thumbnails_inst_subset) + Returns + ------- + filtered_rootnames : list + A list of all root filenames filtered from the given parameters + """ - # Determine whether or not queried anomalies are flagged - final_subset = [] + filtered_rootnames = [] + DATE_FORMAT = "%Y/%m/%d %I:%M%p" # noqa n806 + + # Parse DATE_RANGE string into correct format + date_range = parameters[QueryConfigKeys.DATE_RANGE] + start_date_range, stop_date_range = date_range.split(" - ") + # Parse the strings into datetime objects + start_datetime = datetime.strptime(start_date_range, DATE_FORMAT) + stop_datetime = datetime.strptime(stop_date_range, DATE_FORMAT) + # store as astroquery Time objects in isot format to be used in filter (with mjd format) + start_time = Time(start_datetime.isoformat(), format="isot") + stop_time = Time(stop_datetime.isoformat(), format="isot") + + # Each Query Selection is Instrument specific + for inst in parameters[QueryConfigKeys.INSTRUMENTS]: + # Make sure instruments are of the proper format for the archive query + inst = inst.lower() + current_ins_rootfileinfos = RootFileInfo.objects.filter(instrument=JWST_INSTRUMENT_NAMES_MIXEDCASE[inst]) + + # General fields + sort_type = parameters[QueryConfigKeys.SORT_TYPE] + look_status = parameters[QueryConfigKeys.LOOK_STATUS] + + # Get a queryset of all observations STARTING within our date range + current_ins_rootfileinfos = current_ins_rootfileinfos.filter( + expstart__gte=start_time.mjd) + current_ins_rootfileinfos = current_ins_rootfileinfos.filter( + expstart__lte=stop_time.mjd) + + if len(look_status) == 1: + viewed = (look_status[0] == 'VIEWED') + current_ins_rootfileinfos = current_ins_rootfileinfos.filter(viewed=viewed) + proposal_category = parameters[QueryConfigKeys.PROPOSAL_CATEGORY] + if len(proposal_category) > 0: + current_ins_rootfileinfos = current_ins_rootfileinfos.filter(obsnum__proposal__category__in=proposal_category) + + # Instrument fields + inst_anomalies = parameters[QueryConfigKeys.ANOMALIES][inst] + inst_aperture = parameters[QueryConfigKeys.APERTURES][inst] + inst_detector = parameters[QueryConfigKeys.DETECTORS][inst] + inst_exp_type = parameters[QueryConfigKeys.EXP_TYPES][inst] + inst_filter = parameters[QueryConfigKeys.FILTERS][inst] + inst_grating = parameters[QueryConfigKeys.GRATINGS][inst] + inst_pupil = parameters[QueryConfigKeys.PUPILS][inst] + inst_read_patt = parameters[QueryConfigKeys.READ_PATTS][inst] + inst_subarray = parameters[QueryConfigKeys.SUBARRAYS][inst] + + if (inst_aperture != []): + current_ins_rootfileinfos = current_ins_rootfileinfos.filter(aperture__in=inst_aperture) + if (inst_detector != []): + current_ins_rootfileinfos = current_ins_rootfileinfos.filter(detector__in=inst_detector) + if (inst_exp_type != []): + current_ins_rootfileinfos = current_ins_rootfileinfos.filter(exp_type__in=inst_exp_type) + if (inst_filter != []): + current_ins_rootfileinfos 
= current_ins_rootfileinfos.filter(filter__in=inst_filter) + if (inst_grating != []): + current_ins_rootfileinfos = current_ins_rootfileinfos.filter(grating__in=inst_grating) + if (inst_pupil != []): + current_ins_rootfileinfos = current_ins_rootfileinfos.filter(pupil__in=inst_pupil) + if (inst_read_patt != []): + current_ins_rootfileinfos = current_ins_rootfileinfos.filter(read_patt__in=inst_read_patt) + if (inst_subarray != []): + current_ins_rootfileinfos = current_ins_rootfileinfos.filter(subarray__in=inst_subarray) + if (inst_anomalies != []): + anomaly_rootfileinfos = RootFileInfo.objects.none() + for anomaly in inst_anomalies: + # If the rootfile info has any of the marked anomalies we want it + anomaly_filter = "anomalies__" + str(anomaly).lower() + anomaly_rootfileinfos = anomaly_rootfileinfos.union(current_ins_rootfileinfos.filter(**{anomaly_filter: True})) + current_ins_rootfileinfos = current_ins_rootfileinfos.intersection(anomaly_rootfileinfos) + + # sort as desired + if sort_type.upper() == 'ASCENDING': + current_ins_rootfileinfos = current_ins_rootfileinfos.order_by('root_name') + elif sort_type.upper() == 'RECENT': + current_ins_rootfileinfos = current_ins_rootfileinfos.order_by('-expstart', 'root_name') + elif sort_type.upper() == 'OLDEST': + current_ins_rootfileinfos = current_ins_rootfileinfos.order_by('expstart', 'root_name') + else: + current_ins_rootfileinfos = current_ins_rootfileinfos.order_by('-root_name') - if anomalies != {'miri': [], 'nirspec': [], 'niriss': [], 'nircam': [], 'fgs': []}: - for thumbnail in thumbnails_subset: - components = thumbnail.split('_') - rootname = ''.join((components[0], '_', components[1], '_', components[2], '_', components[3])) - try: - instrument = filename_parser(thumbnail)['instrument'] - thumbnail_anomalies = get_current_flagged_anomalies(rootname, instrument) - if thumbnail_anomalies: - for anomaly in anomalies[instrument.lower()]: - if anomaly.lower() in thumbnail_anomalies: - # thumbnail contains an anomaly selected in the query - final_subset.append(thumbnail) - except KeyError: - print("Error with thumbnail: ", thumbnail) - else: - # if no anomalies are flagged, return all thumbnails from query - final_subset = thumbnails_subset + rootnames = [name[0] for name in current_ins_rootfileinfos.values_list('root_name')] + filtered_rootnames.extend(rootnames) - return list(set(final_subset)) + return filtered_rootnames def get_thumbnails_by_instrument(inst): @@ -1022,26 +1605,24 @@ def get_thumbnails_by_instrument(inst): A list of thumbnails available in the filesystem for the given instrument. """ + # Get list of all thumbnails + thumb_inventory = f'{THUMBNAIL_LISTFILE}_{inst.lower()}.txt' + all_thumbnails = retrieve_filelist(os.path.join(THUMBNAIL_FILESYSTEM, thumb_inventory)) - # Make sure the instrument is of the proper format (e.g. 
"Nircam") - instrument = inst[0].upper() + inst[1:].lower() + thumbnails = [] + all_proposals = get_instrument_proposals(inst) + for proposal in all_proposals: + results = mast_query_filenames_by_instrument(inst, proposal) - # Query MAST for all rootnames for the instrument - service = "Mast.Jwst.Filtered.{}".format(instrument) - params = {"columns": "filename", - "filters": []} - response = Mast.service_request_async(service, params) - results = response[0].json()['data'] + # Parse the results to get the rootnames + filenames = [result['filename'].split('.')[0] for result in results] - # Parse the results to get the rootnames - filenames = [result['filename'].split('.')[0] for result in results] + if len(filenames) > 0: + # Get subset of preview images that match the filenames + prop_thumbnails = [os.path.basename(item) for item in all_thumbnails if + os.path.basename(item).split('_integ')[0] in filenames] - # Get list of all thumbnails - thumbnails = retrieve_filelist(os.path.join(THUMBNAIL_FILESYSTEM, THUMBNAIL_LISTFILE)) - - # Get subset of preview images that match the filenames - thumbnails = [os.path.basename(item) for item in thumbnails if - os.path.basename(item).split('_integ')[0] in filenames] + thumbnails.extend(prop_thumbnails) return thumbnails @@ -1069,9 +1650,11 @@ def get_thumbnails_by_proposal(proposal): return thumbnails -def get_thumbnails_by_rootname(rootname): - """Return a list of preview images available in the filesystem for - the given ``rootname``. +def get_thumbnail_by_rootname(rootname): + """Return the most appropriate existing thumbnail basename available in the filesystem for the given ``rootname``. + We generate thumbnails only for 'rate' and 'dark' files. + Check if these files exist in the thumbnail filesystem. + In the case where neither rate nor dark thumbnails are present, revert to 'none' Parameters ---------- @@ -1081,9 +1664,8 @@ def get_thumbnails_by_rootname(rootname): Returns ------- - thumbnails : list - A list of preview images available in the filesystem for the - given ``rootname``. + thumbnail_basename : str + A thumbnail_basename available in the filesystem for the given ``rootname``. """ proposal = rootname.split('_')[0].split('jw')[-1][0:5] @@ -1093,8 +1675,16 @@ def get_thumbnails_by_rootname(rootname): '{}*'.format(rootname)))) thumbnails = [os.path.basename(thumbnail) for thumbnail in thumbnails] + thumbnail_basename = 'none' - return thumbnails + if len(thumbnails) > 0: + preferred = [thumb for thumb in thumbnails if 'rate' in thumb] + if len(preferred) == 0: + preferred = [thumb for thumb in thumbnails if 'dark' in thumb] + if len(preferred) > 0: + thumbnail_basename = os.path.basename(preferred[0]) + + return thumbnail_basename def log_into_mast(request): @@ -1121,6 +1711,27 @@ def log_into_mast(request): return False +def proposal_rootnames_by_instrument(proposal): + """Retrieve the rootnames for a given proposal for all instruments and return + as a dictionary with instrument names as keys. Instruments not used in the proposal + will not be present in the dictionary. 
+ + proposal : int or str + Proposal ID number + + Returns + ------- + rootnames : dict + Dictionary of rootnames with instrument names as keys + """ + rootnames = {} + for instrument in JWST_INSTRUMENT_NAMES: + names = get_rootnames_for_instrument_proposal(instrument, proposal) + if len(names) > 0: + rootnames[instrument] = names + return rootnames + + def random_404_page(): """Randomly select one of the various 404 templates for JWQL @@ -1167,69 +1778,82 @@ def text_scrape(prop_id): # Generate url url = 'http://www.stsci.edu/cgi-bin/get-proposal-info?id=' + str(prop_id) + '&submit=Go&observatory=JWST' html = BeautifulSoup(requests.get(url).text, 'lxml') - lines = html.findAll('p') - lines = [str(line) for line in lines] + not_available = "not available via this interface" in html.text + not_available |= "temporarily unable" in html.text program_meta = {} program_meta['prop_id'] = prop_id - program_meta['phase_two'] = ' Phase Two' + if not not_available: + lines = html.findAll('p') + lines = [str(line) for line in lines] - if prop_id[0] == '0': - program_meta['phase_two'] = program_meta['phase_two'].format(prop_id[1:]) + program_meta['phase_two'] = ' Phase Two' + + if prop_id[0] == '0': + program_meta['phase_two'] = program_meta['phase_two'].format(prop_id[1:]) + else: + program_meta['phase_two'] = program_meta['phase_two'].format(prop_id) + + program_meta['phase_two'] = BeautifulSoup(program_meta['phase_two'], 'html.parser') + + links = html.findAll('a') + + proposal_type = links[0].contents[0] + + program_meta['prop_type'] = proposal_type + + # Scrape for titles/names/contact persons + for line in lines: + if 'Title' in line: + start = line.find('') + 4 + end = line.find('<', start) + title = line[start:end] + program_meta['title'] = title + + if 'Principal Investigator:' in line: + start = line.find('') + 4 + end = line.find('<', start) + pi = line[start:end] + program_meta['pi'] = pi + + if 'Program Coordinator' in line: + start = line.find('') + 4 + mid = line.find('<', start) + end = line.find('>', mid) + 1 + pc = line[mid:end] + line[start:mid] + '' + program_meta['pc'] = pc + + if 'Contact Scientist' in line: + start = line.find('') + 4 + mid = line.find('<', start) + end = line.find('>', mid) + 1 + cs = line[mid:end] + line[start:mid] + '' + program_meta['cs'] = BeautifulSoup(cs, 'html.parser') + + if 'Program Status' in line: + start = line.find('') + ps = line[start:end] + + # beautiful soupify text to build absolute link + ps = BeautifulSoup(ps, 'html.parser') + ps_link = ps('a')[0] + ps_link['href'] = 'https://www.stsci.edu' + ps_link['href'] + ps_link['target'] = '_blank' + program_meta['ps'] = ps_link else: - program_meta['phase_two'] = program_meta['phase_two'].format(prop_id) - - program_meta['phase_two'] = BeautifulSoup(program_meta['phase_two'], 'html.parser') - - links = html.findAll('a') - proposal_type = links[0].contents[0] - - program_meta['prop_type'] = proposal_type - - # Scrape for titles/names/contact persons - for line in lines: - if 'Title' in line: - start = line.find('') + 4 - end = line.find('<', start) - title = line[start:end] - program_meta['title'] = title - - if 'Principal Investigator:' in line: - start = line.find('') + 4 - end = line.find('<', start) - pi = line[start:end] - program_meta['pi'] = pi - - if 'Program Coordinator' in line: - start = line.find('') + 4 - mid = line.find('<', start) - end = line.find('>', mid) + 1 - pc = line[mid:end] + line[start:mid] + '' - program_meta['pc'] = pc - - if 'Contact Scientist' in line: - start = 
line.find('') + 4 - mid = line.find('<', start) - end = line.find('>', mid) + 1 - cs = line[mid:end] + line[start:mid] + '' - program_meta['cs'] = BeautifulSoup(cs, 'html.parser') - - if 'Program Status' in line: - start = line.find('') - ps = line[start:end] - - # beautiful soupify text to build absolute link - ps = BeautifulSoup(ps, 'html.parser') - ps_link = ps('a')[0] - ps_link['href'] = 'https://www.stsci.edu' + ps_link['href'] - ps_link['target'] = '_blank' - program_meta['ps'] = ps_link + program_meta['phase_two'] = 'N/A' + program_meta['prop_type'] = 'N/A' + program_meta['title'] = 'Proposal not available or does not exist' + program_meta['pi'] = 'N/A' + program_meta['pc'] = 'N/A' + program_meta['cs'] = 'N/A' + program_meta['ps'] = 'N/A' return program_meta -def thumbnails_ajax(inst, proposal=None): +def thumbnails_ajax(inst, proposal, obs_num=None): """Generate a page that provides data necessary to render the ``thumbnails`` template. @@ -1239,43 +1863,54 @@ def thumbnails_ajax(inst, proposal=None): Name of JWST instrument proposal : str (optional) Number of APT proposal to filter + obs_num : str (optional) + Observation number Returns ------- data_dict : dict Dictionary of data needed for the ``thumbnails`` template """ + # generate the list of all obs of the proposal here, so that the list can be + # properly packaged up and sent to the js scripts. but to do this, we need to call + # get_rootnames_for_instrument_proposal, which is largely repeating the work done by + # get_filenames_by_instrument above. can we use just get_rootnames? we would have to + # filter results by obs_num after the call and after obs_list is created. + # But we need the filename list below...hmmm...so maybe we need to do both + all_rootnames = get_rootnames_for_instrument_proposal(inst, proposal) + all_obs = [] + for root in all_rootnames: + # Wrap in try/except because level 3 rootnames won't have an observation + # number returned by the filename_parser. That's fine, we're not interested + # in those files anyway. + try: + all_obs.append(filename_parser(root)['observation']) + except KeyError: + pass + obs_list = sorted(list(set(all_obs))) # Get the available files for the instrument - filenames = get_filenames_by_instrument(inst) + filenames, columns = get_filenames_by_instrument(inst, proposal, observation_id=obs_num, other_columns=['expstart', 'exp_type']) # Get set of unique rootnames rootnames = set(['_'.join(f.split('/')[-1].split('_')[:-1]) for f in filenames]) - # If the proposal is specified (i.e. 
if the page being loaded is - # an archive page), only collect data for given proposal - if proposal is not None: - proposal_string = '{:05d}'.format(int(proposal)) - rootnames = [rootname for rootname in rootnames if rootname[2:7] == proposal_string] - # Initialize dictionary that will contain all needed data - data_dict = {} - data_dict['inst'] = inst - data_dict['file_data'] = {} + data_dict = {'inst': inst, + 'file_data': dict()} + exp_types = set() + exp_groups = set() - # Gather data for each rootname + # Gather data for each rootname, and construct a list of all observations + # in the proposal for rootname in rootnames: # Parse filename try: filename_dict = filename_parser(rootname) - # The detector keyword is expected in thumbnails_query_ajax() for generating filterable dropdown menus - if 'detector' not in filename_dict.keys(): - filename_dict['detector'] = 'Unknown' - # Weed out file types that are not supported by generate_preview_images - if filename_dict['filename_type'] in ['stage_3_target_id']: + if 'stage_3' in filename_dict['filename_type']: continue except ValueError: @@ -1287,21 +1922,49 @@ def thumbnails_ajax(inst, proposal=None): 'parallel_seq_id': rootname[16], 'program_id': rootname[2:7], 'visit': rootname[10:13], - 'visit_group': rootname[14:16]} + 'visit_group': rootname[14:16], + 'group_root': rootname[:26]} + + # Get list of available filenames and exposure start times. All files with a given + # rootname will have the same exposure start time, so just keep the first. + available_files = [] + exp_start = None + exp_type = None + for i, item in enumerate(filenames): + if rootname in item: + available_files.append(item) + if exp_start is None: + exp_start = columns['expstart'][i] + exp_type = columns['exp_type'][i] + exp_types.add(exp_type) + + # Viewed is stored by rootname in the Model db. 
Save it with the data_dict + # THUMBNAIL_FILTER_LOOK is boolean accessed according to a viewed flag + try: + root_file_info = RootFileInfo.objects.get(root_name=rootname) + viewed = THUMBNAIL_FILTER_LOOK[root_file_info.viewed] + except RootFileInfo.DoesNotExist: + viewed = THUMBNAIL_FILTER_LOOK[0] - # Get list of available filenames - available_files = [item for item in filenames if rootname in item] + # Add to list of all exposure groups + exp_groups.add(filename_dict['group_root']) # Add data to dictionary data_dict['file_data'][rootname] = {} data_dict['file_data'][rootname]['filename_dict'] = filename_dict data_dict['file_data'][rootname]['available_files'] = available_files - data_dict['file_data'][rootname]['suffixes'] = [filename_parser(filename)['suffix'] for filename in available_files] + data_dict['file_data'][rootname]['viewed'] = viewed + data_dict['file_data'][rootname]['exp_type'] = exp_type + data_dict['file_data'][rootname]['thumbnail'] = get_thumbnail_by_rootname(rootname) + try: - data_dict['file_data'][rootname]['expstart'] = get_expstart(inst, rootname) - data_dict['file_data'][rootname]['expstart_iso'] = Time(data_dict['file_data'][rootname]['expstart'], format='mjd').iso.split('.')[0] - except: - print("issue with get_expstart for {}".format(rootname)) + data_dict['file_data'][rootname]['expstart'] = exp_start + data_dict['file_data'][rootname]['expstart_iso'] = Time(exp_start, format='mjd').iso.split('.')[0] + except (ValueError, TypeError) as e: + logging.warning("Unable to populate exp_start info for {}".format(rootname)) + logging.warning(e) + except KeyError: + print("KeyError with get_expstart for {}".format(rootname)) # Extract information for sorting with dropdown menus # (Don't include the proposal as a sorting parameter if the proposal has already been specified) @@ -1314,10 +1977,14 @@ def thumbnails_ajax(inst, proposal=None): pass if proposal is not None: - dropdown_menus = {'detector': sorted(detectors)} + dropdown_menus = {'detector': sorted(detectors), + 'look': THUMBNAIL_FILTER_LOOK, + 'exp_type': sorted(exp_types)} else: dropdown_menus = {'detector': sorted(detectors), - 'proposal': sorted(proposals)} + 'proposal': sorted(proposals), + 'look': THUMBNAIL_FILTER_LOOK, + 'exp_type': sorted(exp_types)} data_dict['tools'] = MONITORS data_dict['dropdown_menus'] = dropdown_menus @@ -1329,6 +1996,10 @@ def thumbnails_ajax(inst, proposal=None): data_dict['file_data'] = sorted_file_data + # Add list of observation numbers and group roots + data_dict['obs_list'] = obs_list + data_dict['exp_groups'] = sorted(exp_groups) + return data_dict @@ -1338,25 +2009,24 @@ def thumbnails_query_ajax(rootnames): Parameters ---------- - rootnames : list of strings (optional) - Rootname of APT proposal to filter + rootnames : list of strings Returns ------- data_dict : dict Dictionary of data needed for the ``thumbnails`` template """ - # Initialize dictionary that will contain all needed data - data_dict = {} - # dummy variable for view_image when thumbnail is selected - data_dict['inst'] = "all" - data_dict['file_data'] = {} + data_dict = {'inst': 'all', + 'file_data': dict()} + exp_groups = set() + # Gather data for each rootname for rootname in rootnames: # fit expected format for get_filenames_by_rootname() + split_name = rootname.split("_") try: - rootname = rootname.split("_")[0] + '_' + rootname.split("_")[1] + '_' + rootname.split("_")[2] + '_' + rootname.split("_")[3] + rootname = split_name[0] + '_' + split_name[1] + '_' + split_name[2] + '_' + split_name[3] except 
IndexError: continue @@ -1364,32 +2034,32 @@ def thumbnails_query_ajax(rootnames): try: filename_dict = filename_parser(rootname) except ValueError: - # Temporary workaround for noncompliant files in filesystem - filename_dict = {'activity': rootname[17:19], - 'detector': rootname[26:], - 'exposure_id': rootname[20:25], - 'observation': rootname[7:10], - 'parallel_seq_id': rootname[16], - 'program_id': rootname[2:7], - 'visit': rootname[10:13], - 'visit_group': rootname[14:16]} + continue + + # Add to list of all exposure groups + exp_groups.add(filename_dict['group_root']) # Get list of available filenames available_files = get_filenames_by_rootname(rootname) # Add data to dictionary data_dict['file_data'][rootname] = {} - try: - data_dict['file_data'][rootname]['inst'] = JWST_INSTRUMENT_NAMES_MIXEDCASE[filename_parser(rootname)['instrument']] - except KeyError: - data_dict['file_data'][rootname]['inst'] = "MIRI" - print("Warning: assuming instrument is MIRI") + data_dict['file_data'][rootname]['inst'] = JWST_INSTRUMENT_NAMES_MIXEDCASE[filename_parser(rootname)['instrument']] data_dict['file_data'][rootname]['filename_dict'] = filename_dict data_dict['file_data'][rootname]['available_files'] = available_files - data_dict['file_data'][rootname]['expstart'] = get_expstart(data_dict['file_data'][rootname]['inst'], rootname) - data_dict['file_data'][rootname]['suffixes'] = [filename_parser(filename)['suffix'] for - filename in available_files] + root_file_info = RootFileInfo.objects.get(root_name=rootname) + exp_start = root_file_info.expstart + data_dict['file_data'][rootname]['expstart'] = exp_start + data_dict['file_data'][rootname]['expstart_iso'] = Time(exp_start, format='mjd').iso.split('.')[0] + data_dict['file_data'][rootname]['suffixes'] = [] data_dict['file_data'][rootname]['prop'] = rootname[2:7] + for filename in available_files: + try: + suffix = filename_parser(filename)['suffix'] + data_dict['file_data'][rootname]['suffixes'].append(suffix) + except ValueError: + continue + data_dict['file_data'][rootname]['thumbnail'] = get_thumbnail_by_rootname(rootname) # Extract information for sorting with dropdown menus try: @@ -1416,5 +2086,6 @@ def thumbnails_query_ajax(rootnames): data_dict['tools'] = MONITORS data_dict['dropdown_menus'] = dropdown_menus + data_dict['exp_groups'] = sorted(exp_groups) return data_dict diff --git a/jwql/website/apps/jwql/db.py b/jwql/website/apps/jwql/db.py deleted file mode 100644 index cc5a283de..000000000 --- a/jwql/website/apps/jwql/db.py +++ /dev/null @@ -1,145 +0,0 @@ -"""Connects to the ``jwql`` database. - -This module is the primary interface between the ``jwql`` webapp and -the ``jwql`` database. It uses ``SQLAlchemy`` to start a session with -the database, and provides class methods that perform useful queries on -that database (for example, getting the names of all the files -associated with a certain instrument). - -Authors -------- - - - Lauren Chambers - -Use ---- - This module can be used as such: - :: - - from db import DatabaseConnection - db_connect = DatabaseConnection() - data = db_connect.get_filenames_for_instrument('NIRCam') - -Dependencies ------------- - The user must have a configuration file named ``config.json`` - placed in ``jwql/utils/`` directory. 
-""" - -import os - -from sqlalchemy.ext.automap import automap_base -from sqlalchemy.orm import Session -from sqlalchemy import create_engine -from astroquery.mast import Mast - -from jwql.utils.utils import get_config - - -class DatabaseConnection: - """Facilitates connection with the ``jwql`` database. - - Attributes - ---------- - ObservationWebtest : obj - Class instance in an "automap" schema corresponding to the - ``observationwebtest`` database table - session : obj - Session with the database that enables querying - """ - - def __init__(self, db_type, instrument=None): - """Determine what kind of database is being queried, and - call appropriate initialization method - """ - - self.db_type = db_type - - assert self.db_type in ['MAST', 'SQL'], \ - 'Unrecognized database type: {}. Must be SQL or MAST.'.format(db_type) - - if self.db_type == 'MAST': - self.init_MAST(instrument) - elif self.db_type == 'SQL': - self.init_SQL() - - def init_SQL(self): - """Start SQLAlchemy session with the ``jwql`` database""" - - # Get database credentials from config file - connection_string = get_config()['database']['connection_string'] - - # Connect to the database - engine = create_engine(connection_string) - - # Allow for automapping of database tables to classes - Base = automap_base() - - # Reflect the tables in the database - Base.prepare(engine, reflect=True) - - # Find the observations table - self.ObservationWebtest = Base.classes.observations_webtest - - # Start a session to enable queries - self.session = Session(engine) - - def init_MAST(self, instrument=None): - """Determine the necessary service string to query the MAST - database. - """ - - # Correctly format the instrument string - if instrument: - instrument = instrument[0].upper() + instrument[1:].lower() - else: - raise TypeError('Must provide instrument to initialize MAST database.') - - # Define the service name for the given instrument - self.service = "Mast.Jwst.Filtered." + instrument - print(self.service) - - def get_files_for_instrument(self, instrument): - """Given an instrument, query the database for all filenames - and paths associated with said instrument - - Parameters - ---------- - instrument : str - Name of JWST instrument - - Returns - ------- - filepaths: list - List of all filepaths in database for the provided - instrument - filenames: list - List of all filenames in database for the provided - instrument - """ - - instrument = instrument.upper() - - if self.db_type == 'SQL': - results = self.session.query(self.ObservationWebtest)\ - .filter(self.ObservationWebtest.instrument == instrument) - elif self.db_type == 'MAST': - params = {"columns": "*", - "filters": []} - response = Mast.service_request_async(self.service, params) - results = response[0].json()['data'] - - filepaths = [] - filenames = [] - for i in results: - if self.db_type == 'SQL': - filename = i.filename - elif self.db_type == 'MAST': - filename = i['filename'] - prog_id = filename[2:7] - file_path = os.path.join('jw' + prog_id, filename) - filepaths.append(file_path) - filenames.append(filename) - - session.close() - return filepaths, filenames diff --git a/jwql/website/apps/jwql/forms.py b/jwql/website/apps/jwql/forms.py index 51507f476..5b7d7ac49 100644 --- a/jwql/website/apps/jwql/forms.py +++ b/jwql/website/apps/jwql/forms.py @@ -12,6 +12,7 @@ - Johannes Sahlmann - Matthew Bourque - Teagan King + - Mike Engesser Use --- @@ -43,29 +44,27 @@ def view_function(request): placed in the ``jwql`` directory. 
""" +from collections import defaultdict import datetime import glob import os +import logging from astropy.time import Time, TimeDelta from django import forms from django.shortcuts import redirect -from jwedb.edb_interface import is_valid_mnemonic - -from jwql.database import database_interface as di -from jwql.utils.constants import ANOMALY_CHOICES_PER_INSTRUMENT -from jwql.utils.constants import ANOMALIES_PER_INSTRUMENT -from jwql.utils.constants import APERTURES_PER_INSTRUMENT -from jwql.utils.constants import DETECTOR_PER_INSTRUMENT -from jwql.utils.constants import EXP_TYPE_PER_INSTRUMENT -from jwql.utils.constants import FILTERS_PER_INSTRUMENT -from jwql.utils.constants import GENERIC_SUFFIX_TYPES -from jwql.utils.constants import GRATING_PER_INSTRUMENT -from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE -from jwql.utils.constants import JWST_INSTRUMENT_NAMES_SHORTHAND -from jwql.utils.constants import READPATT_PER_INSTRUMENT -from jwql.utils.utils import get_config, filename_parser -from jwql.utils.utils import query_format +from django.utils.html import format_html +from django.utils.safestring import mark_safe +from jwql.edb.engineering_database import is_valid_mnemonic +from jwql.website.apps.jwql.models import Anomalies + + +from jwql.utils.constants import (ANOMALY_CHOICES_PER_INSTRUMENT, ANOMALIES_PER_INSTRUMENT, APERTURES_PER_INSTRUMENT, DETECTOR_PER_INSTRUMENT, + EXP_TYPE_PER_INSTRUMENT, FILTERS_PER_INSTRUMENT, GENERIC_SUFFIX_TYPES, GRATING_PER_INSTRUMENT, + GUIDER_FILENAME_TYPE, JWST_INSTRUMENT_NAMES_MIXEDCASE, JWST_INSTRUMENT_NAMES_SHORTHAND, + READPATT_PER_INSTRUMENT, IGNORED_SUFFIXES, SUBARRAYS_PER_INSTRUMENT, PUPILS_PER_INSTRUMENT, + LOOK_OPTIONS, SORT_OPTIONS, PROPOSAL_CATEGORIES) +from jwql.utils.utils import (get_config, get_rootnames_for_instrument_proposal, filename_parser, query_format) from wtforms import SubmitField, StringField @@ -80,8 +79,8 @@ class BaseForm(forms.Form): resolve_submit = SubmitField('Resolve Target') -class AnomalyQueryForm(BaseForm): - """Form validation for the anomaly viewing tool""" +class JwqlQueryForm(BaseForm): + """Form validation for the JWQL Query viewing tool""" # Form submits calculate_submit = SubmitField() @@ -96,9 +95,11 @@ class AnomalyQueryForm(BaseForm): params[instrument]['readpatt_list'] = [] params[instrument]['exptype_list'] = [] params[instrument]['grating_list'] = [] + params[instrument]['subarray_list'] = [] + params[instrument]['pupil_list'] = [] params[instrument]['anomalies_list'] = [] # Generate dynamic lists of apertures to use in forms - for aperture in APERTURES_PER_INSTRUMENT[instrument.upper()]: + for aperture in APERTURES_PER_INSTRUMENT[instrument.lower()]: params[instrument]['aperture_list'].append([query_format(aperture), query_format(aperture)]) # Generate dynamic lists of filters to use in forms for filt in FILTERS_PER_INSTRUMENT[instrument]: @@ -120,19 +121,49 @@ class AnomalyQueryForm(BaseForm): for grating in GRATING_PER_INSTRUMENT[instrument]: grating = query_format(grating) params[instrument]['grating_list'].append([grating, grating]) + # Generate dynamic lists of subarray options to use in forms + for subarray in SUBARRAYS_PER_INSTRUMENT[instrument]: + subarray = query_format(subarray) + params[instrument]['subarray_list'].append([subarray, subarray]) + # Generate dynamic lists of pupil options to use in forms + for pupil in PUPILS_PER_INSTRUMENT[instrument]: + pupil = query_format(pupil) + params[instrument]['pupil_list'].append([pupil, pupil]) # Generate dynamic lists of anomalies 
to use in forms for anomaly in ANOMALIES_PER_INSTRUMENT.keys(): if instrument in ANOMALIES_PER_INSTRUMENT[anomaly]: item = [query_format(anomaly), query_format(anomaly)] params[instrument]['anomalies_list'].append(item) - # Anomaly Parameters - instrument = forms.MultipleChoiceField(required=False, - choices=[(inst, JWST_INSTRUMENT_NAMES_MIXEDCASE[inst]) for inst in JWST_INSTRUMENT_NAMES_MIXEDCASE], - widget=forms.CheckboxSelectMultiple) - exp_time_max = forms.DecimalField(required=False, initial="685") - exp_time_min = forms.DecimalField(required=False, initial="680") + # general parameters + instrument = forms.MultipleChoiceField( + required=False, + choices=[(inst, JWST_INSTRUMENT_NAMES_MIXEDCASE[inst]) for inst in JWST_INSTRUMENT_NAMES_MIXEDCASE], + widget=forms.CheckboxSelectMultiple) + + look_choices = [(query_format(choice), query_format(choice)) for choice in LOOK_OPTIONS] + look_status = forms.MultipleChoiceField( + required=False, choices=look_choices, widget=forms.CheckboxSelectMultiple) + + date_range = forms.CharField(required=True) + cat_choices = [(query_format(choice), query_format(choice)) for choice in PROPOSAL_CATEGORIES] + proposal_category = forms.MultipleChoiceField( + required=False, choices=cat_choices, widget=forms.CheckboxSelectMultiple) + + sort_choices = [(choice, choice) for choice in SORT_OPTIONS] + sort_type = forms.ChoiceField( + required=True, + choices=sort_choices, initial=sort_choices[2], + widget=forms.RadioSelect) + + num_choices = [(50, 50), (100, 100), (200, 200), (500, 500)] + num_per_page = forms.ChoiceField( + required=True, + choices=num_choices, initial=num_choices[1], + widget=forms.RadioSelect) + + # instrument specific parameters miri_aper = forms.MultipleChoiceField(required=False, choices=params['miri']['aperture_list'], widget=forms.CheckboxSelectMultiple) nirspec_aper = forms.MultipleChoiceField(required=False, choices=params['nirspec']['aperture_list'], widget=forms.CheckboxSelectMultiple) niriss_aper = forms.MultipleChoiceField(required=False, choices=params['niriss']['aperture_list'], widget=forms.CheckboxSelectMultiple) @@ -175,6 +206,18 @@ class AnomalyQueryForm(BaseForm): nircam_grating = forms.MultipleChoiceField(required=False, choices=params['nircam']['grating_list'], widget=forms.CheckboxSelectMultiple) fgs_grating = forms.MultipleChoiceField(required=False, choices=params['fgs']['grating_list'], widget=forms.CheckboxSelectMultiple) + miri_subarray = forms.MultipleChoiceField(required=False, choices=params['miri']['subarray_list'], widget=forms.CheckboxSelectMultiple) + nirspec_subarray = forms.MultipleChoiceField(required=False, choices=params['nirspec']['subarray_list'], widget=forms.CheckboxSelectMultiple) + niriss_subarray = forms.MultipleChoiceField(required=False, choices=params['niriss']['subarray_list'], widget=forms.CheckboxSelectMultiple) + nircam_subarray = forms.MultipleChoiceField(required=False, choices=params['nircam']['subarray_list'], widget=forms.CheckboxSelectMultiple) + fgs_subarray = forms.MultipleChoiceField(required=False, choices=params['fgs']['subarray_list'], widget=forms.CheckboxSelectMultiple) + + miri_pupil = forms.MultipleChoiceField(required=False, choices=params['miri']['pupil_list'], widget=forms.CheckboxSelectMultiple) + nirspec_pupil = forms.MultipleChoiceField(required=False, choices=params['nirspec']['pupil_list'], widget=forms.CheckboxSelectMultiple) + niriss_pupil = forms.MultipleChoiceField(required=False, choices=params['niriss']['pupil_list'], widget=forms.CheckboxSelectMultiple) + 
nircam_pupil = forms.MultipleChoiceField(required=False, choices=params['nircam']['pupil_list'], widget=forms.CheckboxSelectMultiple) + fgs_pupil = forms.MultipleChoiceField(required=False, choices=params['fgs']['pupil_list'], widget=forms.CheckboxSelectMultiple) + def clean_inst(self): inst = self.cleaned_data['instrument'] @@ -188,41 +231,34 @@ class InstrumentAnomalySubmitForm(forms.Form): def __init__(self, *args, **kwargs): instrument = kwargs.pop('instrument') super(InstrumentAnomalySubmitForm, self).__init__(*args, **kwargs) - self.fields['anomaly_choices'] = forms.MultipleChoiceField(choices=ANOMALY_CHOICES_PER_INSTRUMENT[instrument], widget=forms.CheckboxSelectMultiple()) + self.fields['anomaly_choices'] = forms.MultipleChoiceField( + choices=ANOMALY_CHOICES_PER_INSTRUMENT[instrument], + widget=forms.CheckboxSelectMultiple(), required=False) self.instrument = instrument - def update_anomaly_table(self, rootname, user, anomaly_choices): - """Updated the ``anomaly`` table of the database with flagged - anomaly information + def update_anomaly_table(self, rootfileinfo, user, anomaly_choices): + """Update the ``Anomalies`` model associated with the sent RootFileInfo. + All 'anomaly_choices' should be marked 'True' and the rest should be 'False' Parameters ---------- - rootname : str - The rootname of the image to flag (e.g. - ``jw86600008001_02101_00001_guider2``) + rootfileinfo : RootFileInfo + The RootFileInfo model object of the image to update user : str The user that is flagging the anomaly anomaly_choices : list A list of anomalies that are to be flagged (e.g. ``['snowball', 'crosstalk']``) """ + default_dict = {'flag_date': datetime.datetime.now(), + 'user': user} + for anomaly in Anomalies.get_all_anomalies(): + default_dict[anomaly] = (anomaly in anomaly_choices) - data_dict = {} - data_dict['rootname'] = rootname - data_dict['flag_date'] = datetime.datetime.now() - data_dict['user'] = user - for choice in anomaly_choices: - data_dict[choice] = True - if self.instrument == 'fgs': - di.engine.execute(di.FGSAnomaly.__table__.insert(), data_dict) - elif self.instrument == 'nirspec': - di.engine.execute(di.NIRSpecAnomaly.__table__.insert(), data_dict) - elif self.instrument == 'miri': - di.engine.execute(di.MIRIAnomaly.__table__.insert(), data_dict) - elif self.instrument == 'niriss': - di.engine.execute(di.NIRISSAnomaly.__table__.insert(), data_dict) - elif self.instrument == 'nircam': - di.engine.execute(di.NIRCamAnomaly.__table__.insert(), data_dict) + try: + Anomalies.objects.update_or_create(root_file_info=rootfileinfo, defaults=default_dict) + except Exception as e: + logging.warning('Unable to update anomaly table for {} due to {}'.format(rootfileinfo.root_name, e)) def clean_anomalies(self): @@ -238,7 +274,6 @@ class FileSearchForm(forms.Form): search = forms.CharField(label='', max_length=500, required=True, empty_value='Search') - # Initialize attributes fileroot_dict = None search_type = None instrument = None @@ -272,21 +307,43 @@ def clean_search(self): # See if there are any matching proposals and, if so, what # instrument they are for proposal_string = '{:05d}'.format(int(search)) - search_string_public = os.path.join(get_config()['filesystem'], 'public', 'jw{}'.format(proposal_string), '*', '*{}*.fits'.format(proposal_string)) - search_string_proprietary = os.path.join(get_config()['filesystem'], 'proprietary', 'jw{}'.format(proposal_string), '*', '*{}*.fits'.format(proposal_string)) + search_string_public = os.path.join(get_config()['filesystem'], 'public', 
'jw{}'.format(proposal_string), + '*', '*{}*.fits'.format(proposal_string)) + search_string_proprietary = os.path.join(get_config()['filesystem'], 'proprietary', 'jw{}'.format(proposal_string), + '*', '*{}*.fits'.format(proposal_string)) all_files = glob.glob(search_string_public) all_files.extend(glob.glob(search_string_proprietary)) - # Ignore "original" files - all_files = [filename for filename in all_files if 'original' not in filename] + # Gather all files that do not have the 'IGNORED_SUFFIXES' in them + all_files = [filename for filename in all_files if not any(name in filename for name in IGNORED_SUFFIXES)] if len(all_files) > 0: all_instruments = [] + all_observations = defaultdict(list) for file in all_files: - instrument = filename_parser(file)['instrument'] - all_instruments.append(instrument) + filename = os.path.basename(file) + + # We only want to pass in datasets that are science exptypes. JWQL doesn't + # handle guider data, this will still allow for science FGS data but filter + # guider data. + if any(map(filename.__contains__, GUIDER_FILENAME_TYPE)): + continue + else: + instrument = filename_parser(file)['instrument'] + observation = filename_parser(file)['observation'] + all_instruments.append(instrument) + all_observations[instrument].append(observation) + + # sort lists so first observation is available when link is clicked. + for instrument in all_instruments: + all_observations[instrument].sort() + if len(set(all_instruments)) > 1: - raise forms.ValidationError('Cannot return result for proposal with multiple instruments ({}).'.format(', '.join(set(all_instruments)))) + # Technically all proposal have multiple instruments if you include guider data. Remove Guider Data + instrument_routes = [format_html('{}', instrument, proposal_string[1:], + all_observations[instrument][0], instrument) for instrument in set(all_instruments)] + raise forms.ValidationError( + mark_safe(('Proposal contains multiple instruments, please click instrument link to view data: {}.').format(', '.join(instrument_routes)))) # noqa self.instrument = all_instruments[0] else: @@ -347,11 +404,24 @@ def redirect_to_files(self): # If they searched for a proposal if self.search_type == 'proposal': proposal_string = '{:05d}'.format(int(search)) - return redirect('/{}/archive/{}'.format(self.instrument, proposal_string)) + all_rootnames = get_rootnames_for_instrument_proposal(self.instrument, proposal_string) + all_obs = [] + for root in all_rootnames: + # Wrap in try/except because level 3 rootnames won't have an observation + # number returned by the filename_parser. That's fine, we're not interested + # in those files anyway. + try: + all_obs.append(filename_parser(root)['observation']) + except KeyError: + pass + + observation = sorted(list(set(all_obs)))[0] + + return redirect('/{}/archive/{}/obs{}'.format(self.instrument, proposal_string, observation)) # If they searched for a file root elif self.search_type == 'fileroot': - return redirect('/{}/{}'.format(self.instrument, search)) + return redirect('/{}/{}/'.format(self.instrument, search)) class FiletypeForm(forms.Form): @@ -421,19 +491,17 @@ def clean_search(self): class MnemonicQueryForm(forms.Form): """A triple-field form to query mnemonic records in the DMS EDB.""" - production_mode = False + production_mode = True if production_mode: # times for default query (one day one week ago) now = Time.now() - delta_day = -7. - range_day = 1. 
- default_start_time = now + TimeDelta(delta_day, format='jd') - default_end_time = now + TimeDelta(delta_day + range_day, format='jd') + default_start_time = now + TimeDelta(3600., format='sec') + default_end_time = now else: # example for testing - default_start_time = Time('2019-04-02 00:00:00.000', format='iso') - default_end_time = Time('2019-04-02 00:01:00.000', format='iso') + default_start_time = Time('2022-06-20 00:00:00.000', format='iso') + default_end_time = Time('2022-06-21 00:00:00.000', format='iso') default_mnemonic_identifier = 'IMIR_HK_ICE_SEC_VOLT4' diff --git a/jwql/website/apps/jwql/migrations/0001_initial.py b/jwql/website/apps/jwql/migrations/0001_initial.py new file mode 100644 index 000000000..9de5ca6a9 --- /dev/null +++ b/jwql/website/apps/jwql/migrations/0001_initial.py @@ -0,0 +1,45 @@ +# Generated by Django 3.1.7 on 2022-09-09 17:47 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='ImageData', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('inst', models.CharField(choices=[('FGS', 'FGS'), ('MIRI', 'MIRI'), ('NIRCam', 'NIRCam'), ('NIRISS', 'NIRISS'), ('NIRSpec', 'NIRSpec')], default=None, max_length=7, verbose_name='instrument')), + ('pub_date', models.DateTimeField(verbose_name='date published')), + ('filepath', models.FilePathField(path='/user/lchambers/jwql/')), + ], + options={ + 'verbose_name_plural': 'image data', + 'db_table': 'imagedata', + }, + ), + migrations.CreateModel( + name='InstrumentFilterHandler', + fields=[ + ('instrument', models.CharField(max_length=10, primary_key=True, serialize=False)), + ], + ), + migrations.CreateModel( + name='ThumbnailFilterInfo', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('observation', models.PositiveIntegerField()), + ('proposal', models.PositiveIntegerField()), + ('root_name', models.CharField(max_length=300)), + ('marked_viewed', models.BooleanField(default=False)), + ('inst_handler', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jwql.instrumentfilterhandler')), + ], + ), + ] diff --git a/jwql/website/apps/jwql/migrations/0002_auto_20220913_1525.py b/jwql/website/apps/jwql/migrations/0002_auto_20220913_1525.py new file mode 100644 index 000000000..049901811 --- /dev/null +++ b/jwql/website/apps/jwql/migrations/0002_auto_20220913_1525.py @@ -0,0 +1,27 @@ +# Generated by Django 3.1.7 on 2022-09-13 20:25 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('jwql', '0001_initial'), + ] + + operations = [ + migrations.RemoveField( + model_name='thumbnailfilterinfo', + name='id', + ), + migrations.AlterField( + model_name='instrumentfilterhandler', + name='instrument', + field=models.TextField(max_length=10, primary_key=True, serialize=False), + ), + migrations.AlterField( + model_name='thumbnailfilterinfo', + name='root_name', + field=models.TextField(max_length=300, primary_key=True, serialize=False), + ), + ] diff --git a/jwql/website/apps/jwql/migrations/0003_auto_20220921_0955.py b/jwql/website/apps/jwql/migrations/0003_auto_20220921_0955.py new file mode 100644 index 000000000..df7bc6965 --- /dev/null +++ b/jwql/website/apps/jwql/migrations/0003_auto_20220921_0955.py @@ -0,0 +1,58 @@ +# Generated by Django 3.1.7 on 2022-09-21 14:55 
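
The migration files in this part of the diff were auto-generated by Django against the new jwql models. If the models change again, the migrations can be regenerated and applied with the standard management commands (`python manage.py makemigrations jwql` followed by `python manage.py migrate`); a hedged sketch of doing the same programmatically, assuming DJANGO_SETTINGS_MODULE already points at the jwql website settings, is shown below:

```python
# Sketch only: regenerate and apply the jwql app migrations from Python.
# Assumes DJANGO_SETTINGS_MODULE is already configured for the jwql website.
import django
from django.core.management import call_command

django.setup()
call_command('makemigrations', 'jwql')
call_command('migrate', 'jwql')
```
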
+ +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('jwql', '0002_auto_20220913_1525'), + ] + + operations = [ + migrations.CreateModel( + name='Archive', + fields=[ + ('instrument', models.CharField(help_text='Instrument name', max_length=7, primary_key=True, serialize=False)), + ], + options={ + 'ordering': ['instrument'], + }, + ), + migrations.CreateModel( + name='Observation', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('obsnum', models.CharField(help_text='Observation number, as a 3 digit string', max_length=3)), + ('number_of_files', models.IntegerField(default=0, help_text='Number of files in the proposal')), + ('obsstart', models.FloatField(default=0.0, help_text='Time of the beginning of the observation in MJD')), + ('obsend', models.FloatField(default=0.0, help_text='Time of the end of the observation in MJD')), + ('exptypes', models.CharField(default='', help_text='Comma-separated list of exposure types', max_length=100)), + ], + options={ + 'ordering': ['-obsnum'], + }, + ), + migrations.CreateModel( + name='Proposal', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('prop_id', models.CharField(help_text='5-digit proposal ID string', max_length=5)), + ('thumbnail_path', models.CharField(default='', help_text='Path to the proposal thumbnail', max_length=100)), + ('archive', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jwql.archive')), + ], + options={ + 'ordering': ['-prop_id'], + 'unique_together': {('prop_id', 'archive')}, + }, + ), + migrations.DeleteModel( + name='ImageData', + ), + migrations.AddField( + model_name='observation', + name='proposal', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jwql.proposal'), + ), + ] diff --git a/jwql/website/apps/jwql/migrations/0004_auto_20220922_0911.py b/jwql/website/apps/jwql/migrations/0004_auto_20220922_0911.py new file mode 100644 index 000000000..998dd2ce4 --- /dev/null +++ b/jwql/website/apps/jwql/migrations/0004_auto_20220922_0911.py @@ -0,0 +1,33 @@ +# Generated by Django 3.1.7 on 2022-09-22 14:11 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('jwql', '0003_auto_20220921_0955'), + ] + + operations = [ + migrations.AddField( + model_name='thumbnailfilterinfo', + name='obsnum', + field=models.CharField(default=11, help_text='Observation number, as a 3 digit string', max_length=3), + preserve_default=False, + ), + migrations.CreateModel( + name='RootFileInfo', + fields=[ + ('instrument', models.CharField(help_text='Instrument name', max_length=7)), + ('proposal', models.CharField(help_text='5-digit proposal ID string', max_length=5)), + ('root_name', models.TextField(max_length=300, primary_key=True, serialize=False)), + ('viewed', models.BooleanField(default=False)), + ('obsnum', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jwql.observation')), + ], + options={ + 'ordering': ['-root_name'], + }, + ), + ] diff --git a/jwql/website/apps/jwql/migrations/0005_auto_20220922_1422.py b/jwql/website/apps/jwql/migrations/0005_auto_20220922_1422.py new file mode 100644 index 000000000..3c51cbe23 --- /dev/null +++ b/jwql/website/apps/jwql/migrations/0005_auto_20220922_1422.py @@ -0,0 +1,23 @@ +# Generated by Django 3.1.7 on 2022-09-22 19:22 + 
+from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('jwql', '0004_auto_20220922_0911'), + ] + + operations = [ + migrations.RemoveField( + model_name='thumbnailfilterinfo', + name='inst_handler', + ), + migrations.DeleteModel( + name='InstrumentFilterHandler', + ), + migrations.DeleteModel( + name='ThumbnailFilterInfo', + ), + ] diff --git a/jwql/website/apps/jwql/migrations/0006_auto_20230214_1624.py b/jwql/website/apps/jwql/migrations/0006_auto_20230214_1624.py new file mode 100644 index 000000000..493608df3 --- /dev/null +++ b/jwql/website/apps/jwql/migrations/0006_auto_20230214_1624.py @@ -0,0 +1,95 @@ +# Generated by Django 3.1.7 on 2023-02-14 21:24 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('jwql', '0005_auto_20220922_1422'), + ] + + operations = [ + migrations.CreateModel( + name='Anomalies', + fields=[ + ('root_file_info', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='jwql.rootfileinfo')), + ('cosmic_ray_shower', models.BooleanField(default=False)), + ('diffraction_spike', models.BooleanField(default=False)), + ('excessive_saturation', models.BooleanField(default=False)), + ('guidestar_failure', models.BooleanField(default=False)), + ('persistence', models.BooleanField(default=False)), + ('crosstalk', models.BooleanField(default=False)), + ('data_transfer_error', models.BooleanField(default=False)), + ('ghost', models.BooleanField(default=False)), + ('snowball', models.BooleanField(default=False)), + ('column_pull_up', models.BooleanField(default=False)), + ('column_pull_down', models.BooleanField(default=False)), + ('dominant_msa_leakage', models.BooleanField(default=False)), + ('dragons_breath', models.BooleanField(default=False)), + ('mrs_glow', models.BooleanField(default=False)), + ('mrs_zipper', models.BooleanField(default=False)), + ('internal_reflection', models.BooleanField(default=False)), + ('optical_short', models.BooleanField(default=False)), + ('row_pull_up', models.BooleanField(default=False)), + ('row_pull_down', models.BooleanField(default=False)), + ('lrs_contamination', models.BooleanField(default=False)), + ('tree_rings', models.BooleanField(default=False)), + ('scattered_light', models.BooleanField(default=False)), + ('claws', models.BooleanField(default=False)), + ('wisps', models.BooleanField(default=False)), + ('tilt_event', models.BooleanField(default=False)), + ('light_saber', models.BooleanField(default=False)), + ('other', models.BooleanField(default=False)), + ], + options={ + 'ordering': ['-root_file_info'], + }, + ), + migrations.AddField( + model_name='proposal', + name='cat_type', + field=models.CharField(default='', help_text='Category Type', max_length=10), + ), + migrations.AddField( + model_name='rootfileinfo', + name='aperature', + field=models.CharField(default='', help_text='Aperature', max_length=40), + ), + migrations.AddField( + model_name='rootfileinfo', + name='detector', + field=models.CharField(default='', help_text='Detector', max_length=40), + ), + migrations.AddField( + model_name='rootfileinfo', + name='filter', + field=models.CharField(default='', help_text='Instrument name', max_length=7), + ), + migrations.AddField( + model_name='rootfileinfo', + name='grating', + field=models.CharField(default='', help_text='Grating', max_length=40), + ), + migrations.AddField( + model_name='rootfileinfo', + name='pupil', + 
field=models.CharField(default='', help_text='Pupil', max_length=40), + ), + migrations.AddField( + model_name='rootfileinfo', + name='read_patt', + field=models.CharField(default='', help_text='Read Pattern', max_length=40), + ), + migrations.AddField( + model_name='rootfileinfo', + name='read_patt_num', + field=models.IntegerField(default=0, help_text='Read Pattern Number'), + ), + migrations.AddField( + model_name='rootfileinfo', + name='subarray', + field=models.CharField(default='', help_text='Subarray', max_length=40), + ), + ] diff --git a/jwql/website/apps/jwql/migrations/0007_auto_20230222_1157.py b/jwql/website/apps/jwql/migrations/0007_auto_20230222_1157.py new file mode 100644 index 000000000..2d4ffdba3 --- /dev/null +++ b/jwql/website/apps/jwql/migrations/0007_auto_20230222_1157.py @@ -0,0 +1,22 @@ +# Generated by Django 3.1.7 on 2023-02-22 16:57 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('jwql', '0006_auto_20230214_1624'), + ] + + operations = [ + migrations.RemoveField( + model_name='rootfileinfo', + name='aperature', + ), + migrations.AddField( + model_name='rootfileinfo', + name='aperture', + field=models.CharField(default='', help_text='Aperture', max_length=40), + ), + ] diff --git a/jwql/website/apps/jwql/migrations/0008_rootfileinfo_exp_type.py b/jwql/website/apps/jwql/migrations/0008_rootfileinfo_exp_type.py new file mode 100644 index 000000000..2c67cd955 --- /dev/null +++ b/jwql/website/apps/jwql/migrations/0008_rootfileinfo_exp_type.py @@ -0,0 +1,18 @@ +# Generated by Django 3.1.7 on 2023-02-22 17:01 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('jwql', '0007_auto_20230222_1157'), + ] + + operations = [ + migrations.AddField( + model_name='rootfileinfo', + name='exp_type', + field=models.CharField(default='', help_text='Exposure Type', max_length=40), + ), + ] diff --git a/jwql/website/apps/jwql/migrations/0009_auto_20230303_0930.py b/jwql/website/apps/jwql/migrations/0009_auto_20230303_0930.py new file mode 100644 index 000000000..d0dbafc76 --- /dev/null +++ b/jwql/website/apps/jwql/migrations/0009_auto_20230303_0930.py @@ -0,0 +1,63 @@ +# Generated by Django 3.1.7 on 2023-03-03 14:30 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('jwql', '0008_rootfileinfo_exp_type'), + ] + + operations = [ + migrations.RenameField( + model_name='proposal', + old_name='cat_type', + new_name='category', + ), + migrations.AddField( + model_name='rootfileinfo', + name='expstart', + field=models.FloatField(default=0.0, help_text='Exposure Start Time'), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='aperture', + field=models.CharField(blank=True, default='', help_text='Aperture', max_length=40, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='detector', + field=models.CharField(blank=True, default='', help_text='Detector', max_length=40, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='exp_type', + field=models.CharField(blank=True, default='', help_text='Exposure Type', max_length=40, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='filter', + field=models.CharField(blank=True, default='', help_text='Instrument name', max_length=7, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='grating', + field=models.CharField(blank=True, default='', 
help_text='Grating', max_length=40, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='pupil', + field=models.CharField(blank=True, default='', help_text='Pupil', max_length=40, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='read_patt', + field=models.CharField(blank=True, default='', help_text='Read Pattern', max_length=40, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='subarray', + field=models.CharField(blank=True, default='', help_text='Subarray', max_length=40, null=True), + ), + ] diff --git a/jwql/website/apps/jwql/migrations/0010_auto_20230313_1053.py b/jwql/website/apps/jwql/migrations/0010_auto_20230313_1053.py new file mode 100644 index 000000000..3d80e95fe --- /dev/null +++ b/jwql/website/apps/jwql/migrations/0010_auto_20230313_1053.py @@ -0,0 +1,23 @@ +# Generated by Django 3.1.7 on 2023-03-13 15:53 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('jwql', '0009_auto_20230303_0930'), + ] + + operations = [ + migrations.AddField( + model_name='anomalies', + name='flag_date', + field=models.DateTimeField(blank=True, help_text='flag date', null=True), + ), + migrations.AddField( + model_name='anomalies', + name='user', + field=models.CharField(blank=True, default='', help_text='user', max_length=50, null=True), + ), + ] diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/__init__.py b/jwql/website/apps/jwql/migrations/__init__.py similarity index 100% rename from jwql/instrument_monitors/miri_monitors/data_trending/__init__.py rename to jwql/website/apps/jwql/migrations/__init__.py diff --git a/jwql/website/apps/jwql/models.py b/jwql/website/apps/jwql/models.py index 90454b267..2d39bd340 100644 --- a/jwql/website/apps/jwql/models.py +++ b/jwql/website/apps/jwql/models.py @@ -1,7 +1,5 @@ """Defines the models for the ``jwql`` app. -** CURRENTLY NOT IN USE ** - In Django, "a model is the single, definitive source of information about your data. It contains the essential fields and behaviors of the data you’re storing. Generally, each model maps to a single database @@ -14,7 +12,8 @@ Authors ------- - Lauren Chambers - + - Bryan Hilbert + - Brad Sappington Use --- This module is used as such: @@ -29,8 +28,6 @@ ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` """ -import os - from django.db import models @@ -41,40 +38,148 @@ ('NIRSpec', 'NIRSpec')) -class BaseModel(models.Model): - """A base model that other classes will inherit. Created to avoid - an obscure error about a missing ``app_label``. - """ +class Archive(models.Model): + """A class defining the model used to hold information needed for the archive pages.""" + # Fields + instrument = models.CharField(max_length=7, help_text="Instrument name", primary_key=True) + + # … + # Metadata class Meta: - abstract = True # specify this model as an Abstract Model app_label = 'jwql' + ordering = ['instrument'] + def __str__(self): + """String for representing the Archive object (in Admin site etc.).""" + return self.instrument -class ImageData(BaseModel): - """A model that collects image filepaths, instrument labels, and - publishing date/time. Just an example used for learning django. - Attributes - ---------- - filepath : FilePathField object - The full filepath of the datum - inst : CharField object - Name of the corresponding JWST instrument - pub_date : FilePathField object - Date and time when datum was added to the database. 
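
Taken together, these migrations and the models in this file define an Archive -> Proposal -> Observation hierarchy (Archive above, with Proposal, Observation, and RootFileInfo just below), where RootFileInfo carries the per-exposure metadata used for filtering and sorting. A minimal, illustrative ORM traversal follows; the instrument string is a placeholder, and the exact stored values depend on how the archive pages populate the tables:

```python
# Illustrative sketch of walking the model hierarchy defined in this diff.
from jwql.website.apps.jwql.models import Archive, Observation, Proposal, RootFileInfo

archive = Archive.objects.get(instrument='NIRCam')  # placeholder instrument string
for proposal in Proposal.objects.filter(archive=archive):
    observations = Observation.objects.filter(proposal=proposal)
    num_files = RootFileInfo.objects.filter(proposal=proposal.prop_id).count()
    print(proposal.prop_id, observations.count(), num_files)
```
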
- """ +class Proposal(models.Model): + """A class defining the model used to hold information about a given proposal""" + # Fields + prop_id = models.CharField(max_length=5, help_text="5-digit proposal ID string") + thumbnail_path = models.CharField(max_length=100, help_text='Path to the proposal thumbnail', default='') + archive = models.ForeignKey(Archive, blank=False, null=False, on_delete=models.CASCADE) + category = models.CharField(max_length=10, help_text="Category Type", default='') - inst = models.CharField('instrument', max_length=7, choices=INSTRUMENT_LIST, default=None) - pub_date = models.DateTimeField('date published') - filepath = models.FilePathField(path='/user/lchambers/jwql/') + # Metadata + class Meta: + app_label = 'jwql' + ordering = ['-prop_id'] + unique_together = ('prop_id', 'archive') + models.UniqueConstraint(fields=['prop_id', 'archive'], name='unique_instrument_proposal') - def filename(self): - return os.path.basename(self.filepath) + def __str__(self): + """String for representing the Archive object (in Admin site etc.).""" + return self.prop_id + + +class Observation(models.Model): + """A class defining the model used to hold information about an observation from a given proposal""" + # Fields + obsnum = models.CharField(max_length=3, help_text='Observation number, as a 3 digit string') + number_of_files = models.IntegerField(help_text='Number of files in the proposal', default=0) + obsstart = models.FloatField(help_text='Time of the beginning of the observation in MJD', default=0.) + obsend = models.FloatField(help_text='Time of the end of the observation in MJD', default=0.) + proposal = models.ForeignKey(Proposal, blank=False, null=False, on_delete=models.CASCADE) + exptypes = models.CharField(max_length=100, help_text='Comma-separated list of exposure types', default='') + + # … + # Metadata + class Meta: + app_label = 'jwql' + ordering = ['-obsnum'] + models.UniqueConstraint(fields=['proposal', 'obsnum'], name='unique_proposal_obsnum') + + def __str__(self): + """String for representing the Archive object (in Admin site etc.).""" + return self.obsnum + + +class RootFileInfo(models.Model): + """ All info stored with root file for ease of sorting """ + instrument = models.CharField(max_length=7, help_text="Instrument name") + obsnum = models.ForeignKey(Observation, blank=False, null=False, on_delete=models.CASCADE) + proposal = models.CharField(max_length=5, help_text="5-digit proposal ID string") + root_name = models.TextField(primary_key=True, max_length=300) + viewed = models.BooleanField(default=False) + filter = models.CharField(max_length=7, help_text="Instrument name", default='', null=True, blank=True) + aperture = models.CharField(max_length=40, help_text="Aperture", default='', null=True, blank=True) + detector = models.CharField(max_length=40, help_text="Detector", default='', null=True, blank=True) + read_patt_num = models.IntegerField(help_text='Read Pattern Number', default=0) + read_patt = models.CharField(max_length=40, help_text="Read Pattern", default='', null=True, blank=True) + grating = models.CharField(max_length=40, help_text="Grating", default='', null=True, blank=True) + subarray = models.CharField(max_length=40, help_text="Subarray", default='', null=True, blank=True) + pupil = models.CharField(max_length=40, help_text="Pupil", default='', null=True, blank=True) + exp_type = models.CharField(max_length=40, help_text="Exposure Type", default='', null=True, blank=True) + expstart = models.FloatField(help_text='Exposure Start Time', 
default=0.0) + + # Metadata + class Meta: + app_label = 'jwql' + ordering = ['-root_name'] def __str__(self): - return self.filename() + """String for representing the RootFileInfo object (in Admin site etc.).""" + return self.root_name + + +class Anomalies(models.Model): + """ All Potential Anomalies that can be associated with a RootFileInfo """ + # Note: Using one to one relationship. Cann access Anomalies by 'rootfileinfo_object.anomalies' + root_file_info = models.OneToOneField( + RootFileInfo, + on_delete=models.CASCADE, + primary_key=True, + ) + flag_date = models.DateTimeField(help_text="flag date", null=True, blank=True) + user = models.CharField(max_length=50, help_text="user", default='', null=True, blank=True) + cosmic_ray_shower = models.BooleanField(default=False) + diffraction_spike = models.BooleanField(default=False) + excessive_saturation = models.BooleanField(default=False) + guidestar_failure = models.BooleanField(default=False) + persistence = models.BooleanField(default=False) + crosstalk = models.BooleanField(default=False) + data_transfer_error = models.BooleanField(default=False) + ghost = models.BooleanField(default=False) + snowball = models.BooleanField(default=False) + column_pull_up = models.BooleanField(default=False) + column_pull_down = models.BooleanField(default=False) + dominant_msa_leakage = models.BooleanField(default=False) + dragons_breath = models.BooleanField(default=False) + mrs_glow = models.BooleanField(default=False) + mrs_zipper = models.BooleanField(default=False) + internal_reflection = models.BooleanField(default=False) + optical_short = models.BooleanField(default=False) + row_pull_up = models.BooleanField(default=False) + row_pull_down = models.BooleanField(default=False) + lrs_contamination = models.BooleanField(default=False) + tree_rings = models.BooleanField(default=False) + scattered_light = models.BooleanField(default=False) + claws = models.BooleanField(default=False) + wisps = models.BooleanField(default=False) + tilt_event = models.BooleanField(default=False) + light_saber = models.BooleanField(default=False) + other = models.BooleanField(default=False) + + def get_marked_anomalies(self): + """Return all boolean field names (anomalies) currently set""" + true_anomalies = [] + for field, value in vars(self).items(): + if isinstance(value, bool) and value: + true_anomalies.append(field) + return true_anomalies + + @classmethod + def get_all_anomalies(cls): + """Return list of all anomalies (assumed as any field with default of False)""" + return [f.name for f in cls._meta.fields if isinstance(f.default, bool)] class Meta: - verbose_name_plural = "image data" - db_table = 'imagedata' + app_label = 'jwql' + ordering = ['-root_file_info'] + + def __str__(self): + """Container for all anomalies associated with each RootFileInfo object """ + return self.root_file_info.root_name diff --git a/jwql/website/apps/jwql/monitor_pages/__init__.py b/jwql/website/apps/jwql/monitor_pages/__init__.py index ed0aaf2fd..ed184d7ff 100644 --- a/jwql/website/apps/jwql/monitor_pages/__init__.py +++ b/jwql/website/apps/jwql/monitor_pages/__init__.py @@ -1,6 +1 @@ -from .monitor_bad_pixel_bokeh import BadPixelMonitor -from .monitor_bias_bokeh import BiasMonitor -from .monitor_dark_bokeh import DarkMonitor -from .monitor_filesystem_bokeh import MonitorFilesystem -from .monitor_mast_bokeh import MastMonitor -from .monitor_readnoise_bokeh import ReadnoiseMonitor +from .monitor_cosmic_rays_bokeh import CosmicRayMonitor diff --git 
a/jwql/website/apps/jwql/monitor_pages/dark_monitor.py b/jwql/website/apps/jwql/monitor_pages/dark_monitor.py deleted file mode 100644 index cc83d2ce7..000000000 --- a/jwql/website/apps/jwql/monitor_pages/dark_monitor.py +++ /dev/null @@ -1,41 +0,0 @@ -import os - -import numpy as np - -from jwql.bokeh_templating import BokehTemplate -from jwql.utils.utils import get_config - -SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class DarkMonitor(BokehTemplate): - - def pre_init(self): - - self._embed = True - - # app design - self.format_string = None - self.interface_file = os.path.join(SCRIPT_DIR, "dark_monitor_interface.yml") - - self.settings = get_config() - self.output_dir = self.settings['outputs'] - - self.load_data() - self.timestamps = np.arange(10) / 10. - self.dark_current = np.arange(10) - - def post_init(self): - - self.refs['dark_current_yrange'].start = min(self.dark_current) - self.refs['dark_current_yrange'].end = max(self.dark_current) - - def load_data(self): - # actually load data: - new_data = np.arange(10) / 20 - - # update columndatasource - self.dark_current = new_data - - -DarkMonitor() diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_bad_pixel_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_bad_pixel_bokeh.py index 0be5d85a0..5e522f7cd 100755 --- a/jwql/website/apps/jwql/monitor_pages/monitor_bad_pixel_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_bad_pixel_bokeh.py @@ -8,378 +8,714 @@ Use --- - This module can be used from the command line as such: + This module can be used from the command line like this: :: from jwql.website.apps.jwql import monitor_pages - monitor_template = monitor_pages.BadPixelMonitor('NIRCam', 'NRCA3_FULL') - script, div = monitor_template.embed("bad_pixel_time_figure") + monitor_pages.BadPixelPlots('nircam') """ import os from astropy.io import fits +from astropy.stats import sigma_clipped_stats +from astropy.time import Time +from bokeh.embed import components, file_html +from bokeh.io import show +from bokeh.layouts import layout +from bokeh.models import ColumnDataSource, DatetimeTickFormatter, HoverTool, Legend, LinearColorMapper, Panel, Tabs, Text, Title +from bokeh.plotting import figure +from bokeh.resources import CDN import datetime import numpy as np +from sqlalchemy import and_, func -from jwql.database.database_interface import session +from jwql.database.database_interface import get_unique_values_per_column, session from jwql.database.database_interface import NIRCamBadPixelQueryHistory, NIRCamBadPixelStats from jwql.database.database_interface import NIRISSBadPixelQueryHistory, NIRISSBadPixelStats from jwql.database.database_interface import MIRIBadPixelQueryHistory, MIRIBadPixelStats from jwql.database.database_interface import NIRSpecBadPixelQueryHistory, NIRSpecBadPixelStats from jwql.database.database_interface import FGSBadPixelQueryHistory, FGSBadPixelStats -from jwql.utils.constants import BAD_PIXEL_TYPES, DARKS_BAD_PIXEL_TYPES, FLATS_BAD_PIXEL_TYPES, JWST_INSTRUMENT_NAMES_MIXEDCASE -from jwql.utils.utils import filesystem_path -from jwql.bokeh_templating import BokehTemplate +from jwql.utils.constants import BAD_PIXEL_MONITOR_MAX_POINTS_TO_PLOT, BAD_PIXEL_TYPES, DARKS_BAD_PIXEL_TYPES +from jwql.utils.constants import DETECTOR_PER_INSTRUMENT, FLATS_BAD_PIXEL_TYPES, JWST_INSTRUMENT_NAMES_MIXEDCASE +from jwql.utils.permissions import set_permissions +from jwql.utils.utils import filesystem_path, get_config, read_png, save_png SCRIPT_DIR = 
os.path.dirname(os.path.abspath(__file__)) +OUTPUT_DIR = get_config()['outputs'] -class BadPixelMonitor(BokehTemplate): +class BadPixelPlots(): + """Class for creating the bad pixel monitor plots and figures to be displayed + in the web app - # Combine instrument and aperture into a single property because we - # do not want to invoke the setter unless both are updated - @property - def aperture_info(self): - return (self._instrument, self._aperture) + Attributes + ---------- + instrument : str + Name of instrument (e.g. 'nircam') - @aperture_info.setter - def aperture_info(self, info): - self._instrument, self._aperture = info - self.pre_init() - self.post_init() + detectors : list + List of detectors corresponding to ```instrument```. One tab will be created + for each detector. - def bad_pixel_history(self, bad_pixel_type): - """Use the database to construct information on the total number - of a given type of bad pixels over time + pixel_table : sqlalchemy table + Table containing bad pixel information for each detector + + query_table : sqlalchemy table + Table containing history of bad pixel monitor runs and files used + + _html : str + HTML for the bad pixel monitor page + """ + def __init__(self, instrument): + self.instrument = instrument.lower() + + # Get the relevant database tables + self.identify_tables() + + self.detectors = sorted(DETECTOR_PER_INSTRUMENT[self.instrument]) + if self.instrument == 'miri': + self.detectors = ['MIRIMAGE'] + + self.run() + + def identify_tables(self): + """Determine which database tables as associated with + a given instrument""" + mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument.lower()] + self.query_table = eval('{}BadPixelQueryHistory'.format(mixed_case_name)) + self.pixel_table = eval('{}BadPixelStats'.format(mixed_case_name)) + + def modify_bokeh_saved_html(self): + """Given an html string produced by Bokeh when saving bad pixel monitor plots, + make tweaks such that the page follows the general JWQL page formatting. + """ + lines = self._html.split('\n') + + # List of lines that Bokeh likes to save in the file, but we don't want + lines_to_remove = ["", + '', + ' ', + ''] + + # Our Django-related lines that need to be at the top of the file + hstring = """href="{{'/jwqldb/%s_bad_pixel_stats'%inst.lower()}}" name=test_link class="btn btn-primary my-2" type="submit">Go to JWQLDB page""" + newlines = ['{% extends "base.html" %}\n', "\n", + "{% block preamble %}\n", "\n", + f"{JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument]} Bad Pixel Monitor- JWQL\n", "\n", + "{% endblock %}\n", "\n", + "{% block content %}\n", "\n", + '
\n', "\n", + f"

{JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument]} Bad Pixel Monitor

\n", + "
\n", + f" View or Download {JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument]} Bad Pixel Stats Table:\n" + ] + + # More lines that we want to have in the html file, at the bottom + endlines = ["\n", + "
\n", "\n", + "{% endblock %}" + ] + + for line in lines: + if line not in lines_to_remove: + newlines.append(line + '\n') + newlines = newlines + endlines + + self._html = "".join(newlines) + + def run(self): + + # Right now, the aperture name in the query history table is used as the title of the + # bad pixel plots. The name associated with entries in the bad pixel stats table is the + # detector name. Maybe we should switch to use this. + detector_panels = [] + for detector in self.detectors: + + # Get data from the database + data = BadPixelData(self.pixel_table, self.instrument, detector) + + # Create plots of the location of new bad pixels + all_plots = {} + all_plots['new_pix'] = {} + all_plots['trending'] = {} + for badtype in data.badtypes: + all_plots['new_pix'][badtype] = NewBadPixPlot(detector, badtype, data.num_files[badtype], data.new_bad_pix[badtype], + data.background_file[badtype], data.baseline_file[badtype], + data.obs_start_time[badtype], data.obs_end_time[badtype]).plot + all_plots['trending'][badtype] = BadPixTrendPlot(detector, badtype, data.trending_data[badtype]).plot + plot_layout = badpix_monitor_plot_layout(all_plots) + + # Create a tab for each type of plot + detector_panels.append(Panel(child=plot_layout, title=detector)) + + # Build tabs + tabs = Tabs(tabs=detector_panels) + + # Return tab HTML and JavaScript to web app + script, div = components(tabs) + + # Insert into our html template and save + template_dir = os.path.join(os.path.dirname(__file__), '../templates') + template_file = os.path.join(template_dir, 'bad_pixel_monitor_savefile_basic.html') + temp_vars = {'inst': self.instrument, 'plot_script': script, 'plot_div': div} + self._html = file_html(tabs, CDN, f'{self.instrument} bad pix monitor', template_file, temp_vars) + + # Modify the html such that our Django-related lines are kept in place, + # which will allow the page to keep the same formatting and styling as + # the other web app pages + self.modify_bokeh_saved_html() + + # Save html file + outdir = os.path.dirname(template_file) + outfile = f'{self.instrument}_bad_pix_plots.html' + outfile = os.path.join(outdir, outfile) + with open(outfile, "w") as file: + file.write(self._html) + + +class BadPixelData(): + """Class to retrieve and store bad pixel monitor data from the database + + Parameters + ---------- + pixel_table : sqlalchemy table + Table containing bad pixel information for each detector + + instrument : str + Instrument name, e.g. 'nircam' + + detector : str + Detector name, e.g. 'NRCA1' + + Atributes + --------- + background_file : str + Name of one file used to find the current selection of bad pixels + + badtypes : list + List of bad pixel types present in ```pixel_table``` + + baseline_file : str + Name of file containing a previous collection of bad pixels, to be + compared against the new collection of bad pixels + + detector : str + Detector name, e.g. 'NRCA1' + + instrument : str + Instrument name, e.g. 'nircam' + + new_bad_pix : dict + Keys are the types of bad pixels (e.g. 'dead'). The value for each key + is a 2-tuple. The first element is a list of x coordinates, and the second + element is a list of y coordinates, corresponding to the locations of that + type of bad pixel. + + num_files : dict + Keys are the types of bad pixels (e.g. 'dead'). The value of each is the number + of files used when searching for that type of bad pixel. + + obs_end_time : dict + Keys are the types of bad pixels (e.g. 'dead'). 
The value of each is the ending + time (datetime instance) of the observations used to find the bad pixels. + + obs_start_time : dict + Keys are the types of bad pixels (e.g. 'dead'). The value of each is the starting + time (datetime instance) of the observations used to find the bad pixels. + + pixel_table : sqlalchemy table + Table containing bad pixel information for each detector + + trending_data : dict + Keys are the types of bad pixels (e.g. 'dead'). The value of each is a 3-tuple of + data to be used to create the trending plot. The first element is the detector name, + the second is a list of the number of bad pixels, and the third is a list of the + datetimes associated with the bad pixel numbers. + """ + def __init__(self, pixel_table, instrument, detector): + self.pixel_table = pixel_table + self.instrument = instrument + self.detector = detector + self.trending_data = {} + self.new_bad_pix = {} + self.background_file = {} + self.obs_start_time = {} + self.obs_end_time = {} + self.num_files = {} + self.baseline_file = {} + + # Get a list of the bad pixel types present in the database + self.badtypes = get_unique_values_per_column(self.pixel_table, 'type') + + # If the database is empty, return a generic entry showing that fact + if len(self.badtypes) == 0: + self.badtypes = ['BAD'] + + # Get data for the plot of new bad pixels + self.get_most_recent_entry() + + # Get data for the trending plots + for badtype in self.badtypes: + self.get_trending_data(badtype) + + def get_most_recent_entry(self): + """Get all nedded data from the database tables. + """ + # For the given detector, get the latest entry for each bad pixel type + subq = (session + .query(self.pixel_table.type, func.max(self.pixel_table.entry_date).label("max_created")) + .filter(self.pixel_table.detector == self.detector) + .group_by(self.pixel_table.type) + .subquery() + ) + + query = (session.query(self.pixel_table) + .join(subq, self.pixel_table.entry_date == subq.c.max_created) + ) + + latest_entries_by_type = query.all() + session.close() + + # Organize the results + for row in latest_entries_by_type: + self.new_bad_pix[row.type] = (row.x_coord, row.y_coord) + self.background_file[row.type] = row.source_files[0] + self.obs_start_time[row.type] = row.obs_start_time + self.obs_end_time[row.type] = row.obs_end_time + self.num_files[row.type] = len(row.source_files) + self.baseline_file[row.type] = row.baseline_file + + # If no data is retrieved from the database at all, add a dummy generic entry + if len(self.new_bad_pix.keys()) == 0: + self.new_bad_pix[self.badtypes[0]] = ([], []) + self.background_file[self.badtypes[0]] = '' + self.obs_start_time[self.badtypes[0]] = datetime.datetime.today() + self.obs_end_time[self.badtypes[0]] = datetime.datetime.today() + self.num_files[self.badtypes[0]] = 0 + self.baseline_file[self.badtypes[0]] = '' + + def get_trending_data(self, badpix_type): + """Retrieve and organize the data needed to produce the trending plot. Parameters ---------- - bad_pixel_type : str - The flavor of bad pixel (e.g. 'hot') + badpix_type : str + The type of bad pixel to query for, e.g. 
'dead' + """ + # Query database for all data in the table with a matching detector and bad pixel type + all_entries_by_type = session.query(self.pixel_table.type, self.pixel_table.detector, func.array_length(self.pixel_table.x_coord, 1), + self.pixel_table.obs_mid_time) \ + .filter(and_(self.pixel_table.detector == self.detector, self.pixel_table.type == badpix_type)) \ + .all() - Returns - ------- - num_bad_pixels : numpy.ndarray - 1D array of the number of bad pixels + # Organize the results + num_pix = [] + times = [] + for i, row in enumerate(all_entries_by_type): + if i == 0: + badtype = row[0] + detector = row[1] + num_pix.append(row[2]) + times.append(row[3]) + + # If there was no data in the database, create an empty entry + if len(num_pix) == 0: + badtype = badpix_type + detector = self.detector + num_pix = [0] + times = [datetime.datetime.today()] + + # Add results to self.trending_data + self.trending_data[badpix_type] = (detector, num_pix, times) + session.close() + + +class NewBadPixPlot(): + """Class to create a plot showing the location of newly discovered bad pixels of a certain type + + Parameters + ---------- + detector_name : str + Name of detector, e.g. NRCA1 + + badpix_type : str + Type of bad pixel, e.g. 'dead' + + nfiles : int + Number of files used to find the bad pixels + + coords : tuple + 2-tuple. The first element is a list of x coordinates, and the second + element is a list of y coordinates, corresponding to the locations of that + type of bad pixel. + + background_file : str + Name of one of the files used to find the bad pixels + + baseline_file : str + Name of file containing previously identified bad pixels, which were compared to the + new collection of bad pixels + + obs_start_time : datetime.datetime + Datetime of the beginning of the observations used in the search for the bad pixels + + obs_end_time : datetime.datetime + Datetime of the ending of the observations used in the search for the bad pixels + + Attributes + ---------- + background_file : str + Name of one of the files used to find the bad pixels + + badpix_type : str + Type of bad pixel, e.g. 'dead' + + baseline_file : str + Name of file containing previously identified bad pixels, which were compared to the + new collection of bad pixels + + coords : tuple + 2-tuple. The first element is a list of x coordinates, and the second + element is a list of y coordinates, corresponding to the locations of that + type of bad pixel. + + detector : str + Name of detector, e.g. NRCA1 + + num_files : int + Number of files used to find the bad pixels - dates : datetime.datetime - 1D array of dates/times corresponding to num_bad_pixels + obs_start_time : datetime.datetime + Datetime of the beginning of the observations used in the search for the bad pixels + + obs_end_time : datetime.datetime + Datetime of the ending of the observations used in the search for the bad pixels + + plot : Bokeh.plotting.figure + Figure showing the location of the bad pixels on the detector + + _detlen : int + Number of pixels in one row or column of ```detector``` + + _use_png : bool + Whether or not to create the Bokeh figure using circle glyphs of all bad pixels, or to + save the plot of bad pixels as a png and load that (in order to reduce data volume.) 
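
For orientation, the plotting pattern described in the docstring above amounts to a background image drawn with image_rgba() plus a scatter overlay of bad-pixel coordinates. A stripped-down, self-contained sketch of that pattern (synthetic RGBA background and placeholder coordinates, not code from this module):

```python
# Illustrative sketch only: background image + bad-pixel overlay, in the style of
# NewBadPixPlot.create_plot(), with a synthetic image in place of the saved PNG.
import numpy as np
from bokeh.plotting import figure

detlen = 2048  # NIRCam-style full frame; MIRI would use 1024
img = np.empty((detlen, detlen), dtype=np.uint32)
view = img.view(dtype=np.uint8).reshape((detlen, detlen, 4))
view[:, :, :3] = 128   # mid-gray background
view[:, :, 3] = 255    # fully opaque

fig = figure(title='NRCA1: new dead pixels', x_axis_label='Pixel Number',
             y_axis_label='Pixel Number', tools='pan,box_zoom,reset,wheel_zoom,save')
fig.image_rgba(image=[img], x=0, y=0, dw=detlen, dh=detlen, alpha=0.5)
fig.circle([120, 1500, 1800], [300, 950, 1650], size=4, color='red', legend_label='dead')
```
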
+ """ + def __init__(self, detector_name, badpix_type, nfiles, coords, background_file, baseline_file, obs_start_time, obs_end_time): + self.detector = detector_name + self.badpix_type = badpix_type + self.num_files = nfiles + self.coords = coords + self.background_file = background_file + self.baseline_file = baseline_file + self.obs_start_time = obs_start_time + self.obs_end_time = obs_end_time + + # If no background file is given, we fall back to plotting the bad pixels + # on top of an empty image. In that case, we need to know how large the + # detector is, just to create an image of the right size. + if 'MIRI' in self.detector.upper(): + self._detlen = 1024 + else: + self._detlen = 2048 + + # If there are "too many" points then we are going to save the plot as + # a png rather than send all the data to the browser.\ + self._use_png = False + if len(self.coords[0]) > BAD_PIXEL_MONITOR_MAX_POINTS_TO_PLOT: + self._use_png = True + + self.create_plot() + + def create_plot(self): + """Create the plot by showing background image, and marking the locations + of new bad pixels on top. We load a png file of the background image rather + than the original fits file in order to reduce the amount of data in the + final html file. """ - # Find all the rows corresponding to the requested type of bad pixel - rows = [row for row in self.bad_pixel_table if row.type == bad_pixel_type] - - # Extract the dates and number of bad pixels from each entry - dates = [row.obs_mid_time for row in rows] - num = [len(row.coordinates[0]) for row in rows] - - # If there are no valid entres in the database, return None - if len(dates) == 0: - return None, None - - # Sort by date to make sure everything is in chronological order - chrono = np.argsort(dates) - dates = dates[chrono] - num = num[chrono] - - # Sum the number of bad pixels found from the earliest entry up to - # each new entry - num_bad_pixels = [np.sum(num[0:i]) for i in range(1, len(num) + 1)] - - return num_bad_pixels, dates - - def _badpix_image(self): - """Update bokeh objects with sample image data.""" - - # Open the mean dark current file and get the data - with fits.open(self.image_file) as hdulist: - data = hdulist[1].data - - # Grab only one frame - ndims = len(data.shape) - if ndims == 4: - data = data[0, -1, :, :] - elif ndims == 3: - data = data[-1, :, :] - elif ndims == 2: - pass + # Read in the data, or create an empty array + png_file = self.background_file.replace('.fits', '.png') + full_path_background_file = os.path.join(OUTPUT_DIR, 'bad_pixel_monitor/', png_file) + + if os.path.isfile(full_path_background_file): + image = read_png(full_path_background_file) else: - raise ValueError('Unrecognized number of dimensions in data file: {}'.format(ndims)) + image = None - # Update the plot with the data and boundaries - y_size, x_size = data.shape - self.refs["bkgd_image"].data['image'] = [data] - self.refs["stamp_xr"].end = x_size - self.refs["stamp_yr"].end = y_size - self.refs["bkgd_source"].data['dw'] = [x_size] - self.refs["bkgd_source"].data['dh'] = [y_size] + start_time = self.obs_start_time.strftime("%m/%d/%Y") + end_time = self.obs_end_time.strftime("%m/%d/%Y") - # Set the image color scale - self.refs["log_mapper"].high = 0 - self.refs["log_mapper"].low = -.2 + title_text = f'{self.detector}: New {self.badpix_type} pix: from {self.num_files} files. 
{start_time} to {end_time}'
-        # Add a title
-        self.refs['badpix_map_figure'].title.text = '{}: New Bad Pixels'.format(self._aperture)
-        self.refs['badpix_map_figure'].title.align = "center"
-        self.refs['badpix_map_figure'].title.text_font_size = "20px"
+        # Create figure
+        # If there are "too many" points then we are going to save the plot as
+        # a png rather than send all the data to the browser. In that case, we
+        # don't want to add any tools to the figure
+        if not self._use_png:
+            tools = 'pan,box_zoom,reset,wheel_zoom,save'
+            self.plot = figure(title=title_text, tools=tools,
+                               x_axis_label="Pixel Number", y_axis_label="Pixel Number")
+        else:
+            self.plot = figure(tools='')
+            self.plot.toolbar.logo = None
+            self.plot.toolbar_location = None
+            self.plot.min_border = 0
+            self.plot.xaxis.visible = False
+            self.plot.yaxis.visible = False
+
+        self.plot.x_range.range_padding = self.plot.y_range.range_padding = 0
+
+        # Plot image
+        if image is not None:
+            ny, nx = image.shape
+            # Shift the figure title slightly right in this case to get it
+            # to align with the axes
+            self.plot.image_rgba(image=[image], x=0, y=0, dw=self._detlen, dh=self._detlen, alpha=0.5)
+        else:
+            # If the background image is not present, manually set the x and y range
+            self.plot.x_range.start = 0
+            self.plot.x_range.end = self._detlen
+            self.plot.y_range.start = 0
+            self.plot.y_range.end = self._detlen
 
-    def most_recent_coords(self, bad_pixel_type):
-        """Return the coordinates of the bad pixels in the most recent
-        database entry for the given bad pixel type
+        legend_title = f'Compared to baseline file {os.path.basename(self.baseline_file)}'
 
-        Parameters
-        ----------
-        bad_pixel_type : str
-            The flavor of bad pixel (e.g. 'hot')
+        # Overplot locations of bad pixels for the bad pixel type
+        plot_legend = self.overplot_bad_pix()
+
+        # If there are "too many" points, we have already omitted all of the bokeh tools.
+        # Now we export as a png and place that into the figure, as a way of reducing the
+        # amount of data sent to the browser. This png will be saved and immediately read
+        # back in.
+        if self._use_png:
+            output_filename = full_path_background_file.replace('.png', f'_{self.badpix_type}_pix.png')
+            self.switch_to_png(output_filename, title_text)
+
+        # Create and add legend to the figure
+        legend = Legend(items=[plot_legend],
+                        location="center",
+                        orientation='vertical',
+                        title=legend_title)
+
+        self.plot.add_layout(legend, 'below')
+
+    def overplot_bad_pix(self):
+        """Add a scatter plot of potential new bad pixels to the plot
 
         Returns
         -------
-        coords : tup
-            Tuple containing a list of x coordinates and a list of y
-            coordinates
+        legend_item : tup
+            Tuple of legend text and associated plot. 
Will be converted into + a LegendItem and added to the plot legend """ - # Find all the rows corresponding to the requested type of bad pixel - rows = [row for row in self.bad_pixel_table if row.type == bad_pixel_type] - - # Extract dates, number of bad pixels, and files used from each entry - dates = [row.obs_mid_time for row in rows] - coords = [row.coordinates for row in rows] - files = [row.source_files[0] for row in rows] - - # If there are no valid entres in the database, return None - if len(dates) == 0: - return None, None - - # Sort by date to make sure everything is in chronological order - chrono = np.argsort(dates) - dates = dates[chrono] - coords = coords[chrono] - files = files[chrono] - - # Keep track of the latest timestamp - self.last_timestamp = dates[-1].isoformat() - - # Grab the name of one of the files used when these bad pixels - # were identified. We'll use this as an image on top of which - # the bad pixels will be noted. Note that these should be - # slope files - self.image_file = filesystem_path(files[-1]) - - # Return the list of coordinates for the most recent entry - return coords[-1] - - def pre_init(self): - # Start with default values for instrument and aperture because - # BokehTemplate's __init__ method does not allow input arguments - try: - dummy_instrument = self._instrument - dummy_aperture = self._aperture - except AttributeError: - self._instrument = 'NIRCam' - self._aperture = 'NRCA1_FULL' - - self._embed = True - - # Fix aperture/detector name discrepency - if self._aperture in ['NRCA5_FULL', 'NRCB5_FULL']: - self.detector = '{}LONG'.format(self._aperture[0:4]) + numpix = len(self.coords[0]) + + if numpix > 0: + source = ColumnDataSource(data=dict(pixels_x=self.coords[0], + pixels_y=self.coords[1], + values=[self.badpix_type] * numpix + ) + ) else: - self.detector = self._aperture.split('_')[0] + # If there are no new bad pixels, write text within the figure mentioning that + txt_source = ColumnDataSource(data=dict(x=[self._detlen / 10], y=[self._detlen / 2], + text=[f'No new {self.badpix_type} pixels found'])) + glyph = Text(x="x", y="y", text="text", angle=0., text_color="navy", text_font_size={'value': '20px'}) + self.plot.add_glyph(txt_source, glyph) + + # Insert a fake one, in order to get the plot to be made + fakex = np.array([0, self._detlen, self._detlen, 0]) + fakey = np.array([0, 0, self._detlen, self._detlen]) + fakex = [int(e) for e in fakex] + fakey = [int(e) for e in fakey] + source = ColumnDataSource(data=dict(pixels_x=fakex, + pixels_y=fakey, + values=['N/A'] * len(fakex) + ) + ) + + # Overplot the bad pixel locations + # If we have very few bad pixels to plot, increase the size of the circles, in order to make + # it easier to find them on the plot + radius = 0.5 + if len(self.coords[0]) < 50: + radius = 1.0 + pink = '#EC04FF' + green = '#07FF1F' + badpixplots = self.plot.circle(x='pixels_x', y='pixels_y', source=source, fill_color=pink, line_color=pink, + fill_alpha=1.0, line_alpha=1.0, radius=radius) + + # Create hover tool for the bad pixel type + # If there are "too many" points then we are going to save the plot as + # a png rather than send all the data to the browser. 
In that case, we + # don't need a hover tool + if not self._use_png: + hover_tool = HoverTool(tooltips=[(f'{self.badpix_type} (x, y):', '(@pixels_x, @pixels_y)'), + ], + renderers=[badpixplots]) + # Add tool to plot + self.plot.tools.append(hover_tool) + + # Add to the legend + text = f"{numpix} potential new {self.badpix_type} pix compared to baseline" + + # Create a tuple to be added to the plot legend + legend_items = (text, [badpixplots]) + return legend_items + + def switch_to_png(self, filename, title): + """Convert the current Bokeh figure from a figure containing circle glyphs to a png + representation. - # App design - self.format_string = None - self.interface_file = os.path.join(SCRIPT_DIR, "yaml", "badpixel_monitor_interface.yaml") + Parameters + ---------- + filename : str + Name of file to save the current figure as a png into - # Load data tables - self.load_data() - self.get_history_data() - # For development, while the database tables are empty - # self.load_dummy_data() + title : str + Title to add to the Figure + """ + # Save the figure as a png + save_png(self.plot, filename=filename) + set_permissions(filename) - # Get dates and coordinates of the most recent entries - self.most_recent_data() + # Read in the png and insert into a replacement figure + fig_array = read_png(filename) + ydim, xdim = fig_array.shape - # This shows that for e.g. NRCA2_FULL, the data are what we expect, - # but somehow the plot is not showing it!!!!!!!! - # if self._aperture != 'NRCA1_FULL': - # raise ValueError(self._aperture, self.latest_bad_from_dark_type, self.latest_bad_from_dark_x, self.latest_bad_from_dark_y) + # Create the figure + self.plot = figure(title=title, x_range=(0, self._detlen), y_range=(0, self._detlen), width=xdim, height=ydim, + tools='pan,box_zoom,reset,wheel_zoom,save', x_axis_label="Pixel Number", y_axis_label="Pixel Number") + self.plot.image_rgba(image=[fig_array], x=0, y=0, dw=self._detlen, dh=self._detlen) - def post_init(self): - self._update_badpix_v_time() - self._update_badpix_loc_plot() + # Now that the data from the png is in the figure, delete the png + os.remove(filename) - def get_history_data(self): - """Extract data on the history of bad pixel numbers from the - database query result - """ - self.bad_history = {} - self.bad_latest = {} - for bad_pixel_type in BAD_PIXEL_TYPES: - matching_rows = [row for row in self.bad_pixel_table if row.type == bad_pixel_type] - if len(matching_rows) != 0: - real_data = True - times = [row.obs_mid_time for row in matching_rows] - num = np.array([len(row.x_coord) for row in matching_rows]) - - latest_row = times.index(max(times)) - self.bad_latest[bad_pixel_type] = (max(times), matching_rows[latest_row].x_coord, matching_rows[latest_row].y_coord) - - # If there are no records of a certain type of bad pixel, then - # fall back to a default date and 0 bad pixels. Remember that - # these plots are always showing the number of NEW bad pixels - # that are not included in the current reference file. 
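
The switch_to_png method above leans on two helpers that are not shown in this hunk: save_png (presumably a thin wrapper around Bokeh's export_png) and read_png (imported from jwql.utils.utils). The following is a rough, self-contained sketch of the same round trip, with the helper behavior assumed rather than taken from this patch; it renders an overly dense figure to a PNG and rebuilds a lightweight figure around that image.

import numpy as np
from bokeh.io import export_png
from bokeh.plotting import figure
from PIL import Image


def rasterize_figure_sketch(dense_fig, filename, detlen, title):
    """Hypothetical equivalent of the switch_to_png idea (not the jwql helper itself)."""
    # PNG export in Bokeh requires selenium plus a web driver
    export_png(dense_fig, filename=filename)

    # Read the PNG back as the flipped, 32-bit RGBA array that image_rgba() expects
    rgba = Image.open(filename).convert('RGBA')
    xdim, ydim = rgba.size
    img = np.empty((ydim, xdim), dtype=np.uint32)
    view = img.view(dtype=np.uint8).reshape((ydim, xdim, 4))
    view[:, :, :] = np.flipud(np.asarray(rgba))

    # Build a small replacement figure that only carries the rasterized image
    new_fig = figure(title=title, x_range=(0, detlen), y_range=(0, detlen),
                     width=xdim, height=ydim,
                     tools='pan,box_zoom,reset,wheel_zoom,save')
    new_fig.image_rgba(image=[img], x=0, y=0, dw=detlen, dh=detlen)
    return new_fig

The payoff is the one described in the comments above: the browser receives a single bitmap instead of tens of thousands of circle glyphs, at the cost of losing the hover tool.
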
- else: - real_data = False - - times = [datetime.datetime(2021, 10, 31), datetime.datetime(2021, 11, 1)] - badpix_x = [1000, 999] - badpix_y = [1000, 999] - num = np.array([0, 0]) - self.bad_latest[bad_pixel_type] = (max(times), badpix_x, badpix_y) - - hover_values = np.array([datetime.datetime.strftime(t, "%d-%b-%Y") for t in times]) - self.bad_history[bad_pixel_type] = (times, num, hover_values) - - # if real_data: - # raise ValueError(bad_pixel_type, self.bad_history[bad_pixel_type]) - def identify_tables(self): - """Determine which database tables as associated with - a given instrument""" - mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self._instrument.lower()] - self.query_table = eval('{}BadPixelQueryHistory'.format(mixed_case_name)) - self.pixel_table = eval('{}BadPixelStats'.format(mixed_case_name)) +class BadPixTrendPlot(): + """Class to create a plot of the number of bad pixels of a certain type versus time - def load_data(self): - """Query the database tables to get data""" + Parameters + ---------- + detector_name : str + Name of the detector, e.g. 'NRCA1' - # Determine which database tables are needed based on instrument - self.identify_tables() + badpix_type : str + Type of bad pixel, e.g. 'dead' - # Query database for all data with a matching aperture - self.bad_pixel_table = session.query(self.pixel_table) \ - .filter(self.pixel_table.detector == self.detector) \ - .all() + entry : tup + 3-tuple of the data to be plotted. (BadPixelData.trending_data for a certain type + of bad pixel). The first element is the detector name, the second is a list of + the number of bad pixels, and the third is a list of the datetimes associated + with the bad pixel numbers. - session.close() + Attributes + ---------- + detector : str + Name of the detector, e.g. 'NRCA1' - def load_dummy_data(self): - """Create dummy data for Bokeh plot development""" - import datetime - - # Populate a dictionary with the number of bad pixels vs time for - # each type of bad pixel. We can't get the full list of bad pixel - # types from the database itself, because if there is a type of bad - # pixel with no found instances, then it won't appear in the database - # Also populate a dictionary containing the locations of all of the - # bad pixels found in the most recent search - self.bad_history = {} - self.bad_latest = {} - for i, bad_pixel_type in enumerate(BAD_PIXEL_TYPES): - - # Comment out while waiting for populated database tables - # num, times = self.bad_pixel_history(bad_pixel_type) - delta = 10 * i - - # Placeholders while we wait for a populated database - days = np.arange(1, 11) - times = np.array([datetime.datetime(2020, 8, day, 12, 0, 0) for day in days]) - num = np.arange(10) - hover_values = np.array([datetime.datetime.strftime(t, "%d-%b-%Y") for t in times]) - - self.bad_history[bad_pixel_type] = (times, num, hover_values) - self.bad_latest[bad_pixel_type] = (datetime.datetime(1999, 12, 31), [500 + delta, 501 + delta, 502 + delta], [4, 4, 4]) - - def most_recent_data(self): - """Get the bad pixel type and coordinates associated with the most - recent run of the monitor. 
Note that the most recent date can be - different for dark current data vs flat field data - """ - self.latest_bad_from_dark_type = [] - self.latest_bad_from_dark_x = [] - self.latest_bad_from_dark_y = [] - dark_times = [self.bad_latest[bad_pixel_type][0] for bad_pixel_type in DARKS_BAD_PIXEL_TYPES] - if len(dark_times) > 0: - self.most_recent_dark_date = max(dark_times) - else: - self.most_recent_dark_date = datetime.datetime(1999, 10, 31) - - for bad_pixel_type in DARKS_BAD_PIXEL_TYPES: - if self.bad_latest[bad_pixel_type][0] == self.most_recent_dark_date: - self.latest_bad_from_dark_type.extend([bad_pixel_type] * len(self.bad_latest[bad_pixel_type][1])) - self.latest_bad_from_dark_x.extend(self.bad_latest[bad_pixel_type][1]) - self.latest_bad_from_dark_y.extend(self.bad_latest[bad_pixel_type][2]) - - self.latest_bad_from_dark_type = np.array(self.latest_bad_from_dark_type) - self.latest_bad_from_dark_x = np.array(self.latest_bad_from_dark_x) - self.latest_bad_from_dark_y = np.array(self.latest_bad_from_dark_y) - - self.latest_bad_from_flat_type = [] - self.latest_bad_from_flat_x = [] - self.latest_bad_from_flat_y = [] - - self.latest_bad_from_flat = [[], [], []] - flat_times = [self.bad_latest[bad_pixel_type][0] for bad_pixel_type in FLATS_BAD_PIXEL_TYPES] - if len(flat_times) > 1: - self.most_recent_flat_date = max(flat_times) - else: - self.most_recent_flat_date = datetime.datetime(1999, 10, 31) - for bad_pixel_type in FLATS_BAD_PIXEL_TYPES: - if self.bad_latest[bad_pixel_type][0] == self.most_recent_flat_date: - self.latest_bad_from_flat_type.extend([bad_pixel_type] * len(self.bad_latest[bad_pixel_type][1])) - self.latest_bad_from_flat_x.extend(self.bad_latest[bad_pixel_type][1]) - self.latest_bad_from_flat_y.extend(self.bad_latest[bad_pixel_type][2]) - - self.latest_bad_from_flat_type = np.array(self.latest_bad_from_flat_type) - self.latest_bad_from_flat_x = np.array(self.latest_bad_from_flat_x) - self.latest_bad_from_flat_y = np.array(self.latest_bad_from_flat_y) - - def _update_badpix_loc_plot(self): - """Update the plot properties for the plots showing the locations - of new bad pixels""" - if 'MIR' in self._aperture: - self.refs['dark_position_xrange'].end = 1024 - self.refs['dark_position_yrange'].end = 1024 - self.refs['flat_position_xrange'].end = 1024 - self.refs['flat_position_yrange'].end = 1024 - - dark_date = self.most_recent_dark_date.strftime('%d-%b-%Y %H:%m') - self.refs['dark_position_figure'].title.text = '{} New Bad Pixels (darks). Obs Time: {}'.format(self._aperture, dark_date) - self.refs['dark_position_figure'].title.align = "center" - self.refs['dark_position_figure'].title.text_font_size = "15px" - - flat_date = self.most_recent_flat_date.strftime('%d-%b-%Y %H:%m') - self.refs['flat_position_figure'].title.text = '{} New Bad Pixels (flats). Obs Time: {}'.format(self._aperture, flat_date) - self.refs['flat_position_figure'].title.align = "center" - self.refs['flat_position_figure'].title.text_font_size = "15px" - - def _update_badpix_v_time(self): - """Update the plot properties for the plots of the number of bad - pixels versus time + badpix_type : str + Type of bad pixel, e.g. 
'dead' + + num_pix : list + List of the number of bad pixels found for a list of times + + plot : Bokeh.plotting.figure + Bokeh figure showing a plot of the number of bad pixels versus time + + time : list + List of datetimes associated with ```num_pix``` + """ + def __init__(self, detector_name, badpix_type, entry): + self.detector = detector_name + self.badpix_type = badpix_type + self.detector, self.num_pix, self.time = entry + self.create_plot() + + def create_plot(self): + """Takes the data, places it in a ColumnDataSource, and creates the figure """ - for bad_pixel_type in BAD_PIXEL_TYPES: - bad_pixel_type_lc = bad_pixel_type.lower() - - # Define y ranges of bad pixel v. time plot - buffer_size = 0.05 * (max(self.bad_history[bad_pixel_type][1]) - min(self.bad_history[bad_pixel_type][1])) - if buffer_size == 0: - buffer_size = 1 - self.refs['{}_history_yrange'.format(bad_pixel_type_lc)].start = min(self.bad_history[bad_pixel_type][1]) - buffer_size - self.refs['{}_history_yrange'.format(bad_pixel_type_lc)].end = max(self.bad_history[bad_pixel_type][1]) + buffer_size - - # Define x range of bad_pixel v. time plot - horizontal_half_buffer = (max(self.bad_history[bad_pixel_type][0]) - min(self.bad_history[bad_pixel_type][0])) * 0.05 - if horizontal_half_buffer == 0: - horizontal_half_buffer = 1. # day - self.refs['{}_history_xrange'.format(bad_pixel_type_lc)].start = min(self.bad_history[bad_pixel_type][0]) - horizontal_half_buffer - self.refs['{}_history_xrange'.format(bad_pixel_type_lc)].end = max(self.bad_history[bad_pixel_type][0]) + horizontal_half_buffer - - # Add a title - self.refs['{}_history_figure'.format(bad_pixel_type.lower())].title.text = '{}: {} pixels'.format(self._aperture, bad_pixel_type) - self.refs['{}_history_figure'.format(bad_pixel_type.lower())].title.align = "center" - self.refs['{}_history_figure'.format(bad_pixel_type.lower())].title.text_font_size = "20px" - -# Uncomment the line below when testing via the command line: -# bokeh serve --show monitor_badpixel_bokeh.py -# BadPixelMonitor() + # This plot will eventually be saved to an html file by Bokeh. However, when + # we place the saved html lines into our jinja template files, we cannot have + # datetime formatted data in the hover tool. This is because the saved Bokeh + # html will contain lines such as "time{%d %m %Y}". But jinja sees this and + # interprets the {%d as an html tag, so when you try to load the page, it + # crashes when it finds a bunch of "d" tags that are unclosed. 
To get around
+        # this, we'll create a list of string representations of the datetime values
+        # here, and place these in the ColumnDataSource to be used with the hover tool
+        string_times = [e.strftime('%d %b %Y %H:%M') for e in self.time]
+
+        # Create a ColumnDataSource for the main amp to use
+        source = ColumnDataSource(data=dict(num_pix=self.num_pix,
+                                            time=self.time,
+                                            string_time=string_times,
+                                            value=[self.badpix_type] * len(self.num_pix)
+                                            )
+                                  )
+
+        self.plot = figure(title=f'{self.detector}: New {self.badpix_type} Pixels', tools='pan,box_zoom,reset,wheel_zoom,save',
+                           background_fill_color="#fafafa")
+
+        self.plot.scatter(x='time', y='num_pix', fill_color="navy", alpha=0.75, source=source)
+
+        hover_tool = HoverTool(tooltips=[('# Pixels:', '@num_pix'),
+                                         ('Date:', '@string_time')
+                                         ])
+        self.plot.tools.append(hover_tool)
+
+        # Make the x axis tick labels look nice
+        self.plot.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"],
+                                                          seconds=["%d %b %H:%M:%S.%3N"],
+                                                          hours=["%d %b %H:%M"],
+                                                          days=["%d %b %H:%M"],
+                                                          months=["%d %b %Y %H:%M"],
+                                                          years=["%d %b %Y"]
+                                                          )
+        self.plot.xaxis.major_label_orientation = np.pi / 4
+
+        # Set x range
+        time_pad = (max(self.time) - min(self.time)) * 0.05
+        if time_pad == datetime.timedelta(seconds=0):
+            time_pad = datetime.timedelta(days=1)
+        self.plot.x_range.start = min(self.time) - time_pad
+        self.plot.x_range.end = max(self.time) + time_pad
+        self.plot.grid.grid_line_color = "white"
+        self.plot.xaxis.axis_label = 'Date'
+        self.plot.yaxis.axis_label = f'Number of {self.badpix_type} pixels'
+
+
+def badpix_monitor_plot_layout(plots):
+    """Arrange a set of plots into a bokeh layout. Generate nested lists for
+    the plot layout for a given aperture. Contents of tabs should be similar
+    for all apertures of a given instrument. Keys of the input plots will
+    control the exact layout.
+
+    Parameters
+    ----------
+    plots : dict
+        Nested dictionary containing a set of plots for an aperture.
+        Required keys are 'new_pix' and 'trending'. Each of these contains a
+        dictionary where the keys are the types of bad pixels, and the values
+        are the Bokeh figures. 'new_pix' and 'trending' should contain the
+        same set of keys. 'new_pix' contains the figures showing new bad pixel
+        locations, while 'trending' contains the figures showing the number of
+        bad pixels with time.
+
+    Returns
+    -------
+    plot_layout : bokeh.layouts.layout
+        Layout containing all of the input figures
+    """
+    # Create a list of plots where each plot shows one flavor of bad pixel
+    all_plots = []
+    for badtype in plots["trending"]:
+        rowplots = [plots["new_pix"][badtype], plots["trending"][badtype]]
+        all_plots.append(rowplots)
+
+    # Now create a layout that holds the lists
+    plot_layout = layout(all_plots)
+
+    return plot_layout
diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_bias_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_bias_bokeh.py
index c0381687e..4b5a6a6de 100644
--- a/jwql/website/apps/jwql/monitor_pages/monitor_bias_bokeh.py
+++ b/jwql/website/apps/jwql/monitor_pages/monitor_bias_bokeh.py
@@ -6,6 +6,7 @@
 
     - Ben Sunnquist
    - Maria A. 
Pena-Guerrero + - Bryan Hilbert Use --- @@ -23,163 +24,640 @@ import os from astropy.stats import sigma_clip + +from bokeh.embed import components, file_html +from bokeh.layouts import layout +from bokeh.models import ColorBar, ColumnDataSource, DatetimeTickFormatter, HoverTool, Legend, LinearAxis +from bokeh.models.widgets import Tabs, Panel +from bokeh.plotting import figure, output_file, save +from bokeh.resources import CDN +from datetime import datetime, timedelta import numpy as np +import pandas as pd +from PIL import Image +from sqlalchemy import func from jwql.bokeh_templating import BokehTemplate -from jwql.database.database_interface import session, NIRCamBiasStats, NIRISSBiasStats, NIRSpecBiasStats -from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE +from jwql.database.database_interface import get_unique_values_per_column, NIRCamBiasStats, NIRISSBiasStats, NIRSpecBiasStats, session +from jwql.utils.constants import FULL_FRAME_APERTURES, JWST_INSTRUMENT_NAMES_MIXEDCASE +from jwql.utils.permissions import set_permissions +from jwql.utils.utils import read_png +from jwql.website.apps.jwql.bokeh_utils import PlaceholderPlot + SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) +TEMPLATE_DIR = os.path.join(SCRIPT_DIR, '../templates') -class BiasMonitor(BokehTemplate): +class BiasMonitorData(): + """Class to hold bias data to be plotted - # Combine the input parameters into a single property because we - # do not want to invoke the setter unless all are updated - @property - def input_parameters(self): - return (self._instrument, self._aperture) + Parameters + ---------- + instrument : str + Instrument name (e.g. nircam) - @input_parameters.setter - def input_parameters(self, info): - self._instrument, self._aperture = info - self.pre_init() - self.post_init() + Attributes + ---------- - def identify_tables(self): - """Determine which database tables to use for the given instrument""" + instrument : str + Instrument name (e.g. nircam) - mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self._instrument.lower()] - self.stats_table = eval('{}BiasStats'.format(mixed_case_name)) + latest_data : pandas.DataFrame + Latest bias data for a particular aperture, from the + stats_table - def load_data(self): - """Query the database tables to get all of the relevant bias data""" + stats_table : sqlalchemy.orm.decl_api.DeclarativeMeta + Bias stats sqlalchemy table - # Determine which database tables are needed based on instrument + trending_data : pandas.DataFrame + Data from the stats table to be used for the trending plot + """ + def __init__(self, instrument): + self.instrument = instrument self.identify_tables() + def identify_tables(self): + """Determine which database tables to use for the given instrument""" + mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument.lower()] + self.stats_table = eval('{}BiasStats'.format(mixed_case_name)) + + def retrieve_trending_data(self, aperture): + """Query the database table to get all of the data needed to create + the plots of mean bias signals over time + + Parameters + ---------- + aperture : str + Name of the aperture whose data are being collected. e.g. 'NRCA1_FULL' + """ # Query database for all data in bias stats with a matching aperture, # and sort the data by exposure start time. 
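
Both get_most_recent_entry in the bad pixel classes earlier in this patch and retrieve_latest_data below use the same SQLAlchemy idiom: a grouped subquery that finds the newest timestamp per group, joined back to the table to recover the matching rows. A stripped-down sketch of that pattern is shown here; the choice of NIRCamBiasStats and the column names are purely illustrative.

from sqlalchemy import func

from jwql.database.database_interface import NIRCamBiasStats, session


def latest_entry_per_aperture_sketch():
    """Illustration of the 'newest row per group' query pattern used in this module."""
    # Subquery: newest expstart for every aperture
    subq = (session.query(NIRCamBiasStats.aperture,
                          func.max(NIRCamBiasStats.expstart).label('max_expstart'))
            .group_by(NIRCamBiasStats.aperture)
            .subquery())

    # Join back to the full table to pull the rows that carry those maxima
    query = (session.query(NIRCamBiasStats)
             .join(subq, NIRCamBiasStats.expstart == subq.c.max_expstart))

    results = query.all()
    session.close()
    return results

This keeps the "latest entry" logic in the database rather than pulling every historical row into Python and sorting there.
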
- self.query_results = session.query(self.stats_table) \ - .filter(self.stats_table.aperture == self._aperture) \ + tmp_trending_data = session.query(self.stats_table.amp1_even_med, + self.stats_table.amp1_odd_med, + self.stats_table.amp2_even_med, + self.stats_table.amp2_odd_med, + self.stats_table.amp3_even_med, + self.stats_table.amp3_odd_med, + self.stats_table.amp4_even_med, + self.stats_table.amp4_odd_med, + self.stats_table.expstart, + self.stats_table.uncal_filename) \ + .filter(self.stats_table.aperture == aperture) \ .order_by(self.stats_table.expstart) \ .all() session.close() - def pre_init(self): - - # Start with default values for instrument and aperture because - # BokehTemplate's __init__ method does not allow input arguments - try: - dummy_instrument = self._instrument - dummy_aperture = self._aperture - except AttributeError: - self._instrument = 'NIRCam' - self._aperture = '' - - self._embed = True - self.format_string = None - self.interface_file = os.path.join(SCRIPT_DIR, 'yaml', 'monitor_bias_interface.yaml') - - def post_init(self): - - # Load the bias data - self.load_data() - - # Update the mean bias over time figures - self.update_mean_bias_figures() - - # Update the calibrated 0th group image - self.update_calibrated_image() - - # Update the histogram of the calibrated 0th group image - if self._instrument == 'NIRISS': - self.update_calibrated_histogram() - - # Update the calibrated collapsed values figures - if self._instrument != 'NIRISS': - self.update_collapsed_vals_figures() - - def update_calibrated_histogram(self): - """Updates the calibrated 0th group histogram""" - - if len(self.query_results) != 0: - # Get the most recent data; the entries were sorted by time when - # loading the database, so the last entry will always be the most recent. - counts = np.array(self.query_results[-1].counts) - bin_centers = np.array(self.query_results[-1].bin_centers) - - # Update the calibrated image histogram - self.refs['cal_hist_source'].data = {'counts': counts, - 'bin_centers': bin_centers} - self.refs['cal_hist_xr'].start = bin_centers.min() - self.refs['cal_hist_xr'].end = bin_centers.max() - self.refs['cal_hist_yr'].start = counts.min() - self.refs['cal_hist_yr'].end = counts.max() + counts.max() * 0.05 - - def update_calibrated_image(self): - """Updates the calibrated 0th group image""" - - if len(self.query_results) != 0: - # Get the most recent data; the entries were sorted by time when - # loading the database, so the last entry will always be the most recent. - cal_image_png = self.query_results[-1].cal_image - cal_image_png = os.path.join('/static', '/'.join(cal_image_png.split('/')[-6:])) - - # Update the image source for the figure - self.refs['cal_image'].image_url(url=[cal_image_png], x=0, y=0, w=2048, h=2048, anchor="bottom_left") - - # Update the calibrated image style - self.refs['cal_image'].xaxis.visible = False - self.refs['cal_image'].yaxis.visible = False - self.refs['cal_image'].xgrid.grid_line_color = None - self.refs['cal_image'].ygrid.grid_line_color = None - self.refs['cal_image'].title.text_font_size = '22px' - self.refs['cal_image'].title.align = 'center' - - def update_collapsed_vals_figures(self): - """Updates the calibrated median-collapsed row and column figures""" - - if len(self.query_results) != 0: - for direction in ['rows', 'columns']: - # Get most recent data; the entries were sorted by time when - # loading the database, so the last entry will always be the most recent. 
- vals = np.array(self.query_results[-1].__dict__['collapsed_{}'.format(direction)]) - pixels = np.arange(len(vals)) - self.refs['collapsed_{}_source'.format(direction)].data = {'pixel': pixels, - 'signal': vals} - - # Update the pixel and signal limits - self.refs['collapsed_{}_pixel_range'.format(direction)].start = pixels.min() - 10 - self.refs['collapsed_{}_pixel_range'.format(direction)].end = pixels.max() + 10 - self.refs['collapsed_{}_signal_range'.format(direction)].start = vals[4:2044].min() - 10 # excluding refpix - self.refs['collapsed_{}_signal_range'.format(direction)].end = vals[4:2044].max() + 10 - - def update_mean_bias_figures(self): - """Updates the mean bias over time bokeh plots""" - - # Get the dark exposures and their starts times - filenames = [os.path.basename(result.uncal_filename).replace('_uncal.fits', '') for result in self.query_results] - expstarts_iso = np.array([result.expstart for result in self.query_results]) - expstarts = np.array([datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f') for date in expstarts_iso]) - - # Update the mean bias figures for all amps and odd/even columns - for amp in ['1', '2', '3', '4']: - for kind in ['odd', 'even']: - bias_vals = np.array([getattr(result, 'amp{}_{}_med'.format(amp, kind)) for result in self.query_results]) - self.refs['mean_bias_source_amp{}_{}'.format(amp, kind)].data = {'time': expstarts, - 'time_iso': expstarts_iso, - 'mean_bias': bias_vals, - 'filename': filenames} - self.refs['mean_bias_figure_amp{}_{}'.format(amp, kind)].title.text = 'Amp {} {}'.format(amp, kind.capitalize()) - self.refs['mean_bias_figure_amp{}_{}'.format(amp, kind)].hover.tooltips = [('file', '@filename'), - ('time', '@time_iso'), - ('bias level', '@mean_bias')] - - # Update plot limits if data exists - if len(bias_vals) != 0: - self.refs['mean_bias_xr_amp{}_{}'.format(amp, kind)].start = expstarts.min() - timedelta(days=3) - self.refs['mean_bias_xr_amp{}_{}'.format(amp, kind)].end = expstarts.max() + timedelta(days=3) - self.refs['mean_bias_yr_amp{}_{}'.format(amp, kind)].start = min(x for x in bias_vals if x is not None) - 20 - self.refs['mean_bias_yr_amp{}_{}'.format(amp, kind)].end = max(x for x in bias_vals if x is not None) + 20 + # Convert the query results to a pandas dataframe + self.trending_data = pd.DataFrame(tmp_trending_data, columns=['amp1_even_med', 'amp1_odd_med', + 'amp2_even_med', 'amp2_odd_med', + 'amp3_even_med', 'amp3_odd_med', + 'amp4_even_med', 'amp4_odd_med', + 'expstart_str', 'uncal_filename']) + uncal_basename = [os.path.basename(e) for e in self.trending_data['uncal_filename']] + self.trending_data['uncal_filename'] = uncal_basename + + # Add a column of expstart values that are datetime objects + format_data = "%Y-%m-%dT%H:%M:%S.%f" + datetimes = [datetime.strptime(entry, format_data) for entry in self.trending_data['expstart_str']] + self.trending_data['expstart'] = datetimes + + def retrieve_latest_data(self, aperture): + """Query the database table to get the data needed for the non-trending + plots. In this case, we need only the most recent entry. + + Parameters + ---------- + aperture : str + Aperture name (e.g. 
NRCA1_FULL) + """ + subq = (session.query(self.stats_table.aperture, func.max(self.stats_table.expstart).label("max_created")) \ + .group_by(self.stats_table.aperture) + .subquery() + ) + + query = (session.query(self.stats_table.aperture, + self.stats_table.uncal_filename, + self.stats_table.cal_filename, + self.stats_table.cal_image, + self.stats_table.expstart, + self.stats_table.collapsed_rows, + self.stats_table.collapsed_columns, + self.stats_table.counts, + self.stats_table.bin_centers, + self.stats_table.entry_date) + .filter(self.stats_table.aperture == aperture) + .order_by(self.stats_table.entry_date) \ + .join(subq, self.stats_table.expstart == subq.c.max_created) + ) + + latest_data = query.all() + session.close() + + # Put the returned data in a dataframe. Include only the most recent entry. + # The query has already filtered to include only entries using the latest + # expstart value. + self.latest_data = pd.DataFrame(latest_data[-1:], columns=['aperture', 'uncal_filename', 'cal_filename', + 'cal_image', 'expstart_str', 'collapsed_rows', + 'collapsed_columns', 'counts', 'bin_centers', + 'entry_date']) + # Add a column of expstart values that are datetime objects + format_data = "%Y-%m-%dT%H:%M:%S.%f" + datetimes = [datetime.strptime(entry, format_data) for entry in self.latest_data['expstart_str']] + self.latest_data['expstart'] = datetimes + + + +class BiasMonitorPlots(): + """This is the top-level class, which will call the BiasMonitorData + class to get results from the bias monitor, and use the plotting + classes to create figures from the data. + + Paramters + --------- + instrument : str + Instrument name (e.g. nircam) + + Attributes + ---------- + + aperture : str + Aperture name (e.g. NRCA1_FULL) + + available_apertures : list + List of apertures present in the data from the database + + div : str + html div output by bokeh.components + + db : jwql.website.apps.jwql.monitor_bias_bokeh.BiasMonitorData + Object containing data retrieved from the bias stats database table + + instrument : str + Instrument name (e.g. nircam) + + histograms : dict + Keys are aperture names, and values are corresponding Bokeh figures + showing histograms of the signal in the dark exposure + + html_file: str + Name of html file to save plots into + + rowcol_plots : dict + Keys are aperture names, and values are corresponding Bokeh figures + showing the mean row and column signal in the dark exposure + + script : str + html script output by bokeh.components + + tabs: bokeh.models.widgets.Tabs + Tabs object containing one Tab for each aperture's plots + + trending_plots : dict + Keys are aperture names, and values are corresponding Bokeh figures + of the bias level versus time + + zerothgroup_plots : dict + Keys are aperture names, and values are corresponding Bokeh images + of the zeroth frames from dark exposures + """ + def __init__(self, instrument): + self.instrument = instrument + self.trending_plots = {} + self.zerothgroup_plots = {} + self.rowcol_plots = {} + self.histograms = {} + + # Get the data from the database + self.db = BiasMonitorData(self.instrument) + + # Now we need to loop over the available apertures and create plots for each + self.available_apertures = get_unique_values_per_column(self.db.stats_table, 'aperture') + + # Make sure all full frame apertures are present. 
If there are no data for a + # particular full frame entry, then produce an empty plot, in order to + # keep the plot layout consistent + self.ensure_all_full_frame_apertures() + + for aperture in self.available_apertures: + self.aperture = aperture + + # Retrieve data from database. + self.db.retrieve_trending_data(self.aperture) + self.db.retrieve_latest_data(self.aperture) + + # Create trending plots. One for each amplifier. + self.trending_plots[self.aperture] = TrendingPlot(self.db.trending_data).plots + + # Create a figure showing the zeroth group image + self.zerothgroup_plots[self.aperture] = ZerothGroupImage(self.db.latest_data).figure + + # Create plots showing median row and column values + self.rowcol_plots[self.aperture] = MedianRowColPlot(self.db.latest_data).plots + + # Create a plot of the histogram of the latest calibrated image + self.histograms[self.aperture] = HistogramPlot(self.db.latest_data).plot + + # Organize plots into tabs + self.create_tabs() + + # Save the tabbed plots using bokeh + self.save_tabs() + + # Modify the saved html file such that it works in our Django ecosystem + self.modify_bokeh_saved_html() + + def create_tabs(self): + """Organize the plots into a separate tab for each aperture + """ + tabs = [] + for aperture in FULL_FRAME_APERTURES[self.instrument.upper()]: + + bias_layout = layout([[self.trending_plots[aperture][1], self.trending_plots[aperture][2]], + [self.trending_plots[aperture][3], self.trending_plots[aperture][4]], + [self.zerothgroup_plots[aperture], self.histograms[aperture]], + [self.rowcol_plots[aperture]['collapsed_rows'], self.rowcol_plots[aperture]['collapsed_columns']] + ] + ) + bias_layout.sizing_mode = 'scale_width' + bias_tab = Panel(child=bias_layout, title=aperture) + tabs.append(bias_tab) + + # Build tabs + self.tabs = Tabs(tabs=tabs) + self.script, self.div = components(self.tabs) + + def ensure_all_full_frame_apertures(self): + """Be sure that self.available_apertures contains entires for all + full frame apertures. These are needed to make sure the plot layout + is consistent later + """ + full_apertures = FULL_FRAME_APERTURES[self.instrument.upper()] + for ap in full_apertures: + if ap not in self.available_apertures: + self.available_apertures.append(ap) + + def modify_bokeh_saved_html(self): + """Given an html string produced by Bokeh when saving bad pixel monitor plots, + make tweaks such that the page follows the general JWQL page formatting. + """ + # Insert into our html template and save + temp_vars = {'inst': self.instrument, 'plot_script': self.script, 'plot_div': self.div} + html_lines = file_html(self.tabs, CDN, f'{self.instrument} bias monitor', self.html_file, temp_vars) + + lines = html_lines.split('\n') + + # List of lines that Bokeh likes to save in the file, but we don't want + lines_to_remove = ["", + '', + ' ', + ''] + + # Our Django-related lines that need to be at the top of the file + hstring = """href="{{'/jwqldb/%s_bias_stats'%inst.lower()}}" name=test_link class="btn btn-primary my-2" type="submit">Go to JWQLDB page""" + newlines = ['{% extends "base.html" %}\n', "\n", + "{% block preamble %}\n", "\n", + f"{JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument]} Bias Monitor- JWQL\n", "\n", + "{% endblock %}\n", "\n", + "{% block content %}\n", "\n", + '
<main role="main" class="container">\n', "\n",
+                    f"  <h1>{JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument]} Bias Monitor</h1>\n",
+                    "  <hr>\n",
+                    f"  <b>View or Download {JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument]} Bias Stats Table:</b> <a {hstring}</a>\n"
+                    ]
+
+        # More lines that we want to have in the html file, at the bottom
+        endlines = ["\n",
+                    "</main>
\n", "\n", + "{% endblock %}" + ] + + for line in lines: + if line not in lines_to_remove: + newlines.append(line + '\n') + newlines = newlines + endlines + + html_lines = "".join(newlines) + + # Save the modified html + with open(self.html_file, "w") as file: + file.write(html_lines) + set_permissions(self.html_file) + + def save_tabs(self): + """Save the Bokeh tabs to an html file + """ + self.html_file = os.path.join(TEMPLATE_DIR, f'{self.instrument.lower()}_bias_plots.html') + output_file(self.html_file) + save(self.tabs) + set_permissions(self.html_file) + + +class HistogramPlot(): + """Class to create histogram plots of bias data + + Parameters + ---------- + data : pandas.DataFrame + Data to be plotted. Required columns include bin_right, bin_left, + counts, expstart_str + + Attributes + ---------- + data : pandas.DataFrame + Data to be plotted. Required columns include bin_right, bin_left, + counts, expstart_str + + plot : bokeh.plotting.figure + Figure containing the histogram plot + """ + def __init__(self, data): + self.data = data + self.create_plot() + + def create_plot(self): + """Create figure of data histogram + """ + x_label = 'Signal (DN)' + y_label = '# Pixels' + # Be sure data is not empty + if len(self.data) > 0: + # Be sure the array of histogram information is not empty + if len(self.data['counts'].iloc[0]) > 0: + + # In order to use Bokeh's quad, we need left and right bin edges, rather than bin centers + bin_centers = np.array(self.data['bin_centers'][0]) + half_widths = (bin_centers[1:] - bin_centers[0:-1]) / 2 + half_widths = np.insert(half_widths, 0, half_widths[0]) + self.data['bin_left'] = [bin_centers - half_widths] + self.data['bin_right'] = [bin_centers + half_widths] + + datestr = self.data['expstart_str'].iloc[0] + self.plot = figure(title=f'Calibrated data: Histogram, {datestr}', tools='pan,box_zoom,reset,wheel_zoom,save', + background_fill_color="#fafafa") + + # Keep only the columns where the data are a list + series = self.data.iloc[0] + series = series[['counts', 'bin_left', 'bin_right', 'bin_centers']] + source = ColumnDataSource(dict(series)) + self.plot.quad(top='counts', bottom=0, left='bin_left', right='bin_right', + fill_color="#C85108", line_color="#C85108", alpha=0.75, source=source) + + hover_tool = HoverTool(tooltips=f'@bin_centers DN: @counts') + self.plot.tools.append(hover_tool) + self.plot.xaxis.axis_label = x_label + self.plot.yaxis.axis_label = y_label + + else: + self.plot = PlaceholderPlot('Calibrated data: Histogram', x_label, y_label).plot + else: + self.plot = PlaceholderPlot('Calibrated data: Histogram', x_label, y_label).plot + + + +class MedianRowColPlot(): + """Class to create a plot of the median signal across rows + or columns + + Parameters + ---------- + data : pandas.DataFrame + Data to be plotted. Required columns include bin_right, bin_left, + counts, expstart_str + + Attributes + ---------- + data : pandas.DataFrame + Data to be plotted. Required columns include bin_right, bin_left, + counts, expstart_str + + plots : dict + Dictionary containing plots. 
Keys are 'collapsed_rows' and 'collapsed_columns', + and the values are the Bokeh figures + """ + def __init__(self, data): + self.data = data + self.create_plots() + + def create_plots(self): + """Create plots of median rows and columns + """ + self.plots = {} + for colname in ['collapsed_rows', 'collapsed_columns']: + self.plots[colname] = self.create_plot(colname) + + def create_plot(self, colname): + """Create a plot showing either the collapsed row or column info + + Parameters + ---------- + frame : pandas.DataFrame + Single column, containing the data to be plotted + + colname : str + Column name from DataFrame containing data to be plotted + + Returns + ------- + plot : bokeh.plotting.figure + Plot of the data contained in ``frame`` + """ + if 'row' in colname.lower(): + title_text = 'Row' + axis_text = 'Column Number' + elif 'column' in colname.lower(): + title_text = 'Column' + axis_text = 'Row Number' + + # Make sure there is data present + if len(self.data) > 0: + # Make sure that the colname column is not empty + if len(self.data[colname].iloc[0]) > 0: + datestr = self.data['expstart_str'].iloc[0] + title_str = f'Calibrated data: Collapsed {title_text}, {datestr}' + + plot = figure(title=title_str, tools='pan,box_zoom,reset,wheel_zoom,save', + background_fill_color="#fafafa") + + # Add a column containing pixel numbers to plot against + pix_num = np.arange(len(self.data[colname].iloc[0])) + self.data['pixel'] = [pix_num] + + series = self.data.iloc[0] + series = series[['pixel', colname]] + source = ColumnDataSource(dict(series)) + plot.scatter(x='pixel', y=colname, fill_color="#C85108", line_color="#C85108", + alpha=0.75, source=source) + + hover_text = axis_text.split(' ')[0] + hover_tool = HoverTool(tooltips=f'{hover_text} @pixel: @{colname}') + plot.tools.append(hover_tool) + plot.xaxis.axis_label = axis_text + plot.yaxis.axis_label = 'Median Signal (DN)' + else: + # If there is a latest_data entry, but the collapsed_row or collapsed_col + # columns are empty, then make a placeholder plot. + title_str = f'Calibrated data: Collapsed {title_text}' + plot = PlaceholderPlot(title_str, axis_text, 'Median Signal (DN)').plot + else: + # If there are no data, then create an empty placeholder plot + title_str = f'Calibrated data: Collapsed {title_text}' + plot = PlaceholderPlot(title_str, axis_text, 'Median Signal (DN)').plot + + return plot + + + +class TrendingPlot(): + """Class to create trending plots of bias level over time. There should be + 4 plots produced: 1 for each amplifier (with even and odd columns plotted in each). + + Parameters + ---------- + data : pandas.DataFrame + Data to be plotted + + Attributes + ---------- + data : pandas.DataFrame + Data to be plotted + + plots : dict + Dictionary containing plots. Keys are amplifier numbers (1 - 4), and values are + Bokeh figures containing the plots. + """ + def __init__(self, data): + self.data = data + self.create_plots() + + def create_amp_plot(self, amp_num, amp_data): + """Create a trending plot for a single amplifier + + Parameters + ---------- + amp_num : int + Amplifier number. 
1 through 4 + + amp_data : pandas.DataFrame + DataFrame with trending data and dates for the amplifier + + Returns + ------- + plot : bokeh.plotting.figure + Figure containing the plot + """ + title_str = f'Uncal data: Amp {amp_num}' + x_label = 'Date' + y_label = 'Bias Level (DN)' + + if len(amp_data["expstart"]) > 0: + plot = figure(title=title_str, tools='pan,box_zoom,reset,wheel_zoom,save', + background_fill_color="#fafafa") + source = ColumnDataSource(amp_data) + even_col = f'amp{amp_num}_even_med' + odd_col = f'amp{amp_num}_odd_med' + + plot.scatter(x='expstart', y=even_col, fill_color="#C85108", line_color="#C85108", + alpha=0.75, source=source, legend_label='Even cols') + plot.scatter(x='expstart', y=odd_col, fill_color="#355C7D", line_color="#355C7D", + alpha=0.75, source=source, legend_label='Odd cols') + + # Make the x axis tick labels look nice + plot.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], + seconds=["%d %b %H:%M:%S.%3N"], + hours=["%d %b %H:%M"], + days=["%d %b %H:%M"], + months=["%d %b %Y %H:%M"], + years=["%d %b %Y"] + ) + plot.xaxis.major_label_orientation = np.pi / 4 + + # Use the string representation of the time in the hover tool, rather than the + # datetime version. If you use the datetime version, and save this information + # to the html file, when trying to read and display the html file, jinja will + # interpret the format codes as html tags and crash with errors such as: + # "Encountered unknown tag 'd'. Jinja was looking for the following tags: 'endblock'. + # The innermost block that needs to be closed is 'block'" + hover_tool = HoverTool(tooltips=[('File:', '@uncal_filename'), + ('Even col bias:', f'@{even_col}'), + ('Odd col bias:', f'@{odd_col}'), + ('Date:', '@expstart_str') + ] + ) + #hover_tool.formatters = {'@expstart': 'datetime'} + plot.tools.append(hover_tool) + plot.xaxis.axis_label = x_label + plot.yaxis.axis_label = y_label + else: + # If there are no data, then create an empty placeholder plot + plot = PlaceholderPlot(title_str, x_label, y_label).plot + + return plot + + def create_plots(self): + """Create the 4 plots + """ + self.plots = {} + # Either all amps will have data, or all amps will be empty. No need to + # worry about some amps having data but others not. + # Create one plot per amplifier + for amp_num in range(1, 5): + cols_to_use = [col for col in self.data.columns if str(amp_num) in col] + cols_to_use.extend(['expstart', 'expstart_str', 'uncal_filename']) + subframe = self.data[cols_to_use] + self.plots[amp_num] = self.create_amp_plot(amp_num, subframe) + + +class ZerothGroupImage(): + """Class to create an image to show the zeroth group of a + calibrated dark file + + Parameters + ---------- + data : pandas.DataFrame + Data to be plotted. Required columns include bin_right, bin_left, + counts, expstart_str + + Attributes + ---------- + data : pandas.DataFrame + Data to be plotted. 
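
The comment in create_amp_plot above explains why datetime values are kept out of the hover tooltips: the saved HTML would contain strftime codes such as "{%d %m %Y}" that jinja later misreads as template tags. A minimal standalone illustration of the workaround, using toy data rather than anything from the monitor tables, is sketched below.

from datetime import datetime

from bokeh.models import ColumnDataSource, HoverTool
from bokeh.plotting import figure

times = [datetime(2022, 7, 1, 12, 0), datetime(2022, 7, 8, 12, 0)]
source = ColumnDataSource(data=dict(time=times,
                                    # Preformatted strings keep strftime codes out of the saved HTML
                                    string_time=[t.strftime('%d %b %Y %H:%M') for t in times],
                                    value=[10, 12]))

fig = figure(x_axis_type='datetime')
points = fig.scatter(x='time', y='value', source=source)
fig.add_tools(HoverTool(tooltips=[('Value', '@value'), ('Date', '@string_time')],
                        renderers=[points]))

The datetime column still drives the x axis, so tick formatting and zooming behave normally; only the hover text comes from the preformatted strings.
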
Required columns include bin_right, bin_left, + counts, expstart_str + + figure : bokeh.plotting.figure + Figure containing an image + """ + def __init__(self, data): + self.data = data + self.create_figure() + + def create_figure(self): + """Create the Bokeh figure + """ + if len(self.data['cal_image']) > 0: + if os.path.isfile(self.data['cal_image'].iloc[0]): + image = read_png(self.data['cal_image'].iloc[0]) + + datestr = self.data['expstart_str'].iloc[0] + + # Display the 32-bit RGBA image + ydim, xdim = image.shape + dim = max(xdim, ydim) + self.figure = figure(title=f'Calibrated Zeroth Group of Most Recent Dark: {datestr}', x_range=(0, xdim), y_range=(0, ydim), + tools='pan,box_zoom,reset,wheel_zoom,save') + self.figure.image_rgba(image=[image], x=0, y=0, dw=xdim, dh=ydim) + self.figure.xaxis.visible = False + self.figure.yaxis.visible = False + else: + # If the listed file is missing, create an empty plot + self.figure = PlaceholderPlot('Calibrated Zeroth Group of Most Recent Dark', '', '').plot + self.figure.xaxis.visible = False + self.figure.yaxis.visible = False + + else: + # If no file is given, create an empty plot + self.figure = PlaceholderPlot('Calibrated Zeroth Group of Most Recent Dark', '', '').plot + self.figure.xaxis.visible = False + self.figure.yaxis.visible = False diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_cosmic_rays_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_cosmic_rays_bokeh.py new file mode 100644 index 000000000..4160e9b87 --- /dev/null +++ b/jwql/website/apps/jwql/monitor_pages/monitor_cosmic_rays_bokeh.py @@ -0,0 +1,184 @@ +"""This module contains code for the cosmic ray monitor Bokeh plots. + +Authors +------- + + - Bryan Hilbert + +Use +--- + + This module is intended to be imported and use as such: + :: + + from jwql.website.apps.jwql import monitor_pages + monitor_template = monitor_pages.CosmicRayMonitor('nircam', 'NRCA1_FULL') + +Bokeh figures will then be in: + monitor_template.history_figure + monitor_template.histogram_figure +""" + +from datetime import datetime, timedelta +import os + +from bokeh.models import BasicTickFormatter, ColumnDataSource, DatetimeTickFormatter, HoverTool, Range1d +from bokeh.plotting import figure +import matplotlib.pyplot as plt +import numpy as np + +from jwql.database.database_interface import session +from jwql.database.database_interface import MIRICosmicRayQueryHistory, MIRICosmicRayStats +from jwql.database.database_interface import NIRCamCosmicRayQueryHistory, NIRCamCosmicRayStats +from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE + + +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class CosmicRayMonitor(): + def __init__(self, instrument, aperture): + """Create instance + + Parameters + ---------- + instrument : str + Name of JWST instrument. e.g. 'nircam' + + aperture : str + Name of aperture. e.g. 'NRCA1_FULL' + """ + self._instrument = instrument + self._aperture = aperture + self.create_figures() + + def create_figures(self): + """Wrapper function to create both the history and histogram plots + for a given instrument/aperture. + """ + # Get the data + self.load_data() + + # Create the history plot + self.history_figure = self.history_plot() + + # Create the histogram plot + self.histogram_figure = self.histogram_plot() + + def get_histogram_data(self): + """Get data required to create cosmic ray histogram from the + database query. 
+ """ + + self.mags = [row.magnitude for row in self.cosmic_ray_table] + + # If there are no data, then create something reasonable + if len(self.mags) == 0: + self.mags = [[0]] + + last_hist_index = -1 + # We'll never see CRs with magnitudes above 65535. + # Let's fix the bins for now, and see some data to check + # if they are reasonable + bins = np.arange(-65000, 66000, 5000) + hist = plt.hist(self.mags[last_hist_index], bins=bins) + + self.bin_left = np.array([bar.get_x() for bar in hist[2]]) + self.amplitude = [bar.get_height() for bar in hist[2]] + self.bottom = [bar.get_y() for bar in hist[2]] + deltas = self.bin_left[1:] - self.bin_left[0: -1] + self.bin_width = np.append(deltas[0], deltas) + + def get_history_data(self): + """Extract data on the history of cosmic ray numbers from the + database query result + """ + self.times = [row.obs_end_time for row in self.cosmic_ray_table] + self.rate = [row.jump_rate for row in self.cosmic_ray_table] + + def histogram_plot(self): + """Create the histogram figure of CR magnitudes. + """ + self.get_histogram_data() + + title = f'Magnitudes: {self._instrument}, {self._aperture}' + fig = figure(title=title, tools='zoom_in, zoom_out, box_zoom, pan, reset, save', background_fill_color="#fafafa") + fig.quad(top=self.amplitude, bottom=0, left=self.bin_left, right=self.bin_left + self.bin_width, + fill_color="navy", line_color="white", alpha=0.5) + + fig.y_range.start = 0 + fig.xaxis.formatter.use_scientific = False + fig.xaxis.major_label_orientation = np.pi / 4 + + hover_tool = HoverTool(tooltips=[('Num CRs: ', '@top{int}')]) + fig.tools.append(hover_tool) + + fig.xaxis.axis_label = 'Cosmic Ray Magnitude (DN)' + fig.yaxis.axis_label = 'Number of Cosmic Rays' + fig.grid.grid_line_color = "white" + fig.sizing_mode = "scale_width" + return fig + + def history_plot(self): + """Create the plot of CR rates versus time + """ + self.get_history_data() + + # If there are no data, create a reasonable looking empty plot + if len(self.times) == 0: + self.times = [datetime(2021, 12, 25), datetime(2021, 12, 26)] + self.rate = [0, 0] + + source = ColumnDataSource(data={'x': self.times, 'y': self.rate}) + + # Create a useful plot title + title = f'CR Rates: {self._instrument}, {self._aperture}' + + # Create figure + fig = figure(tools='zoom_in, zoom_out, box_zoom, pan, reset, save', x_axis_type='datetime', + title=title, x_axis_label='Date', y_axis_label='CR rate (per pix per sec)') + + # For cases where the plot contains only a single point, force the + # plot range to something reasonable + if len(self.times) < 2: + fig.x_range = Range1d(self.times[0] - timedelta(days=1), self.times[0] + timedelta(days=1)) + fig.y_range = Range1d(self.rate[0] - 0.5 * self.rate[0], self.rate[0] + 0.5 * self.rate[0]) + + data = fig.scatter(x='x', y='y', line_width=5, line_color='blue', source=source) + + # Make the x axis tick labels look nice + fig.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], + seconds=["%d %b %H:%M:%S.%3N"], + hours=["%d %b %H:%M"], + days=["%d %b %H:%M"], + months=["%d %b %Y %H:%M"], + years=["%d %b %Y"] + ) + fig.xaxis.major_label_orientation = np.pi / 4 + fig.yaxis[0].formatter = BasicTickFormatter(use_scientific=True, precision=2) + + hover_tool = HoverTool(tooltips=[('Value', '@y'), + ('Date', '@x{%d %b %Y %H:%M:%S}') + ], mode='mouse', renderers=[data]) + hover_tool.formatters = {'@x': 'datetime'} + fig.tools.append(hover_tool) + fig.sizing_mode = "scale_width" + return fig + + def identify_tables(self): + """Determine 
which database tables as associated with + a given instrument""" + mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self._instrument.lower()] + self.query_table = eval('{}CosmicRayQueryHistory'.format(mixed_case_name)) + self.stats_table = eval('{}CosmicRayStats'.format(mixed_case_name)) + + def load_data(self): + """Query the database tables to get data""" + + # Determine which database tables are needed based on instrument + self.identify_tables() + + # Query database for all data with a matching aperture + self.cosmic_ray_table = session.query(self.stats_table) \ + .filter(self.stats_table.aperture == self._aperture) \ + .all() diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py index 941eed901..d57a83c4b 100755 --- a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py @@ -15,196 +15,741 @@ :: from jwql.website.apps.jwql import monitor_pages - monitor_template = monitor_pages.DarkMonitor('NIRCam', 'NRCA3_FULL') - script, div = monitor_template.embed("dark_current_time_figure") + monitor_template = monitor_pages.DarkMonitor('nircam') """ import os -from astropy.io import fits from astropy.time import Time -from bokeh.models.tickers import LogTicker -from datetime import datetime +from bokeh.models import ColorBar, ColumnDataSource, DatetimeTickFormatter, HoverTool, Legend, LinearAxis +from bokeh.models import Range1d, Text, Whisker +from bokeh.plotting import figure +from datetime import datetime, timedelta import numpy as np - -from jwql.database.database_interface import session -from jwql.database.database_interface import NIRCamDarkQueryHistory, NIRCamDarkPixelStats, NIRCamDarkDarkCurrent -from jwql.database.database_interface import NIRISSDarkQueryHistory, NIRISSDarkPixelStats, NIRISSDarkDarkCurrent -from jwql.database.database_interface import MIRIDarkQueryHistory, MIRIDarkPixelStats, MIRIDarkDarkCurrent -from jwql.database.database_interface import NIRSpecDarkQueryHistory, NIRSpecDarkPixelStats, NIRSpecDarkDarkCurrent -from jwql.database.database_interface import FGSDarkQueryHistory, FGSDarkPixelStats, FGSDarkDarkCurrent +from PIL import Image +from sqlalchemy import func +from sqlalchemy.sql.expression import and_ + +from jwql.database.database_interface import get_unique_values_per_column, session +from jwql.database.database_interface import NIRCamDarkPixelStats, NIRCamDarkDarkCurrent +from jwql.database.database_interface import NIRISSDarkPixelStats, NIRISSDarkDarkCurrent +from jwql.database.database_interface import MIRIDarkPixelStats, MIRIDarkDarkCurrent +from jwql.database.database_interface import NIRSpecDarkPixelStats, NIRSpecDarkDarkCurrent +from jwql.database.database_interface import FGSDarkPixelStats, FGSDarkDarkCurrent +from jwql.utils.constants import FULL_FRAME_APERTURES from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE -from jwql.utils.utils import get_config -from jwql.bokeh_templating import BokehTemplate +from jwql.utils.utils import get_config, read_png +from jwql.website.apps.jwql.bokeh_utils import PlaceholderPlot SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) OUTPUTS_DIR = get_config()['outputs'] -class DarkMonitor(BokehTemplate): - - # Combine instrument and aperture into a single property because we - # do not want to invoke the setter unless both are updated - @property - def aperture_info(self): - return (self._instrument, self._aperture) - - @aperture_info.setter - def 
aperture_info(self, info): - self._instrument, self._aperture = info - self.pre_init() - self.post_init() - - def _dark_mean_image(self): - """Update bokeh objects with mean dark image data.""" - - # Open the mean dark current file and get the data - if len(self.pixel_table) != 0: - mean_dark_image_file = self.pixel_table[-1].mean_dark_image_file - mean_slope_dir = os.path.join(OUTPUTS_DIR, 'dark_monitor', 'mean_slope_images') - mean_dark_image_path = os.path.join(mean_slope_dir, mean_dark_image_file) - with fits.open(mean_dark_image_path) as hdulist: - data = hdulist[1].data +class DarkHistPlot(): + """Create a histogram plot of dark current values for a given aperture + + Attributes + ---------- + aperture: str + Aperture name (e.g. NRCA1_FULL) + + data : dict + Dictionary of histogram data. Keys are amplifier values. + Values are tuples of (x values, y values) + + plot : bokeh.figure + Figure containing the histogram plot + """ + def __init__(self, aperture, data): + """Create the plot + + Parameters + ---------- + aperture : str + Name of the aperture (e.g. 'NRCA1_FULL') + + data : dict + Histogram data. Keys are amplifier values (e.g. '1'). Values are + tuples of (x values, y values) + """ + self.data = data + self.aperture = aperture + self.create_plot() + + def calc_bin_edges(self, centers): + """Given an array of values corresponding to the center of a series + of histogram bars, calculate the bar edges + + Parameters + ---------- + centers : numpy.ndarray + Array of central values + """ + deltax = (centers[1:] - centers[0:-1]) + deltax_left = np.insert(deltax, 0, deltax[0]) + deltax_right = np.append(deltax, deltax[-1]) + left = centers - 0.5 * deltax_left + right = centers + 0.5 * deltax_right + return left, right + + def create_plot(self): + """Place the data in a CoumnDataSource and create the plot + """ + if len(self.data) > 0: + # Specify which key ("amplifier") to show. If there is data for amp='5', + # show that, as it will be the data for the entire detector. If not then + # we have subarray data and should use amp='1'. + # A bug in the dark monitor means that for NIRISS, there is no amp = '5' + # entry at the moment. So we set amp=1. Normally this would only plot the + # histogram for amp 1, but since the dark_monitor output at the moment is + # wrong and the histogram for the entire detector is being saved in the entries + # for each amp, we can get away with using use_amp=1 at the moment. + if '5' in self.data: + use_amp = '5' + else: + use_amp = '1' + + title_str = f'{self.aperture}: Dark Rate Histogram' + x_label = 'Dark Rate (DN/sec)' + y_label = 'Number of Pixels' + + # If there are histogram data for multiple amps, then we can plot each histogram. + if len(self.data) > 1: + # Looks like the histogram data for the individual amps is not being saved + # correctly. The data are identical for the full aperture and all amps. So + # for the moment, show only the full aperture data (by setting per_amp=False). 
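For a concrete feel for calc_bin_edges above, the same centers-to-edges arithmetic run on a small, made-up set of unevenly spaced bin centers looks like this:

import numpy as np

def calc_bin_edges(centers):
    # Gaps between consecutive centers, padded at both ends so every bar gets an edge
    deltax = centers[1:] - centers[:-1]
    deltax_left = np.insert(deltax, 0, deltax[0])
    deltax_right = np.append(deltax, deltax[-1])
    return centers - 0.5 * deltax_left, centers + 0.5 * deltax_right

centers = np.array([0.0, 0.01, 0.02, 0.04])
left, right = calc_bin_edges(centers)
# left  -> [-0.005  0.005  0.015  0.03 ]
# right -> [ 0.005  0.015  0.03   0.05 ]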
+ per_amp = False + main_label = 'Full Aperture' + + # Colors to use for the amp-dpendent plots + colors = ['red', 'orange', 'green', 'gray'] + else: + per_amp = False + + mainx, mainy = self.data[use_amp] + mainx = np.array(mainx) + mainy = np.array(mainy) + + # Calculate edge values + left_edges, right_edges = self.calc_bin_edges(mainx) + + # Create the CDF + pdf = mainy / sum(mainy) + cdf = np.cumsum(pdf) + + # Create ColumnDataSource for main plot and CDF line + source = ColumnDataSource(data=dict(dark_rate=mainx, + num_pix=mainy, + cdf=cdf, + left_edges=left_edges, + right_edges=right_edges + ) + ) + + self.plot = figure(title=title_str, + tools='pan,box_zoom,reset,wheel_zoom,save', background_fill_color="#fafafa") + + # Plot the histogram for the "main" amp + self.plot.quad(top='num_pix', bottom=0, left='left_edges', right='right_edges', + fill_color="navy", line_color="white", alpha=0.5, source=source) + hover_tool = HoverTool(tooltips=[('Dark rate:', '@dark_rate'), + ('Num Pix:', '@num_pix'), + ('CDF:', '@cdf') + ], + mode='mouse') + self.plot.tools.append(hover_tool) + + # If there are multiple amps to be plotted, do that here + if per_amp: + self.plot.quad(top=mainy, bottom=0, left=left_edges, right=right_edges, + fill_color="navy", line_color="white", alpha=0.5, legend_label='Full Aperture') + # Repeat for all amps. Be sure to skip the amp that's already completed + for amp, color in zip(self.data, colors): + if amp != use_amp: + x, y = self.data[amp] + x = np.array(x) + y = np.array(y) + amp_left_edges, amp_right_edges = self.calc_bin_edges(x) + self.plot.quad(top=y, bottom=0, left=amp_left_edges, right=amp_right_edges, + fill_color=color, line_color="white", alpha=0.25, legend_label=f'Amp {amp}') + + # Set ranges + self.plot.extra_y_ranges = {"cdf_line": Range1d(0,1)} + self.plot.add_layout(LinearAxis(y_range_name='cdf_line', axis_label="Cumulative Distribution"), "right") + + # Add cumulative distribution function + self.plot.line('dark_rate', 'cdf', source=source, line_color="orange", line_width=2, alpha=0.7, + y_range_name='cdf_line', color="red", legend_label="CDF") + + # Set the initial x range to include 99.8% of the distribution + disp_index = np.where((cdf > 0.001) & (cdf < 0.999))[0] + + # Set labels and ranges + self.plot.xaxis.axis_label = x_label + self.plot.yaxis.axis_label = y_label + self.plot.y_range.start = 0 + self.plot.y_range.end = np.max(mainy) * 1.1 + self.plot.x_range.start = mainx[disp_index[0]] + self.plot.x_range.end = mainx[disp_index[-1]] + self.plot.legend.location = "top_left" + self.plot.legend.background_fill_color = "#fefefe" + self.plot.grid.grid_line_color="white" else: - # Cover the case where the database is empty - data = np.zeros((10, 10)) - - # Update the plot with the data and boundaries - y_size, x_size = np.shape(data) - self.refs["mean_dark_source"].data['image'] = [data] - self.refs["stamp_xr"].end = x_size - self.refs["stamp_yr"].end = y_size - self.refs["mean_dark_source"].data['dw'] = [x_size] - self.refs["mean_dark_source"].data['dh'] = [x_size] - - # Set the image color scale - self.refs["log_mapper"].high = 0 - self.refs["log_mapper"].low = -.2 - - # This should add ticks to the colorbar, but it doesn't - self.refs["mean_dark_cbar"].ticker = LogTicker() - - # Add a title - self.refs['mean_dark_image_figure'].title.text = self._aperture - self.refs['mean_dark_image_figure'].title.align = "center" - self.refs['mean_dark_image_figure'].title.text_font_size = "20px" - - def pre_init(self): - # Start with default values for 
instrument and aperture because - # BokehTemplate's __init__ method does not allow input arguments - try: - dummy_instrument = self._instrument - dummy_aperture = self._aperture - except AttributeError: - self._instrument = 'NIRCam' - self._aperture = 'NRCA1_FULL' - - self._embed = True - - # Fix aperture/detector name discrepency - if self._aperture in ['NRCA5_FULL', 'NRCB5_FULL']: - self.detector = '{}LONG'.format(self._aperture[0:4]) + # If self.data is empty, then make a placeholder plot + self.plot = PlaceholderPlot(title_str, x_label, y_label).plot + + +class DarkImagePlot(): + """Creates a figure that displays a mean dark current image + held in a png file + + Attributes + ---------- + aperture : str + Name of aperture (e.g. NRCA1_FULL) + + dark_image_picture : str + Name of png file containing the mean dark current image figure + created by the dark monitor + + plot : bokeh.figure + Figure containing the dark current image + """ + def __init__(self, data, aperture): + """Create the figure + """ + self.dark_image_picture = data + self.aperture = aperture + + self.create_plot() + + def create_plot(self): + """Takes the input filename, reads it in, and places it in a figure. If + the given filename doesn't exist, or if no filename is given, it produces + an empty figure that can be used as a placeholder + """ + if self.dark_image_picture is not None: + if os.path.isfile(self.dark_image_picture): + view = read_png(self.dark_image_picture) + + # Display the 32-bit RGBA image + ydim, xdim = view.shape + dim = max(xdim, ydim) + self.plot = figure(x_range=(0, xdim), y_range=(0, ydim), tools='pan,box_zoom,reset,wheel_zoom,save') + self.plot.image_rgba(image=[view], x=0, y=0, dw=xdim, dh=ydim) + self.plot.xaxis.visible = False + self.plot.yaxis.visible = False + + else: + # If the given file is missing, create an empty plot + self.plot = PlaceholderPlot(self.aperture, '', '').plot + self.plot.xaxis.visible = False + self.plot.yaxis.visible = False else: - self.detector = self._aperture.split('_')[0] + # If no filename is given, then create an empty plot + self.plot = PlaceholderPlot(self.aperture, '', '').plot + self.plot.xaxis.visible = False + self.plot.yaxis.visible = False - # App design - self.format_string = None - self.interface_file = os.path.join(SCRIPT_DIR, "yaml", "dark_monitor_interface.yaml") - # Load data tables - self.load_data() +class DarkMonitorData(): + """Retrive dark monitor data from the database tables - # Data for mean dark versus time plot - datetime_stamps = [row.obs_mid_time for row in self.dark_table] + Attributes + ---------- + detector : str + Detector name (e.g. 'NRCA1') - # Data for dark current histogram plot (full detector) - # Just show the last histogram, which is the one most recently - # added to the database - last_hist_index = -1 + instrument : str + Name of JWST instrument e.g. 'nircam' - # Return dummy data if the database was empty - if len(datetime_stamps) == 0: - datetime_stamps = [datetime(2014, 1, 1, 12, 0, 0), datetime(2014, 1, 2, 12, 0, 0)] - self.dark_current = [0., 0.1] - self.full_dark_bin_center = np.array([0., 0.01, 0.02]) - self.full_dark_amplitude = [0., 1., 0.] 
- else: - self.dark_current = [row.mean for row in self.dark_table] - self.full_dark_bin_center = np.array([row.hist_dark_values for - row in self.dark_table])[last_hist_index] - self.full_dark_amplitude = [row.hist_amplitudes for - row in self.dark_table][last_hist_index] + pixel_data : list + Data returned from the pixel_table - times = Time(datetime_stamps, format='datetime', scale='utc') # Convert to MJD - self.timestamps = times.mjd - self.last_timestamp = datetime_stamps[last_hist_index].isoformat() - self.full_dark_bottom = np.zeros(len(self.full_dark_amplitude)) - deltas = self.full_dark_bin_center[1:] - self.full_dark_bin_center[0: -1] - self.full_dark_bin_width = np.append(deltas[0], deltas) + pixel_table : sqlalchemy.orm.decl_api.DeclarativeMeta + Dark montior bad pixel list table - def post_init(self): + pixel_table_columns : list + List of columns in the pixel_table - self._update_dark_v_time() - self._update_hist() - self._dark_mean_image() + stats_data : list + Data returned from the stats_table + + stats_table : sqlalchemy.orm.decl_api.DeclarativeMeta + Dark monitor table giving dark current statistics + + stats_table_columns : list + List of columns in the stats_table + """ + def __init__(self, instrument_name): + """Connect to the correct tables for the given instrument + """ + self.instrument = instrument_name + self.identify_tables() def identify_tables(self): """Determine which dark current database tables as associated with a given instrument""" - - mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self._instrument.lower()] - self.query_table = eval('{}DarkQueryHistory'.format(mixed_case_name)) + mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument.lower()] self.pixel_table = eval('{}DarkPixelStats'.format(mixed_case_name)) self.stats_table = eval('{}DarkDarkCurrent'.format(mixed_case_name)) - def load_data(self): - """Query the database tables to get data""" + # Get a list of column names for each + self.stats_table_columns = self.stats_table.metadata.tables[f'{self.instrument.lower()}_dark_dark_current'].columns.keys() + self.pixel_table_columns = self.pixel_table.metadata.tables[f'{self.instrument.lower()}_dark_pixel_stats'].columns.keys() + + def retrieve_data(self, aperture, get_pixtable_for_detector=False): + """Get all nedded data from the database tables. + + Parameters + ---------- + aperture : str + Name of aperture for which data are retrieved (e.g. NRCA1_FULL) + + get_pixtable_for_detector : bool + If True, query self.pixel_table (e.g. NIRCamDarkPixelStats) for the + detector associated with the given aperture. + """ + # Query database for all data in DarkDarkCurrent with a matching aperture + self.stats_data = session.query(self.stats_table) \ + .filter(self.stats_table.aperture == aperture) \ + .all() - # Determine which database tables are needed based on instrument - self.identify_tables() + if get_pixtable_for_detector: + self.detector = aperture.split('_')[0].upper() + # The MIRI imaging detector does not line up with the full frame aperture. 
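The two query shapes used by retrieve_data (all stats rows for one aperture, and the latest pixel-table entry per bad pixel type) can be exercised against a throwaway in-memory database. The toy tables and values below are illustrative stand-ins for the real jwql models, not the actual schema.

from datetime import datetime

from sqlalchemy import Column, DateTime, Float, Integer, String, create_engine, func
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class ToyDarkCurrent(Base):
    # Stand-in for e.g. NIRCamDarkDarkCurrent
    __tablename__ = 'toy_dark_dark_current'
    id = Column(Integer, primary_key=True)
    aperture = Column(String)
    amplifier = Column(String)
    mean = Column(Float)

class ToyPixelStats(Base):
    # Stand-in for e.g. NIRCamDarkPixelStats
    __tablename__ = 'toy_dark_pixel_stats'
    id = Column(Integer, primary_key=True)
    detector = Column(String)
    type = Column(String)
    entry_date = Column(DateTime)
    mean_dark_image_file = Column(String)

engine = create_engine('sqlite://')            # in-memory database for the demo
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([
    ToyDarkCurrent(aperture='NRCA1_FULL', amplifier='5', mean=0.002),
    ToyPixelStats(detector='NRCA1', type='hot', entry_date=datetime(2022, 3, 1),
                  mean_dark_image_file='old.fits'),
    ToyPixelStats(detector='NRCA1', type='hot', entry_date=datetime(2022, 6, 1),
                  mean_dark_image_file='new.fits'),
])
session.commit()

# 1) All stats rows matching one aperture, as in the stats_data query
stats = session.query(ToyDarkCurrent).filter(ToyDarkCurrent.aperture == 'NRCA1_FULL').all()

# 2) Latest entry per bad-pixel type for one detector: take max(entry_date) per type
#    in a subquery, then join back on the timestamp to recover the full rows
subq = (session.query(ToyPixelStats.type,
                      func.max(ToyPixelStats.entry_date).label('max_created'))
        .filter(ToyPixelStats.detector == 'NRCA1')
        .group_by(ToyPixelStats.type)
        .subquery())
latest = (session.query(ToyPixelStats.type, ToyPixelStats.mean_dark_image_file)
          .join(subq, ToyPixelStats.entry_date == subq.c.max_created)
          .all())
print(len(stats), latest)    # -> 1 [('hot', 'new.fits')]
session.close()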
Fix that here + if self.detector == 'MIRIM': + self.detector = 'MIRIMAGE' + + # NIRCam LW detectors use 'LONG' rather than 5 in the pixel_table + if '5' in self.detector: + self.detector = self.detector.replace('5', 'LONG') + + # For the given detector, get the latest entry for each bad pixel type, and + # return the bad pixel type, detector, and mean dark image file + subq = (session + .query(self.pixel_table.type, func.max(self.pixel_table.entry_date).label("max_created")) + .filter(self.pixel_table.detector == self.detector) + .group_by(self.pixel_table.type) + .subquery() + ) + + query = (session.query(self.pixel_table.type, self.pixel_table.detector, self.pixel_table.mean_dark_image_file) + .join(subq, self.pixel_table.entry_date == subq.c.max_created) + ) + + self.pixel_data = query.all() + session.close() - # Query database for all data in NIRCamDarkDarkCurrent with a matching aperture - self.dark_table = session.query(self.stats_table) \ - .filter(self.stats_table.aperture == self._aperture) \ - .all() - self.pixel_table = session.query(self.pixel_table) \ - .filter(self.pixel_table.detector == self.detector) \ - .all() +class DarkMonitorPlots(): + """This is the top-level class, which will call the DarkMonitorData + class to get results from the dark monitor, and use DarkHistPlot, + DarkTrendPlot, and DarkImagePlot to create figures from the data. - session.close() + Attributes + ---------- + aperture : str + Name of the aperture used for the dark current (e.g. + ``NRCA1_FULL``) + + available_apertures : list + List of apertures for a given instrument that are present in + the dark monitor database tables + + dark_image_data : dict + Dictionary with aperture names as keys, and Bokeh images + as values. + + dark_image_picture : str + Filename of the png file containing an image of the mean dark current + + db : DarkMonitorData + Instance of DarkMonitorData that contians dark monitor data + retrieved from the database + + data_dir : str + Path into which new dark files will be copied to be worked on + + hist_data : dict + Dictionary of histogram data, with amplifier values as keys + + hist_plots : dict + Dictionary with aperture names as keys, and Bokeh histogram + plots as values. - def _update_dark_v_time(self): + instrument : str + Name of instrument used to collect the dark current data - # Define y range of dark current v. time plot - buffer_size = 0.05 * (max(self.dark_current) - min(self.dark_current)) - self.refs['dark_current_yrange'].start = min(self.dark_current) - buffer_size - self.refs['dark_current_yrange'].end = max(self.dark_current) + buffer_size + mean_dark : dict + Mean dark current values, with amplifiers as keys - # Define x range of dark current v. time plot - horizontal_half_buffer = (max(self.timestamps) - min(self.timestamps)) * 0.05 - if horizontal_half_buffer == 0: - horizontal_half_buffer = 1. 
# day - self.refs['dark_current_xrange'].start = min(self.timestamps) - horizontal_half_buffer - self.refs['dark_current_xrange'].end = max(self.timestamps) + horizontal_half_buffer + mean_slope_dir : str + Directory containing the mean dark current images output + by the dark monitor - # Add a title - self.refs['dark_current_time_figure'].title.text = self._aperture - self.refs['dark_current_time_figure'].title.align = "center" - self.refs['dark_current_time_figure'].title.text_font_size = "20px" - def _update_hist(self): + obstime : dict + Observation times associated with mean_dark, with amplifiers as keys - # Define y range of dark current histogram - buffer_size = 0.05 * (max(self.full_dark_amplitude) - min(self.full_dark_bottom)) - self.refs['dark_histogram_yrange'].start = min(self.full_dark_bottom) - self.refs['dark_histogram_yrange'].end = max(self.full_dark_amplitude) + buffer_size + output_dir : str + Path into which outputs will be placed - # Define x range of dark current histogram - self.refs['dark_histogram_xrange'].start = min(self.full_dark_bin_center) - self.refs['dark_histogram_xrange'].end = max(self.full_dark_bin_center) + pixel_table : sqlalchemy table + Table containing lists of hot/dead/noisy pixels found for each + instrument/detector - # Add a title - self.refs['dark_full_histogram_figure'].title.text = self._aperture - self.refs['dark_full_histogram_figure'].title.align = "center" - self.refs['dark_full_histogram_figure'].title.text_font_size = "20px" + query_start : float + MJD start date to use for querying MAST + + stats_table : sqlalchemy table + Table containing dark current analysis results. Mean/stdev + values, histogram information, Gaussian fitting results, etc. + + stdev_dark : dict + Standard deviation of dark current values, with amplifiers as keys + + trending_plots : dict + Dictionary with aperture names as keys, and Bokeh scatter + plots as values. + + _amplifiers : numpy.ndarray + Array of amplifier values from the database table + + _entry_dates : numpy.ndarray + Array of entry dates from the database table + + _mean : numpy.ndarray + Array of mean dark current values from the database table + + _obs_mid_time : numpy.ndarray + Array of observation times from the database table + + _stats_mean_dark_image_files : numpy.ndarray + Array of mean dark current image filenames from the database table + + _stats_numfiles : numpy.ndarray + Array of the number of files used to create each mean dark, from the database table + + _stdev : numpy.ndarray + Array of standard deviation of dark current values from the database table + """ + def __init__(self, instrument): + """Query the database, get the data, and create the plots + """ + self.mean_slope_dir = os.path.join(OUTPUTS_DIR, 'dark_monitor', 'mean_slope_images') + self.instrument = instrument + self.hist_plots = {} + self.trending_plots = {} + self.dark_image_data = {} + + # Get the data from the database + self.db = DarkMonitorData(self.instrument) + + # Now we need to loop over the available apertures and create plots for each + self.available_apertures = get_unique_values_per_column(self.db.stats_table, 'aperture') + + # Require entries for all full frame apertures. 
If there are no data for a + # particular full frame entry, then produce an empty plot, in order to + # keep the plot layout consistent + self.ensure_all_full_frame_apertures() + + # List of full frame aperture names + full_apertures = FULL_FRAME_APERTURES[instrument.upper()] + + for aperture in self.available_apertures: + self.aperture = aperture + + # Retrieve data from database. Since the mean dark image plots are + # produced by the dark monitor itself, all we need for that is the + # name of the file. then we need the histogram and trending data. All + # of this is in the dark monitor stats table. No need to query the + # dark monitor pixel table. + self.db.retrieve_data(self.aperture, get_pixtable_for_detector=False) + self.stats_data_to_lists() + self.get_mean_dark_image_from_stats_table() + + # Create the mean dark image figure + self.dark_image_data[self.aperture] = DarkImagePlot(self.dark_image_picture, self.aperture).plot + + # Organize the data to create the histogram plot + self.get_latest_histogram_data() + + # Organize the data to create the trending plot + self.get_trending_data() + + # Now that we have all the data, create the acutal plots + self.hist_plots[aperture] = DarkHistPlot(self.aperture, self.hist_data).plot + self.trending_plots[aperture] = DarkTrendPlot(self.aperture, self.mean_dark, self.stdev_dark, self.obstime).plot + + def ensure_all_full_frame_apertures(self): + """Be sure that self.available_apertures contains entires for all + full frame apertures. These are needed to make sure the plot layout + is consistent later + """ + full_apertures = FULL_FRAME_APERTURES[self.instrument.upper()] + for ap in full_apertures: + if ap not in self.available_apertures: + self.available_apertures.append(ap) + + def extract_times_from_filename(self, filename): + """Based on the mean dark filename produced by the dark monitor, extract the + starting and ending times covered by the file. + + Parameters + ---------- + filename : str + Name of file to be examined + + Returns + ------- + starttime : datetime.datetime + Datetime of the beginning of the range covered by the file + + endtime : datetime.datetime + Datetime of the end of the range covered by the file + """ + file_parts = filename.split('_') + start = Time(file_parts[3], format='mjd') + end = Time(file_parts[5], format='mjd') + return start.tt.datetime, end.tt.datetime + + def get_mean_dark_image_from_stats_table(self): + """Get the name of the mean dark image file to be displayed + """ + self.dark_image_picture = None + if len(self._stats_mean_dark_image_files) > 0: + # Grab the most recent entry + image_path = os.path.join(self.mean_slope_dir, self._stats_mean_dark_image_files[-1].replace('fits', 'png')) + if os.path.isfile(image_path): + self.dark_image_picture = image_path + + def get_latest_histogram_data(self): + """Organize data for histogram plot. In this case, we only need the + most recent entry for the aperture. Note that for full frame data, + there will be one entry per amplifier, e.g. '1', '2', '3', '4', for + the four quadrants, as well as a '5' entry, which covers the entire + detector. For subarray data, there will be a single entry with an + amplifier value of '1'. + + This function assumes that the data from the database have already + been filtered such that all entries are for the aperture of interest. 
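The "all rows written by the most recent monitor run" selection described here can be sketched with plain datetimes; the entry dates and amplifier labels below are invented.

from datetime import datetime, timedelta
import numpy as np

entry_dates = np.array([datetime(2022, 3, 1, 12, 0, 0),
                        datetime(2022, 3, 1, 12, 0, 1),    # same run, written ~1 s later
                        datetime(2022, 1, 15, 9, 30, 0)])  # an older run
amplifiers = np.array(['1', '5', '1'])

# Keep every entry added within a few seconds of the newest one
latest_date = np.max(entry_dates)
most_recent_idx = np.where(entry_dates > (latest_date - timedelta(seconds=10)))[0]
print(amplifiers[most_recent_idx])    # -> ['1' '5']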
+ """ + self.hist_data = {} + if len(self._entry_dates) > 0: + # Find the index of the most recent entry + #self._aperture_entries = np.where((self._apertures == aperture))[0] + latest_date = np.max(self._entry_dates) #[self._aperture_entries]) + + # Get indexes of entries for all amps that were added in the + # most recent run of the monitor for the aperture. All entries + # for a given run are added to the database within a fraction of + # a second, so using a time range of a few seconds should be fine. + delta_time = timedelta(seconds=10) + most_recent_idx = np.where(self._entry_dates > (latest_date - delta_time))[0] + + # Store the histogram data in a dictionary where the keys are the + # amplifier values (note that these are strings e.g. '1''), and the + # values are tuples of (x, y) lists + for idx in most_recent_idx: + self.hist_data[self.db.stats_data[idx].amplifier] = (self.db.stats_data[idx].hist_dark_values, + self.db.stats_data[idx].hist_amplitudes) + + def get_trending_data(self): + """Organize data for the trending plot. Here we need all the data for + the aperture. Keep amplifier-specific data separated. + """ + # Separate the trending data by amplifier + self.mean_dark = {} + self.stdev_dark = {} + self.obstime = {} + + if len(self._amplifiers) > 0: + amp_vals = np.unique(np.array(self._amplifiers)) + for amp in amp_vals: + amp_rows = np.where(self._amplifiers == amp)[0] + self.mean_dark[amp] = self._mean[amp_rows] + self.stdev_dark[amp] = self._stdev[amp_rows] + self.obstime[amp] = self._obs_mid_time[amp_rows] + + def stats_data_to_lists(self): + """Create arrays from some of the stats database columns that are + used by multiple plot types + """ + #apertures = np.array([e.aperture for e in self.db.stats_data]) + self._amplifiers = np.array([e.amplifier for e in self.db.stats_data]) + self._entry_dates = np.array([e.entry_date for e in self.db.stats_data]) + self._mean = np.array([e.mean for e in self.db.stats_data]) + self._stdev = np.array([e.stdev for e in self.db.stats_data]) + self._obs_mid_time = np.array([e.obs_mid_time for e in self.db.stats_data]) + self._stats_mean_dark_image_files = np.array([e.mean_dark_image_file for e in self.db.stats_data]) + self._stats_numfiles = np.array([len(e.source_files) for e in self.db.stats_data]) + + +class DarkTrendPlot(): + """Create the dark current trending plot (mean dark rate vs time) for + the given aperture. + + Attributes + ---------- + aperture : str + Name of aperture (e.g. NRCA1_FULL) + + mean_dark : dict + Trending data. Keys are amplifier values (e.g. '1'). Values are + lists of mean dark rates + + stdev_dark : dict + Standard deviation of the dark rate data. Keys are amplifier values + (e.g. '1'). Values are lists of dark rate standard deviations + + obstime : dict + Observation time associated with the dark rates. Keys are amplifier + values (e.g. '1'). Values are lists of datetime objects + + plot : bokeh.figure + Figure containing trending plot + + """ + def __init__(self, aperture, mean_dark, stdev_dark, obstime): + """Creates the plot given the input data + + Parameters + ---------- + aperture : str + Name of the aperture (e.g. 'NRCA1_FULL') + + mean_dark : dict + Trending data. Keys are amplifier values (e.g. '1'). Values are + lists of mean dark rates + + stdev_dark : dict + Standard deviation of the dark rate data. Keys are amplifier values + (e.g. '1'). Values are lists of dark rate standard deviations + + obstime : dict + Observation time associated with the dark rates. Keys are amplifier + values (e.g. 
'1'). Values are lists of datetime objects + """ + self.aperture = aperture + self.mean_dark = mean_dark + self.stdev_dark = stdev_dark + self.obstime = obstime + self.create_plot() + + def create_plot(self): + """Takes the data, places it in a ColumnDataSource, and creates the figure + """ + if len(self.mean_dark) > 0: + # Specify which key ("amplifier") to show. If there is data for amp='5', + # show that, as it will be the data for the entire detector. If not then + # we have subarray data and should use amp='1'. + # A bug in the dark monitor means that for NIRISS, there is no amp = '5' + # entry at the moment. So we set amp=1. Normally this would only plot the + # histogram for amp 1, but since the dark_monitor output at the moment is + # wrong and the histogram for the entire detector is being saved in the entries + # for each amp, we can get away with using use_amp=1 at the moment. + if '5' in self.mean_dark: + use_amp = '5' + legend_label = 'Full aperture' + else: + use_amp = '1' + legend_label = 'Amp 1' + + # If there are trending data for multiple amps, then we can plot each + if len(self.mean_dark) > 1: + # Looks like the histogram data for the individual amps is not being saved + # correctly. The data are identical for the full aperture and all amps. So + # for the moment, show only the full aperture data (by setting per_amp=False). + per_amp = False + main_label = 'Full Aperture' + + # Colors to use for the amp-dpendent plots + colors = ['red', 'orange', 'green', 'grey'] + else: + per_amp = False + + error_lower = self.mean_dark[use_amp] - self.stdev_dark[use_amp] + error_upper = self.mean_dark[use_amp] + self.stdev_dark[use_amp] + + # Create a ColumnDataSource for the main amp to use + source = ColumnDataSource(data=dict(mean_dark=self.mean_dark[use_amp], + stdev_dark=self.stdev_dark[use_amp], + error_lower=error_lower, + error_upper=error_upper, + time=self.obstime[use_amp] + ) + ) + self.plot = figure(title=f'{self.aperture}: Mean +/- 1-sigma Dark Rate', tools='pan,box_zoom,reset,wheel_zoom,save', + background_fill_color="#fafafa") + + # Plot the "main" amp data along with error bars + self.plot.scatter(x='time', y='mean_dark', fill_color="navy", alpha=0.75, source=source, legend_label=legend_label) + self.plot.add_layout(Whisker(source=source, base="time", upper="error_upper", lower="error_lower", line_color='navy')) + hover_tool = HoverTool(tooltips=[('Dark rate:', '@mean_dark'), + ('Date:', '@time{%d %b %Y}') + ]) + hover_tool.formatters = {'@time': 'datetime'} + self.plot.tools.append(hover_tool) + + # If there are multiple amps to plot, do that here + if per_amp: + amp_source = {} + # Repeat for all amps. 
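A minimal standalone version of the scatter-plus-Whisker pattern used above for the mean +/- 1-sigma trend; the dark rates and dates are invented.

from datetime import datetime
import numpy as np
from bokeh.models import ColumnDataSource, Whisker
from bokeh.plotting import figure

obstime = [datetime(2022, 1, 1), datetime(2022, 1, 8), datetime(2022, 1, 15)]
mean_dark = np.array([0.0021, 0.0023, 0.0020])
stdev_dark = np.array([0.0004, 0.0003, 0.0005])

source = ColumnDataSource(data=dict(time=obstime,
                                    mean_dark=mean_dark,
                                    error_lower=mean_dark - stdev_dark,
                                    error_upper=mean_dark + stdev_dark))

plot = figure(x_axis_type='datetime', title='Mean +/- 1-sigma Dark Rate')
plot.scatter(x='time', y='mean_dark', source=source, fill_color='navy', alpha=0.75)
# The Whisker glyph draws the error bars from the same ColumnDataSource columns
plot.add_layout(Whisker(source=source, base='time', upper='error_upper',
                        lower='error_lower', line_color='navy'))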
Be sure to skip the amp that's already completed + for amp, color in zip(self.mean_dark, colors): + if amp != use_amp: + amp_source[amp] = ColumnDataSource(data=dict(mean_dark=self.mean_dark[amp], + stdev_dark=self.stdev_dark[amp], + time=self.obstime[amp] + ) + ) + self.plot.scatter(x='time', y='mean_dark', fill_color=color, alpha=0.5, source=amp_source[amp], + legend_label=f'Amp {amp}') + + # Make the x axis tick labels look nice + self.plot.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], + seconds=["%d %b %H:%M:%S.%3N"], + hours=["%d %b %H:%M"], + days=["%d %b %H:%M"], + months=["%d %b %Y %H:%M"], + years=["%d %b %Y"] + ) + self.plot.xaxis.major_label_orientation = np.pi / 4 + + # Set x range + time_pad = (max(self.obstime[use_amp]) - min(self.obstime[use_amp])) * 0.05 + if time_pad == timedelta(seconds=0): + time_pad = timedelta(days=1) + self.plot.x_range.start = min(self.obstime[use_amp]) - time_pad + self.plot.x_range.end = max(self.obstime[use_amp]) + time_pad + + # Set y range + max_val = -99999. + min_val = 99999. + for key in self.mean_dark: + mx = np.max(self.mean_dark[key] + self.stdev_dark[key]) + mn = np.min(self.mean_dark[key] - self.stdev_dark[key]) + if mx > max_val: + max_val = mx + if mn < min_val: + min_val = mn + self.plot.y_range.start = min_val * 0.95 + self.plot.y_range.end = max_val * 1.05 + self.plot.legend.location = "top_right" + self.plot.legend.background_fill_color = "#fefefe" + self.plot.grid.grid_line_color="white" + else: + # If there are no data, make a placeholder plot + self.plot = figure(title=f'{self.aperture}: Mean +/- 1-sigma Dark Rate', tools='pan,box_zoom,reset,wheel_zoom,save', + background_fill_color="#fafafa") + self.plot.x_range.start = 0 + self.plot.x_range.end = 1 + self.plot.y_range.start = 0 + self.plot.y_range.end = 1 + + source = ColumnDataSource(data=dict(x=[0.5], y=[0.5], text=['No data'])) + glyph = Text(x="x", y="y", text="text", angle=0., text_color="navy", text_font_size={'value':'20px'}) + self.plot.add_glyph(source, glyph) + + self.plot.xaxis.axis_label = 'Date' + self.plot.yaxis.axis_label = 'Dark Rate (DN/sec)' diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_filesystem_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_filesystem_bokeh.py deleted file mode 100644 index fa208db79..000000000 --- a/jwql/website/apps/jwql/monitor_pages/monitor_filesystem_bokeh.py +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Jan 16 14:09:18 2019 - -@author: gkanarek -""" - -import json -import os - -from astropy.table import Table, vstack -from astropy.time import Time - -from jwql.bokeh_templating import BokehTemplate -from jwql.utils.utils import get_config - -SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) - -FIG_FORMATS = """ -Figure: - tools: 'pan,box_zoom,reset,wheel_zoom,save' - x_axis_type: 'datetime' - x_axis_label: 'Date' - sizing_mode: 'stretch_both' -Line: - line_width: 2 -""" - - -class MonitorFilesystem(BokehTemplate): - - def pre_init(self): - self._embed = True - - # App design - self.format_string = FIG_FORMATS - self.interface_file = os.path.join(SCRIPT_DIR, "yaml", "monitor_filesystem_interface.yaml") - - # Get path, directories and files in system and count files in all directories - self.settings = get_config() - self.filesystem = self.settings['filesystem'] - self.outputs_dir = os.path.join(self.settings['outputs'], - 'monitor_filesystem') - - self.allowed_types = ['fits_files', 'uncal', 'cal', 'rate', 'rateints', - 
'i2d', 'nrc', 'nrs', 'nis', 'mir', 'gui'] - - # Load any existing data - self.initial_load() - - self.types_k = ['circle', 'diamond', 'square', 'triangle', - 'asterisk'] + ['x'] * 6 - self.types_y = ['fits', 'uncal', 'cal', 'rate', 'rateint', - 'i2d', 'nrc', 'nrs', 'nis', 'mir', 'fgs'] - self.types_c = ['black', 'red', 'blue', 'green', 'orange', 'purple', - 'midnightblue', 'springgreen', 'darkcyan', - 'dodgerblue', 'darkred'] - self.types_l = ['Total FITS files', 'Uncalibrated FITS files', - 'Calibrated FITS files', 'Rate FITS files', - 'Rateints FITS files', 'I2D FITS files', - 'NIRCam FITS files', 'NIRSpec FITS files', - 'NIRISS FITS files', 'MIRI FITS files', - 'FGS FITS files'] - - def post_init(self): - self.update_plots(full=True) - - def initial_load(self): - statsfile = os.path.join(self.outputs_dir, 'statsfile.json') - filebytype = os.path.join(self.outputs_dir, 'filesbytype.json') - sizebytype = os.path.join(self.outputs_dir, 'sizebytype.json') - - self.statistics = Table(names=['timestamp', 'file_count', 'total', - 'available', 'used', 'percent_used'], - dtype=[Time, int, int, int, int, float]) - self.statistics['percent_used'].format = "%.1f" - if os.path.exists(statsfile): - with open(statsfile) as f: - stats = json.load(f) - times, fc, tot, avail, used, perc = zip(*stats) - self.statistics['timestamp'] = Time(times) - self.statistics['file_count'] = map(int, fc) - self.statistics['total'] = map(int, tot) - self.statistics['available'] = map(int, avail) - self.statistics['used'] = map(int, used) - self.statistics['percent_used'] = map(float, perc) - - self.ftypes = Table(names=['timestamp'] + self.allowed_types, - dtype=[Time] + [int] * 11) - if os.path.exists(filebytype): - with open(filebytype) as f: - fbytype = json.load(f) - times, *ftypes = zip(*fbytype) - self.ftypes['timestamp'] = Time(times) - for c, colname in enumerate(self.allowed_types): - self.ftypes[colname] = map(int, ftypes[c]) - - self.stypes = Table(names=['timestamp'] + self.allowed_types, - dtype=[Time] + [float] * 11) - if os.path.exists(sizebytype): - with open(sizebytype) as f: - sbytype = json.load(f) - times, *stypes = zip(*sbytype) - self.stypes['timestamp'] = Time(times) - for c, colname in enumerate(self.allowed_types): - self.stypes[colname] = map(int, stypes[c]) - - def update_plots(self, full=False): - - if full: - # Initialize each ColumnDataSource so that we can use stream() later - self.refs['source_filecount'].data = { - 'dates': self.statistics['timestamp'].datetime64, - 'filecount': self.statistics['file_count'].data} - - self.refs['source_stats'].data = { - 'dates': self.statistics['timestamp'].datetime64, - 'systemsize': self.statistics['total'].data.astype(float) / (1024.**3), - 'freesize': self.statistics['available'].data.astype(float) / (1024.**3), - 'usedsize': self.statistics['used'].data.astype(float) / (1024.**3)} - - ftype_dict = {'dates': self.ftypes['timestamp'].datetime64} - ftype_dict.update({x: self.ftypes[y].data for x, y in zip(self.types_y, - self.allowed_types)}) - self.refs['source_files'].data = ftype_dict - - stype_dict = {'dates': self.stypes['timestamp'].datetime64} - stype_dict.update({x: self.stypes[y].data for x, y in zip(self.types_y, - self.allowed_types)}) - self.refs['source_sizes'].data = stype_dict - else: - new_stats, new_files, new_sizes = self.read_new_data() - if new_stats: - self.refs['source_filecount'].stream({ - 'dates': new_stats['timestamp'].datetime64, - 'filecount': new_stats['file_count'].data}) - self.refs['source_stats'].stream({ - 'dates': 
new_stats['timestamp'].datetime64, - 'systemsize': new_stats['total'].data, - 'freesize': new_stats['available'].data, - 'usedsize': new_stats['used'].data}) - if new_files: - ftype_dict = {'dates': new_files['timestamp'].datetime64} - ftype_dict.update({x: new_files[y].data for x, y in zip(self.types_y, - self.allowed_types)}) - self.refs['source_files'].stream(ftype_dict) - - if new_sizes: - stype_dict = {'dates': new_sizes['timestamp'].datetime64} - stype_dict.update({x: new_sizes[y].data / (1024.**3) - for x, y in zip(self.types_y, self.allowed_types)}) - self.refs['source_sizes'].data = stype_dict - - if not self.statistics: - self.latest_timestamp = Time(0., format='unix') - else: - self.latest_timestamp = self.statistics['timestamp'].max() - - def read_new_data(self): - """ - Algorithm: - 1. Read in the json files (this step will be replaced when we move - away from json) into tables. - 2. Create new tables from all rows which have been added since the - last timestamp in the current tables. - 3. Concatenate the new tables with a vertical join. - 4. Return the new tables so they can be streamed to the plots. - """ - statsfile = os.path.join(self.outputs_dir, 'statsfile.json') - filebytype = os.path.join(self.outputs_dir, 'filesbytype.json') - sizebytype = os.path.join(self.outputs_dir, 'sizebytype.json') - - # Have any of the files been modified since the last timestamp? - stats_modtime = Time(os.stat(statsfile).st_mtime, format='unix') - files_modtime = Time(os.stat(filebytype).st_mtime, format='unix') - sizes_modtime = Time(os.stat(sizebytype).st_mtime, format='unix') - - new_stats = Table(names=self.statistics.colnames, - dtype=self.statistics.dtype) - new_files = Table(names=self.ftypes.colnames, - dtype=self.ftypes.dtype) - new_sizes = Table(names=self.stypes.colnames, - dtype=self.stypes.dtype) - - if stats_modtime > self.latest_timestamp: - with open(statsfile) as f: - stats = json.load(f) - times, fc, tot, avail, used, perc = zip(*stats) - times = Time(times) - new_rows = times > self.latest_timestamp - new_stats['timestamp'] = times[new_rows] - new_stats['file_count'] = map(int, fc[new_rows]) - new_stats['total'] = map(int, tot[new_rows]) - new_stats['available'] = map(int, avail[new_rows]) - new_stats['used'] = map(int, used[new_rows]) - new_stats['percent_used'] = map(float, perc[new_rows]) - - self.statistics = vstack([self.statistics, new_stats]) - - if files_modtime > self.latest_timestamp: - with open(filebytype) as f: - fbytype = json.load(f) - times, *ftypes = zip(*fbytype) - times = Time(times) - new_rows = times > self.latest_timestamp - new_files['timestamp'] = times[new_rows] - for c, colname in enumerate(self.allowed_types): - new_files[colname] = map(int, ftypes[c][new_rows]) - - self.ftypes = vstack([self.ftypes, new_files]) - - if sizes_modtime > self.latest_timestamp: - with open(sizebytype) as f: - sbytype = json.load(f) - times, *stypes = zip(*sbytype) - times = Time(times) - new_rows = times > self.latest_timestamp - new_sizes['timestamp'] = times[new_rows] - for c, colname in enumerate(self.allowed_types): - new_sizes[colname] = map(int, stypes[c][new_rows]) - - self.stypes = vstack([self.stypes, new_sizes]) - - return new_stats, new_files, new_sizes diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_mast_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_mast_bokeh.py deleted file mode 100644 index 4276276dd..000000000 --- a/jwql/website/apps/jwql/monitor_pages/monitor_mast_bokeh.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python3 -# 
-*- coding: utf-8 -*- -""" -Created on Tue Feb 5 15:19:20 2019 - -@author: gkanarek -""" - -import os - -from astropy.time import Time -import pandas as pd - -from jwql.bokeh_templating import BokehTemplate -from jwql.utils.utils import get_config - -SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class MastMonitor(BokehTemplate): - - def pre_init(self): - self._embed = True - - # App design - self.format_string = None - self.interface_file = os.path.join(SCRIPT_DIR, 'yaml', "monitor_mast_interface.yaml") - - self.settings = get_config() - self.output_dir = self.settings['outputs'] - - self.read_new_data() - - self.cache_time = Time(0., format='unix') - - self.jwst_bar_colors = self.caom_bar_colors = 3 - self.jwst_datacols = [] - self.caom_datacols = [] - - def post_init(self): - self.update_plots() - - def read_new_data(self): - """ - Placeholder to read what are currently Pandas dataframe dumps. Replace - this when we have a new database infrastructure. - """ - - jwst_filepath = os.path.join(self.outputs_dir, 'database_monitor_jwst.json') - caom_filepath = os.path.join(self.outputs_dir, 'database_monitor_caom.json') - - jwst_modtime = Time(os.stat(jwst_filepath).st_mtime, format='unix') - caom_modtime = Time(os.stat(caom_filepath).st_mtime, format='unix') - - if jwst_modtime >= self.cache_time: - self.jwst_df = pd.read_json(jwst_filepath, orient='records') - if caom_modtime >= self.cache_time: - self.caom_df = pd.read_json(caom_filepath, orient='records') - - self.cache_time = Time.now() - - def update_plots(self): - """ - Update the various sources and variables for the MAST monitor bar charts. - """ - - self.read_new_data() - - jwst_groups = list(self.jwst_df['instrument']) - caom_groups = list(self.caom_df['instrument']) - - self.jwst_datacols = [col for col in list(self.jwst_df.columns) if col != 'instrument'] - self.caom_datacols = [col for col in list(self.caom_df.columns) if col != 'instrument'] - - jwst_data = {'groups': jwst_groups} - caom_data = {'groups': caom_groups} - - for col in self.jwst_datacols: - jwst_data.update({col: list(self.jwst_df[col])}) - for col in self.caom_datacols: - caom_data.update({col: list(self.caom_df[col])}) - - self.jwst_bar_colors = max(3, len(self.jwst_datacols)) - self.caom_bar_colors = max(3, len(self.caom_datacols)) - - jwst_x = [(group, datacol) for group in jwst_groups for datacol in self.jwst_datacols] - jwst_counts = sum(zip(*[jwst_data[col] for col in self.jwst_datacols]), ()) - caom_x = [(group, datacol) for group in caom_groups for datacol in self.caom_datacols] - caom_counts = sum(zip(*[caom_data[col] for col in self.caom_datacols]), ()) - - self.refs['jwst_source'].data = {'x': jwst_x, 'counts': jwst_counts} - self.refs['caom_source'].data = {'x': caom_x, 'counts': caom_counts} diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_readnoise_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_readnoise_bokeh.py index 8f2cda9d0..552cac71f 100644 --- a/jwql/website/apps/jwql/monitor_pages/monitor_readnoise_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_readnoise_bokeh.py @@ -12,7 +12,7 @@ :: - from jwql.website.apps.jwql import monitor_pages + . 
monitor_template = monitor_pages.ReadnoiseMonitor() monitor_template.input_parameters = ('NIRCam', 'NRCA1_FULL') """ @@ -20,34 +20,54 @@ from datetime import datetime, timedelta import os +from bokeh.embed import components +from bokeh.layouts import column, row +from bokeh.models import Panel, Tabs # bokeh <= 3.0 +from bokeh.models import ColumnDataSource, HoverTool +# from bokeh.models import TabPanel, Tabs # bokeh >= 3.0 +from bokeh.plotting import figure import numpy as np -from jwql.bokeh_templating import BokehTemplate from jwql.database.database_interface import session from jwql.database.database_interface import FGSReadnoiseStats, MIRIReadnoiseStats, NIRCamReadnoiseStats, NIRISSReadnoiseStats, NIRSpecReadnoiseStats -from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE - -SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) - - -class ReadnoiseMonitor(BokehTemplate): - - # Combine the input parameters into a single property because we - # do not want to invoke the setter unless all are updated - @property - def input_parameters(self): - return (self._instrument, self._aperture) - - @input_parameters.setter - def input_parameters(self, info): - self._instrument, self._aperture = info - self.pre_init() - self.post_init() +from jwql.utils.constants import FULL_FRAME_APERTURES, JWST_INSTRUMENT_NAMES_MIXEDCASE + + +class ReadnoiseMonitorData(): + """Class to hold bias data to be plotted + + Parameters + ---------- + + instrument : str + Instrument name (e.g. nircam) + aperture : str + Aperture name (e.g. apername) + + Attributes + ---------- + + instrument : str + Instrument name (e.g. nircam) + aperture : str + Aperture name (e.g. apername) + query_results : list + Results from read noise statistics table based on + instrument, aperture and exposure start time + stats_table : sqlalchemy.orm.decl_api.DeclarativeMeta + Statistics table object to query based on instrument + and aperture + """ + + def __init__(self, instrument, aperture): + self.instrument = instrument + self.aperture = aperture + self.load_data() def identify_tables(self): """Determine which database tables to use for the given instrument""" - mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self._instrument.lower()] + mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument.lower()] self.stats_table = eval('{}ReadnoiseStats'.format(mixed_case_name)) def load_data(self): @@ -59,96 +79,137 @@ def load_data(self): # Query database for all data in readnoise stats with a matching aperture, # and sort the data by exposure start time. 
self.query_results = session.query(self.stats_table) \ - .filter(self.stats_table.aperture == self._aperture) \ + .filter(self.stats_table.aperture == self.aperture) \ .order_by(self.stats_table.expstart) \ .all() session.close() - def pre_init(self): - # Start with default values for instrument and aperture because - # BokehTemplate's __init__ method does not allow input arguments - try: - dummy_instrument = self._instrument - dummy_aperture = self._aperture - except AttributeError: - self._instrument = 'NIRCam' - self._aperture = '' +class ReadNoiseFigure(): + """Generate tabbed plot displayed in JWQL web application + """ + def __init__(self, instrument): + instrument_apertures = FULL_FRAME_APERTURES[instrument.upper()] - self._embed = True - self.format_string = None - self.interface_file = os.path.join(SCRIPT_DIR, 'yaml', 'monitor_readnoise_interface.yaml') + self.tabs = [] + for aperture in instrument_apertures: + readnoise_tab = ReadNoisePlotTab(instrument, aperture) + self.tabs.append(readnoise_tab.tab) - def post_init(self): + self.plot = Tabs(tabs=self.tabs) + self.tab_components = components(self.plot) - # Load the readnoise data - self.load_data() - # Update the mean readnoise figures - self.update_mean_readnoise_figures() +class ReadNoisePlotTab(): + """Class to make instrument/aperture panels + """ + def __init__(self, instrument, aperture): + self.instrument = instrument + self.aperture = aperture - # Update the readnoise difference image and histogram - self.update_readnoise_diff_plots() + self.db = ReadnoiseMonitorData(self.instrument, self.aperture) - def update_mean_readnoise_figures(self): - """Updates the mean readnoise bokeh plots""" + self.plot_readnoise_amplifers() + self.plot_readnoise_difference_image() + self.plot_readnoise_histogram() - # Get the dark exposures info - filenames = [os.path.basename(result.uncal_filename).replace('_uncal.fits', '') for result in self.query_results] - expstarts_iso = np.array([result.expstart for result in self.query_results]) - expstarts = np.array([datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f') for date in expstarts_iso]) - nints = [result.nints for result in self.query_results] - ngroups = [result.ngroups for result in self.query_results] + self.tab = Panel(child=column(row(*self.amp_plots), + self.diff_image_plot, + self.readnoise_histogram), + title=self.aperture) - # Update the mean readnoise figures for all amps + def plot_readnoise_amplifers(self): + """Class to create readnoise scatter plots per amplifier. 
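A cut-down sketch of the tab-building pattern in ReadNoiseFigure (bokeh <= 3.0 names, as noted in the imports above); the apertures are real NIRCam names but the plotted points are placeholders.

from bokeh.embed import components
from bokeh.layouts import column
from bokeh.models import Panel, Tabs     # TabPanel/Tabs in bokeh >= 3.0
from bokeh.plotting import figure

tabs = []
for aperture in ['NRCA1_FULL', 'NRCA2_FULL']:
    fig = figure(title=aperture, width=300, height=300)
    fig.circle(x=[0, 1, 2], y=[1, 3, 2])             # placeholder data
    tabs.append(Panel(child=column(fig), title=aperture))

plot = Tabs(tabs=tabs)
script, div = components(plot)    # script/div strings for embedding in the web template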
+ """ + self.amp_plots = [] for amp in ['1', '2', '3', '4']: - readnoise_vals = np.array([getattr(result, 'amp{}_mean'.format(amp)) for result in self.query_results]) - self.refs['mean_readnoise_source_amp{}'.format(amp)].data = {'time': expstarts, - 'time_iso': expstarts_iso, - 'mean_rn': readnoise_vals, - 'filename': filenames, - 'nints': nints, - 'ngroups': ngroups} - self.refs['mean_readnoise_figure_amp{}'.format(amp)].title.text = 'Amp {}'.format(amp) - self.refs['mean_readnoise_figure_amp{}'.format(amp)].hover.tooltips = [('file', '@filename'), - ('time', '@time_iso'), - ('nints', '@nints'), - ('ngroups', '@ngroups'), - ('readnoise', '@mean_rn')] - - # Update plot limits if data exists - if len(readnoise_vals) != 0: - self.refs['mean_readnoise_xr_amp{}'.format(amp)].start = expstarts.min() - timedelta(days=3) - self.refs['mean_readnoise_xr_amp{}'.format(amp)].end = expstarts.max() + timedelta(days=3) - self.refs['mean_readnoise_yr_amp{}'.format(amp)].start = min(x for x in readnoise_vals if x is not None) - 1 - self.refs['mean_readnoise_yr_amp{}'.format(amp)].end = max(x for x in readnoise_vals if x is not None) + 1 - - def update_readnoise_diff_plots(self): - """Updates the readnoise difference image and histogram""" + + amp_plot = figure(title='Amp {}'.format(amp), width=280, height=280, x_axis_type='datetime') + amp_plot.xaxis[0].ticker.desired_num_ticks = 4 + + if self.db.query_results: + readnoise_vals = np.array([getattr(result, 'amp{}_mean'.format(amp)) for result in self.db.query_results]) + else: + readnoise_vals = np.array(list()) + + filenames = [os.path.basename(result.uncal_filename).replace('_uncal.fits', '') for result in self.db.query_results] + expstarts_iso = np.array([result.expstart for result in self.db.query_results]) + expstarts = np.array([datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f') for date in expstarts_iso]) + nints = [result.nints for result in self.db.query_results] + ngroups = [result.ngroups for result in self.db.query_results] + + source = ColumnDataSource(data=dict( + file=filenames, + expstarts=expstarts, + nints=nints, + ngroups=ngroups, + readnoise=readnoise_vals)) + + amp_plot.add_tools(HoverTool(tooltips=[("file", "@file"), + ("time", "@expstarts"), + ("nints", "@nints"), + ("ngroups", "@ngroups"), + ("readnoise", "@readnoise")])) + + amp_plot.circle(x='expstarts', y='readnoise', source=source) + + amp_plot.xaxis.axis_label = 'Date' + amp_plot.yaxis.axis_label = 'Mean Readnoise [DN]' + + self.amp_plots.append(amp_plot) + + def plot_readnoise_difference_image(self): + """Updates the readnoise difference image""" # Update the readnoise difference image and histogram, if data exists - if len(self.query_results) != 0: - # Get the most recent data; the entries were sorted by time when - # loading the database, so the last entry will always be the most recent. 
- diff_image_png = self.query_results[-1].readnoise_diff_image + + self.diff_image_plot = figure(title='Readnoise Difference (most recent dark - pipeline reffile)', + height=500, width=500, sizing_mode='scale_width') + + if len(self.db.query_results) != 0: + diff_image_png = self.db.query_results[-1].readnoise_diff_image diff_image_png = os.path.join('/static', '/'.join(diff_image_png.split('/')[-6:])) - diff_image_n = np.array(self.query_results[-1].diff_image_n) - diff_image_bin_centers = np.array(self.query_results[-1].diff_image_bin_centers) - - # Update the readnoise difference image and histogram - self.refs['readnoise_diff_image'].image_url(url=[diff_image_png], x=0, y=0, w=2048, h=2048, anchor="bottom_left") - self.refs['diff_hist_source'].data = {'n': diff_image_n, - 'bin_centers': diff_image_bin_centers} - self.refs['diff_hist_xr'].start = diff_image_bin_centers.min() - self.refs['diff_hist_xr'].end = diff_image_bin_centers.max() - self.refs['diff_hist_yr'].start = diff_image_n.min() - self.refs['diff_hist_yr'].end = diff_image_n.max() + diff_image_n.max() * 0.05 - - # Update the readnoise difference image style - self.refs['readnoise_diff_image'].xaxis.visible = False - self.refs['readnoise_diff_image'].yaxis.visible = False - self.refs['readnoise_diff_image'].xgrid.grid_line_color = None - self.refs['readnoise_diff_image'].ygrid.grid_line_color = None - self.refs['readnoise_diff_image'].title.text_font_size = '22px' - self.refs['readnoise_diff_image'].title.align = 'center' + self.diff_image_plot.image_url(url=[diff_image_png], x=0, y=0, w=2048, h=2048, anchor="bottom_left") + + self.diff_image_plot.xaxis.visible = False + self.diff_image_plot.yaxis.visible = False + self.diff_image_plot.xgrid.grid_line_color = None + self.diff_image_plot.ygrid.grid_line_color = None + self.diff_image_plot.title.text_font_size = '22px' + self.diff_image_plot.title.align = 'center' + + + def plot_readnoise_histogram(self): + """Updates the readnoise histogram""" + + if len(self.db.query_results) != 0: + diff_image_n = np.array(self.db.query_results[-1].diff_image_n) + diff_image_bin_centers = np.array(self.db.query_results[-1].diff_image_bin_centers) + else: + diff_image_n = np.array(list()) + diff_image_bin_centers = np.array(list()) + + hist_xr_start = diff_image_bin_centers.min() + hist_xr_end = diff_image_bin_centers.max() + hist_yr_start = diff_image_n.min() + hist_yr_end = diff_image_n.max() + diff_image_n.max() * 0.05 + + self.readnoise_histogram = figure(height=500, width=500, + x_range=(hist_xr_start, hist_xr_end), + y_range=(hist_yr_start, hist_yr_end), + sizing_mode='scale_width') + + source = ColumnDataSource(data=dict( + x=diff_image_bin_centers, + y=diff_image_n, + )) + + self.readnoise_histogram.add_tools(HoverTool(tooltips=[("Data (x, y)", "(@x, @y)"),])) + + self.readnoise_histogram.circle(x='x', y='y', source=source) + + self.readnoise_histogram.xaxis.axis_label = 'Readnoise Difference [DN]' + self.readnoise_histogram.yaxis.axis_label = 'Number of Pixels' + self.readnoise_histogram.xaxis.axis_label_text_font_size = "15pt" + self.readnoise_histogram.yaxis.axis_label_text_font_size = "15pt" diff --git a/jwql/website/apps/jwql/monitor_pages/yaml/badpixel_monitor_interface.yaml b/jwql/website/apps/jwql/monitor_pages/yaml/badpixel_monitor_interface.yaml deleted file mode 100755 index 82ebff748..000000000 --- a/jwql/website/apps/jwql/monitor_pages/yaml/badpixel_monitor_interface.yaml +++ /dev/null @@ -1,417 +0,0 @@ -# YAML file defining bokeh figures for the bad pixel monitor -# - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Dead Pixels v. Time Figure -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &dead_history_source - ref: "dead_history_source" - data: - time: !self.bad_history['DEAD'][0] - dead_pixels: !self.bad_history['DEAD'][1] - time_labels: !self.bad_history['DEAD'][2] - -- !Range1d: &dead_history_xrange - ref: "dead_history_xrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [0, 1] - -- !Range1d: &dead_history_yrange - ref: "dead_history_yrange" - start: 0 - end: 1 - bounds: 'auto' # !!python/tuple [-1, 1] - -- !Figure: &dead_history_figure - ref: "dead_history_figure" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Number of DEAD pixels" - x_range: *dead_history_xrange - y_range: *dead_history_yrange - tools: 'hover, zoom_in, zoom_out, box_zoom, pan, reset, save' - tooltips: "@time_labels Dead pix: @dead_pixels" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'dead_pixels', line_width: 5, 'source': *dead_history_source} - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Hot Pixels v. Time Figure -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &hot_history_source - ref: "hot_history_source" - data: - time: !self.bad_history['HOT'][0] - hot_pixels: !self.bad_history['HOT'][1] - time_labels: !self.bad_history['HOT'][2] - -- !Range1d: &hot_history_xrange - ref: "hot_history_xrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [0, 1] - -- !Range1d: &hot_history_yrange - ref: "hot_history_yrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [-1, 1] - -- !Figure: &hot_history_figure - ref: "hot_history_figure" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Number of HOT pixels" - x_range: *hot_history_xrange - y_range: *hot_history_yrange - tools: 'hover, zoom_in, zoom_out, box_zoom, pan, reset, save' - tooltips: "@time_labels Hot pix: @hot_pixels" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'hot_pixels', line_width: 5, 'source': *hot_history_source} - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Low QE Pixels v. Time Figure -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &low_qe_history_source - ref: "low_qe_history_source" - data: - time: !self.bad_history['LOW_QE'][0] - low_qe_pixels: !self.bad_history['LOW_QE'][1] - time_labels: !self.bad_history['LOW_QE'][2] - -- !Range1d: &low_qe_history_xrange - ref: "low_qe_history_xrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [0, 1] - -- !Range1d: &low_qe_history_yrange - ref: "low_qe_history_yrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [-1, 1] - -- !Figure: &low_qe_history_figure - ref: "low_qe_history_figure" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Number of LOW QE pixels" - x_range: *low_qe_history_xrange - y_range: *low_qe_history_yrange - tools: 'hover, zoom_in, zoom_out, box_zoom, pan, reset, save' - tooltips: "@time_labels Low QE pix: @low_qe_pixels" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'low_qe_pixels', line_width: 5, 'source': *low_qe_history_source} - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Open Pixels v. 
Time Figure -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &open_history_source - ref: "open_history_source" - data: - time: !self.bad_history['OPEN'][0] - open_pixels: !self.bad_history['OPEN'][1] - time_labels: !self.bad_history['OPEN'][2] - -- !Range1d: &open_history_xrange - ref: "open_history_xrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [0, 1] - -- !Range1d: &open_history_yrange - ref: "open_history_yrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [-1, 1] - -- !Figure: &open_history_figure - ref: "open_history_figure" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Number of OPEN pixels" - x_range: *open_history_xrange - y_range: *open_history_yrange - tools: 'hover, zoom_in, zoom_out, box_zoom, pan, reset, save' - tooltips: "@time_labels Open pix: @open_pixels" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'open_pixels', line_width: 5, 'source': *open_history_source} - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Open Adj Pixels v. Time Figure -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &adj_open_history_source - ref: "adj_open_history_source" - data: - time: !self.bad_history['ADJ_OPEN'][0] - adj_open_pixels: !self.bad_history['ADJ_OPEN'][1] - time_labels: !self.bad_history['ADJ_OPEN'][2] - -- !Range1d: &adj_open_history_xrange - ref: "adj_open_history_xrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [0, 1] - -- !Range1d: &adj_open_history_yrange - ref: "adj_open_history_yrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [-1, 1] - -- !Figure: &adj_open_history_figure - ref: "adj_open_history_figure" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Number of OPEN ADJACENT pixels" - x_range: *adj_open_history_xrange - y_range: *adj_open_history_yrange - tools: 'hover, zoom_in, zoom_out, box_zoom, pan, reset, save' - tooltips: "@time_labels Adj open pix: @adj_open_pixels" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'adj_open_pixels', line_width: 5, 'source': *adj_open_history_source} - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# RC Pixels v. Time Figure -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &rc_history_source - ref: "rc_history_source" - data: - time: !self.bad_history['RC'][0] - rc_pixels: !self.bad_history['RC'][1] - time_labels: !self.bad_history['RC'][2] - -- !Range1d: &rc_history_xrange - ref: "rc_history_xrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [0, 1] - -- !Range1d: &rc_history_yrange - ref: "rc_history_yrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [-1, 1] - -- !Figure: &rc_history_figure - ref: "rc_history_figure" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Number of RC pixels" - x_range: *rc_history_xrange - y_range: *rc_history_yrange - tools: 'hover, zoom_in, zoom_out, box_zoom, pan, reset, save' - tooltips: "@time_labels RC pix: @rc_pixels" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'rc_pixels', line_width: 5, 'source': *rc_history_source} - - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Other Bad Pixels v. 
Time Figure -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &other_bad_pixel_history_source - ref: "other_bad_pixel_history_source" - data: - time: !self.bad_history['OTHER_BAD_PIXEL'][0] - other_bad_pixel_pixels: !self.bad_history['OTHER_BAD_PIXEL'][1] - time_labels: !self.bad_history['OTHER_BAD_PIXEL'][2] - -- !Range1d: &other_bad_pixel_history_xrange - ref: "other_bad_pixel_history_xrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [0, 1] - -- !Range1d: &other_bad_pixel_history_yrange - ref: "other_bad_pixel_history_yrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [-1, 1] - -- !Figure: &other_bad_pixel_history_figure - ref: "other_bad_pixel_history_figure" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Number of OTHER BAD pixels" - x_range: *other_bad_pixel_history_xrange - y_range: *other_bad_pixel_history_yrange - tools: 'hover, zoom_in, zoom_out, box_zoom, pan, reset, save' - tooltips: '@time_labels Other bad pix: @other_bad_pixel_pixels' - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'other_bad_pixel_pixels', line_width: 5, 'source': *other_bad_pixel_history_source} - - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Telegraph Pixels v. Time Figure -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &telegraph_history_source - ref: "telegraph_history_source" - data: - time: !self.bad_history['TELEGRAPH'][0] - telegraph_pixels: !self.bad_history['TELEGRAPH'][1] - time_labels: !self.bad_history['TELEGRAPH'][2] - -- !Range1d: &telegraph_history_xrange - ref: "telegraph_history_xrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [0, 1] - -- !Range1d: &telegraph_history_yrange - ref: "telegraph_history_yrange" - start: 0 - end: 1 - bounds: 'auto' #!!python/tuple [-1, 1] - -- !Figure: &telegraph_history_figure - ref: "telegraph_history_figure" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Number of TELEGRAPH pixels" - x_range: *telegraph_history_xrange - y_range: *telegraph_history_yrange - tools: 'hover, zoom_in, zoom_out, box_zoom, pan, reset, save' - tooltips: "@time_labels Telegraph pix: @telegraph_pixels" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'telegraph_pixels', line_width: 5, 'source': *telegraph_history_source} - - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Show positions of bad pixels found in darks -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &dark_position_source - ref: "dark_position_source" - data: - bad_type: !self.latest_bad_from_dark_type - x_coord: !self.latest_bad_from_dark_x - y_coord: !self.latest_bad_from_dark_y - -- !Range1d: &dark_position_xrange - ref: "dark_position_xrange" - start: 0 - end: 2047 - -- !Range1d: &dark_position_yrange - ref: "dark_position_yrange" - start: 0 - end: 2047 - -- !CDSView: &figure_hot_view - source: *dark_position_source - filters: - - !GroupFilter: - column_name: "bad_type" - group: "HOT" - -- !CDSView: &figure_rc_view - source: *dark_position_source - filters: - - !GroupFilter: - column_name: "bad_type" - group: "RC" - -- !CDSView: &figure_other_view - source: *dark_position_source - filters: - - !GroupFilter: - column_name: "bad_type" - group: "OTHER_BAD_PIXEL" - -- !CDSView: &figure_telegraph_view - source: *dark_position_source - filters: - - !GroupFilter: - column_name: "bad_type" - group: "TELEGRAPH" - -- 
!Figure: &dark_position_figure - ref: "dark_position_figure" - x_axis_label: "X (pixel)" - y_axis_label: "Y (pixel)" - x_range: *dark_position_xrange - y_range: *dark_position_yrange - tools: 'hover, zoom_in, zoom_out, box_zoom, pan, reset, save' - tooltips: "(x,y): (@x_coord, @y_coord) Type: @bad_type" - elements: - - {'kind': 'circle', 'x': 'x_coord', 'y': 'y_coord', 'source': *dark_position_source, 'color': 'red', 'alpha': 0.5, 'legend': 'Hot', 'size': 6, 'view': *figure_hot_view} - - {'kind': 'circle', 'x': 'x_coord', 'y': 'y_coord', 'source': *dark_position_source, 'color': 'blue', 'alpha': 0.5, 'legend': 'RC', 'size': 6, 'view': *figure_rc_view} - - {'kind': 'circle', 'x': 'x_coord', 'y': 'y_coord', 'source': *dark_position_source, 'color': 'green', 'alpha': 0.5, 'legend': 'Other', 'size': 6, 'view': *figure_other_view} - - {'kind': 'circle', 'x': 'x_coord', 'y': 'y_coord', 'source': *dark_position_source, 'color': 'black', 'alpha': 0.5, 'legend': 'Telegraph', 'size': 6, 'view': *figure_telegraph_view} - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Show positions of bad pixels found in flats -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &flat_position_source - ref: "flat_position_source" - data: - bad_type: !self.latest_bad_from_flat_type - x_coord: !self.latest_bad_from_flat_x - y_coord: !self.latest_bad_from_flat_y - -- !Range1d: &flat_position_xrange - ref: "flat_position_xrange" - start: 0 - end: 2047 - -- !Range1d: &flat_position_yrange - ref: "flat_position_yrange" - start: 0 - end: 2047 - -- !CDSView: &figure_dead_view - source: *flat_position_source - filters: - - !GroupFilter: - column_name: "bad_type" - group: "DEAD" - -- !CDSView: &figure_open_view - source: *flat_position_source - filters: - - !GroupFilter: - column_name: "bad_type" - group: "OPEN" - -- !CDSView: &figure_adj_open_view - source: *flat_position_source - filters: - - !GroupFilter: - column_name: "bad_type" - group: "ADJ_OPEN" - -- !CDSView: &figure_low_qe_view - source: *flat_position_source - filters: - - !GroupFilter: - column_name: "bad_type" - group: "LOW_QE" - -- !Figure: &flat_position_figure - ref: "flat_position_figure" - x_axis_label: "X (pixel)" - y_axis_label: "Y (pixel)" - x_range: *flat_position_xrange - y_range: *flat_position_yrange - tools: 'hover, zoom_in, zoom_out, box_zoom, pan, reset, save' - tooltips: "(x, y): (@x_coord, @y_coord) Type: @bad_type" - elements: - - {'kind': 'circle', 'x': 'x_coord', 'y': 'y_coord', 'source': *flat_position_source, 'color': 'red', 'alpha': 0.5, 'legend': 'Dead', 'size': 6, 'view': *figure_dead_view} - - {'kind': 'circle', 'x': 'x_coord', 'y': 'y_coord', 'source': *flat_position_source, 'color': 'blue', 'alpha': 0.5, 'legend': 'Open', 'size': 6, 'view': *figure_open_view} - - {'kind': 'circle', 'x': 'x_coord', 'y': 'y_coord', 'source': *flat_position_source, 'color': 'green', 'alpha': 0.5, 'legend': 'Adj Open', 'size': 6, 'view': *figure_adj_open_view} - - {'kind': 'circle', 'x': 'x_coord', 'y': 'y_coord', 'source': *flat_position_source, 'color': 'black', 'alpha': 0.5, 'legend': 'Low QE', 'size': 6, 'view': *figure_low_qe_view} - - -#- !Document: -# - !row: -# - *dark_position_figure -# - *flat_position_figure -# - !row: -# - *dead_history_figure -# - *hot_history_figure -# - !row: -# - *low_qe_history_figure -# - *rc_history_figure -# - !row: -# - *open_history_figure -# - *adj_open_history_figure -# - !row: -# - *telegraph_history_figure -# - 
*other_bad_pixel_history_figure \ No newline at end of file diff --git a/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yaml b/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yaml deleted file mode 100755 index cdadceb33..000000000 --- a/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yaml +++ /dev/null @@ -1,109 +0,0 @@ -# YAML file defining bokeh figures for the dark monitor -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Dark Current v. Time Figure -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &dark_current_source - ref: "dark_current_source" - data: - time: !self.timestamps - dark_current: !self.dark_current - -- !Range1d: &dark_current_xrange - ref: "dark_current_xrange" - #start: 0 - #end: 1 - #bounds: 'auto' #!!python/tuple [0, 1] - -- !Range1d: &dark_current_yrange - ref: "dark_current_yrange" - #start: 0 - #end: 1 - #bounds: !!python/tuple [-1, 1] - -- !Figure: &dark_current_time_figure - ref: "dark_current_time_figure" - x_axis_label: "Time (MJD)" - y_axis_label: "Dark current (e-)" - x_range: *dark_current_xrange - y_range: *dark_current_yrange - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'dark_current', line_width: 5, 'source': *dark_current_source} - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Dark Histogram Figure -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &dark_full_hist_source - ref: "dark_full_hist_source" - data: - full_dark_bin_center: !self.full_dark_bin_center - full_dark_amplitude: !self.full_dark_amplitude - full_dark_bottom: !self.full_dark_bottom - full_dark_bin_width: !self.full_dark_bin_width - -- !Range1d: &dark_histogram_xrange - ref: "dark_histogram_xrange" - #start: 0 - #end: 1 - #bounds: 'auto' #!!python/tuple [0, 1] - -- !Range1d: &dark_histogram_yrange - ref: "dark_histogram_yrange" - #start: 0 - #end: 1 - #bounds: !!python/tuple [0, 1] - -- !Figure: &dark_full_histogram_figure - ref: "dark_full_histogram_figure" - x_axis_label: "Dark Current (DN/sec)" - y_axis_label: "Number of Pixels" - x_range: *dark_histogram_xrange - y_range: *dark_histogram_yrange - elements: - - {'kind': 'vbar', 'x': 'full_dark_bin_center', 'y': 'full_dark_bin_width', 'top': 'full_dark_amplitude', 'bottom': 'full_dark_bottom', 'source': *dark_full_hist_source} -# - {'kind': 'text', 'x': 0, 'y': 20000, 'id': 1001} - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Mean Dark Image Figure -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &mean_dark_source - ref: "mean_dark_source" - data: - dh: [1] - dw: [1] - image: [[[1,0], [0, 1]]] -- !Range1d: &stamp_xr - ref: "stamp_xr" - #start: 0 - #end: 1 - #bounds: !!python/tuple [0, 1] -- !Range1d: &stamp_yr - ref: "stamp_yr" - #start: 0 - #end: 1 - #bounds: !!python/tuple [0, 1] -- !LogColorMapper: &log_mapper - ref: "log_mapper" - palette: "Viridis256" - low: 0. - high: 1. 
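The dark-monitor YAML being deleted above declared its figures declaratively, including a mean dark image drawn with a "Viridis256" log color mapper and a color bar placed to the right. With the template gone, an equivalent figure has to be assembled directly with Bokeh, in the same style as the new readnoise plotting code earlier in this diff. The following is only a minimal sketch of such a replacement, assuming standard Bokeh APIs; the function name and data handling are illustrative and not taken from the repository:

```python
import numpy as np
from bokeh.models import ColorBar, LogColorMapper
from bokeh.plotting import figure


def build_mean_dark_image(image_2d):
    """Illustrative stand-in for the YAML 'mean_dark_image_figure' block."""
    image_2d = np.asarray(image_2d, dtype=float)
    # A log mapper needs a strictly positive lower bound, so clip at a small value
    mapper = LogColorMapper(palette="Viridis256",
                            low=max(image_2d.min(), 1e-3),
                            high=image_2d.max())
    fig = figure(height=250, width=250, tools="",
                 x_axis_label="Col = SIAF det Y",
                 y_axis_label="Row = SIAF det X")
    # Draw the 2D array as an image glyph, mapped through the log color mapper
    fig.image(image=[image_2d], x=0, y=0, dw=1, dh=1, color_mapper=mapper)
    # Mirrors the YAML element that attached the color bar as a right-hand layout
    fig.add_layout(ColorBar(color_mapper=mapper, location=(0, 0)), "right")
    return fig
```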
-- !ColorBar: &mean_dark_cbar - ref: "mean_dark_cbar" - color_mapper: *log_mapper - location: !!python/tuple [0, 0] -- !Figure: &mean_dark_image_figure - ref: "mean_dark_image_figure" - x_axis_label: "Col = SIAF det Y" - y_axis_label: "Row = SIAF det X" - x_range: *stamp_xr - y_range: *stamp_yr - tools: "" - height: 250 # Not working - width: 250 # Not working - elements: - - {"kind": "image", "image": "image", "x": 0, "y": 0, "dh": 'dh', "dw": 'dh', "source": *mean_dark_source, "color_mapper": *log_mapper} - - {"kind": "layout", "obj": *mean_dark_cbar, "place": "right"} - -#- !Document: -# - !column: -# - *dark_current_time_figure -# - *dark_full_histogram_figure diff --git a/jwql/website/apps/jwql/monitor_pages/yaml/monitor_bias_interface.yaml b/jwql/website/apps/jwql/monitor_pages/yaml/monitor_bias_interface.yaml deleted file mode 100644 index 86e3a9a2d..000000000 --- a/jwql/website/apps/jwql/monitor_pages/yaml/monitor_bias_interface.yaml +++ /dev/null @@ -1,363 +0,0 @@ -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Mean Bias vs Time Figures Amp1 Even -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &mean_bias_source_amp1_even - ref: "mean_bias_source_amp1_even" - data: - time: [] - time_iso: [] - mean_bias: [] - filename: [] -- !Range1d: &mean_bias_xr_amp1_even - ref: "mean_bias_xr_amp1_even" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &mean_bias_yr_amp1_even - ref: "mean_bias_yr_amp1_even" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &mean_bias_figure_amp1_even - ref: "mean_bias_figure_amp1_even" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Bias Level [DN]" - x_range: *mean_bias_xr_amp1_even - y_range: *mean_bias_yr_amp1_even - height: 800 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'mean_bias', 'size': 6, 'source': *mean_bias_source_amp1_even} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Mean Bias vs Time Figures Amp1 Odd -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &mean_bias_source_amp1_odd - ref: "mean_bias_source_amp1_odd" - data: - time: [] - time_iso: [] - mean_bias: [] - filename: [] -- !Range1d: &mean_bias_xr_amp1_odd - ref: "mean_bias_xr_amp1_odd" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &mean_bias_yr_amp1_odd - ref: "mean_bias_yr_amp1_odd" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &mean_bias_figure_amp1_odd - ref: "mean_bias_figure_amp1_odd" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Bias Level [DN]" - x_range: *mean_bias_xr_amp1_odd - y_range: *mean_bias_yr_amp1_odd - height: 800 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'mean_bias', 'size': 6, 'source': *mean_bias_source_amp1_odd} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Mean Bias vs Time Figures Amp2 Even -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &mean_bias_source_amp2_even - ref: "mean_bias_source_amp2_even" - data: - time: [] - time_iso: [] - mean_bias: [] - filename: [] -- !Range1d: &mean_bias_xr_amp2_even - ref: "mean_bias_xr_amp2_even" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &mean_bias_yr_amp2_even - ref: "mean_bias_yr_amp2_even" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &mean_bias_figure_amp2_even - ref: 
"mean_bias_figure_amp2_even" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Bias Level [DN]" - x_range: *mean_bias_xr_amp2_even - y_range: *mean_bias_yr_amp2_even - height: 800 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'mean_bias', 'size': 6, 'source': *mean_bias_source_amp2_even} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Mean Bias vs Time Figures Amp2 Odd -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &mean_bias_source_amp2_odd - ref: "mean_bias_source_amp2_odd" - data: - time: [] - time_iso: [] - mean_bias: [] - filename: [] -- !Range1d: &mean_bias_xr_amp2_odd - ref: "mean_bias_xr_amp2_odd" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &mean_bias_yr_amp2_odd - ref: "mean_bias_yr_amp2_odd" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &mean_bias_figure_amp2_odd - ref: "mean_bias_figure_amp2_odd" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Bias Level [DN]" - x_range: *mean_bias_xr_amp2_odd - y_range: *mean_bias_yr_amp2_odd - height: 800 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'mean_bias', 'size': 6, 'source': *mean_bias_source_amp2_odd} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Mean Bias vs Time Figures Amp3 Even -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &mean_bias_source_amp3_even - ref: "mean_bias_source_amp3_even" - data: - time: [] - time_iso: [] - mean_bias: [] - filename: [] -- !Range1d: &mean_bias_xr_amp3_even - ref: "mean_bias_xr_amp3_even" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &mean_bias_yr_amp3_even - ref: "mean_bias_yr_amp3_even" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &mean_bias_figure_amp3_even - ref: "mean_bias_figure_amp3_even" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Bias Level [DN]" - x_range: *mean_bias_xr_amp3_even - y_range: *mean_bias_yr_amp3_even - height: 800 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'mean_bias', 'size': 6, 'source': *mean_bias_source_amp3_even} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Mean Bias vs Time Figures Amp3 Odd -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &mean_bias_source_amp3_odd - ref: "mean_bias_source_amp3_odd" - data: - time: [] - time_iso: [] - mean_bias: [] - filename: [] -- !Range1d: &mean_bias_xr_amp3_odd - ref: "mean_bias_xr_amp3_odd" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &mean_bias_yr_amp3_odd - ref: "mean_bias_yr_amp3_odd" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &mean_bias_figure_amp3_odd - ref: "mean_bias_figure_amp3_odd" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Bias Level [DN]" - x_range: *mean_bias_xr_amp3_odd - y_range: *mean_bias_yr_amp3_odd - height: 800 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'mean_bias', 'size': 6, 'source': *mean_bias_source_amp3_odd} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Mean Bias vs Time Figures Amp4 Even -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &mean_bias_source_amp4_even - ref: 
"mean_bias_source_amp4_even" - data: - time: [] - time_iso: [] - mean_bias: [] - filename: [] -- !Range1d: &mean_bias_xr_amp4_even - ref: "mean_bias_xr_amp4_even" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &mean_bias_yr_amp4_even - ref: "mean_bias_yr_amp4_even" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &mean_bias_figure_amp4_even - ref: "mean_bias_figure_amp4_even" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Bias Level [DN]" - x_range: *mean_bias_xr_amp4_even - y_range: *mean_bias_yr_amp4_even - height: 800 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'mean_bias', 'size': 6, 'source': *mean_bias_source_amp4_even} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Mean Bias vs Time Figures Amp4 Odd -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &mean_bias_source_amp4_odd - ref: "mean_bias_source_amp4_odd" - data: - time: [] - time_iso: [] - mean_bias: [] - filename: [] -- !Range1d: &mean_bias_xr_amp4_odd - ref: "mean_bias_xr_amp4_odd" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &mean_bias_yr_amp4_odd - ref: "mean_bias_yr_amp4_odd" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &mean_bias_figure_amp4_odd - ref: "mean_bias_figure_amp4_odd" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Bias Level [DN]" - x_range: *mean_bias_xr_amp4_odd - y_range: *mean_bias_yr_amp4_odd - height: 800 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'mean_bias', 'size': 6, 'source': *mean_bias_source_amp4_odd} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Calibrated 0th Group Image -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &cal_data - ref: "cal_data" - data: - dh: [1] - dw: [1] - image: [[[0,0], [0, 0]]] -- !Figure: &cal_image - ref: "cal_image" - title: 'Calibrated Zeroth Group of Most Recent Dark' - elements: - - {"kind": "image", "image": "image", "x": 0, "y": 0, "dh": 'dh', "dw": 'dh', "source": *cal_data} - tools: "" -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Calibrated 0th Group Histogram -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &cal_hist_source - ref: "cal_hist_source" - data: - counts: [] - bin_centers: [] -- !Range1d: &cal_hist_xr - ref: "cal_hist_xr" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &cal_hist_yr - ref: "cal_hist_yr" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &cal_hist - ref: "cal_hist" - x_axis_label: "Signal [DN]" - y_axis_label: "Number of Pixels" - x_range: *cal_hist_xr - y_range: *cal_hist_yr - height: 250 - width: 300 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'bin_centers', 'y': 'counts', 'size': 4, 'source': *cal_hist_source} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Collapsed Columns Figure -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &collapsed_columns_source - ref: "collapsed_columns_source" - data: - pixel: [] - signal: [] -- !Range1d: &collapsed_columns_pixel_range - ref: "collapsed_columns_pixel_range" - start: 0 - end: 10 - bounds: 'auto' -- !Range1d: &collapsed_columns_signal_range - ref: "collapsed_columns_signal_range" - start: 0 - end: 10 - bounds: 'auto' -- 
!Figure: &collapsed_columns_figure - ref: "collapsed_columns_figure" - x_axis_label: "Column #" - y_axis_label: "Median Signal [DN]" - x_range: *collapsed_columns_pixel_range - y_range: *collapsed_columns_signal_range - height: 500 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'pixel', 'y': 'signal', 'size': 3, 'source': *collapsed_columns_source} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Collapsed Rows Figure -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &collapsed_rows_source - ref: "collapsed_rows_source" - data: - pixel: [] - signal: [] -- !Range1d: &collapsed_rows_pixel_range - ref: "collapsed_rows_pixel_range" - start: 0 - end: 10 - bounds: 'auto' -- !Range1d: &collapsed_rows_signal_range - ref: "collapsed_rows_signal_range" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &collapsed_rows_figure - ref: "collapsed_rows_figure" - x_axis_label: "Row #" - y_axis_label: "Median Signal [DN]" - x_range: *collapsed_rows_pixel_range - y_range: *collapsed_rows_signal_range - height: 500 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'pixel', 'y': 'signal', 'size': 3, 'source': *collapsed_rows_source} - -# Document structure -# - !Document: -# - !row: -# - *mean_bias_figure \ No newline at end of file diff --git a/jwql/website/apps/jwql/monitor_pages/yaml/monitor_filesystem_interface.yaml b/jwql/website/apps/jwql/monitor_pages/yaml/monitor_filesystem_interface.yaml deleted file mode 100644 index a1da194e9..000000000 --- a/jwql/website/apps/jwql/monitor_pages/yaml/monitor_filesystem_interface.yaml +++ /dev/null @@ -1,155 +0,0 @@ -# Unified x range for all files -- !Range1d: &xr_all - ref: "xr_all" - start: 0 - end: 100 - bounds: !!python/tuple [0, 100] -#File count figure -- !ColumnDataSource: &source_filecount - ref: "source_filecount" - data: - dates: [] - filecount: [] -- !Range1d: &yr_filecount - ref: "yr_filecount" - start: 0 - end: 100 - bounds: !!python/tuple [0, 100] -- !Figure: &fig_filecount - ref: "fig_filecount" - title: "Total File Counts" - y_axis_label: 'Count' - x_range: *xr_all - y_range: *yr_filecount - elements: - - {'kind': 'line', 'x': 'dates', 'y': 'filecount', 'line_color': 'blue', 'source': *source_filecount} -#System stats figure -- !ColumnDataSource: &source_stats - ref: "source_stats" - data: - dates: [] - systemsize: [] - freesize: [] - usedsize: [] -- !Range1d: &yr_stats - ref: "yr_stats" - start: 0 - end: 100 - bounds: !!python/tuple [0, 100] -- !Figure: &fig_stats - ref: "fig_system_stats" - title: "System Stats" - y_axis_label: 'Count' - x_range: *xr_all - y_range: *yr_stats - elements: - - {'kind': 'line', 'x': 'dates', 'y': 'systemsize', 'line_color': 'red', 'source': *source_stats, 'legend': 'Total size'} - - {'kind': 'line', 'x': 'dates', 'y': 'freesize', 'line_color': 'blue', 'source': *source_stats, 'legend': 'Free bytes'} - - {'kind': 'line', 'x': 'dates', 'y': 'usedsize', 'line_color': 'green', 'source': *source_stats, 'legend': 'Used bytes'} - - {'kind': 'circle', 'x': 'dates', 'y': 'systemsize', 'color': 'red', 'source': *source_stats} - - {'kind': 'circle', 'x': 'dates', 'y': 'freesize', 'color': 'blue', 'source': *source_stats} - - {'kind': 'circle', 'x': 'dates', 'y': 'usedsize', 'color': 'green', 'source': *source_stats} -#File types figure -- !ColumnDataSource: &source_files - ref: "source_files" - data: - dates: [] - fits: [] - uncal: [] - cal: [] - rate: 
[] - rateint: [] - i2d: [] - nrc: [] - nrs: [] - nis: [] - mir: [] - fgs: [] -- !Range1d: &yr_files - ref: "yr_files" - start: 0 - end: 100 - bounds: !!python/tuple [0, 100] -- !Figure: &fig_files - ref: "fig_filecount_type" - title: "Total File Counts by Type" - y_axis_label: 'Count' - x_range: *xr_all - y_range: *yr_files - elements: - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[0] , 'line_color': !self.types_c[0] , 'source': *source_files, 'legend': !self.types_l[0] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[1] , 'line_color': !self.types_c[1] , 'source': *source_files, 'legend': !self.types_l[1] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[2] , 'line_color': !self.types_c[2] , 'source': *source_files, 'legend': !self.types_l[2] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[3] , 'line_color': !self.types_c[3] , 'source': *source_files, 'legend': !self.types_l[3] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[4] , 'line_color': !self.types_c[4] , 'source': *source_files, 'legend': !self.types_l[4] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[5] , 'line_color': !self.types_c[5] , 'source': *source_files, 'legend': !self.types_l[5] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[6] , 'line_color': !self.types_c[6] , 'source': *source_files, 'legend': !self.types_l[6] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[7] , 'line_color': !self.types_c[7] , 'source': *source_files, 'legend': !self.types_l[7] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[8] , 'line_color': !self.types_c[8] , 'source': *source_files, 'legend': !self.types_l[8] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[9] , 'line_color': !self.types_c[9] , 'source': *source_files, 'legend': !self.types_l[9] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[10] , 'line_color': !self.types_c[10] , 'source': *source_files, 'legend': !self.types_l[10] } - - {'kind': !self.types_k[0] , 'x': 'dates', 'y': !self.types_y[0] , 'color': !self.types_c[0] , 'source': *source_files} - - {'kind': !self.types_k[1] , 'x': 'dates', 'y': !self.types_y[1] , 'color': !self.types_c[1] , 'source': *source_files} - - {'kind': !self.types_k[2] , 'x': 'dates', 'y': !self.types_y[2] , 'color': !self.types_c[2] , 'source': *source_files} - - {'kind': !self.types_k[3] , 'x': 'dates', 'y': !self.types_y[3] , 'color': !self.types_c[3] , 'source': *source_files} - - {'kind': !self.types_k[4] , 'x': 'dates', 'y': !self.types_y[4] , 'color': !self.types_c[4] , 'source': *source_files} - - {'kind': !self.types_k[5] , 'x': 'dates', 'y': !self.types_y[5] , 'color': !self.types_c[5] , 'source': *source_files} - - {'kind': !self.types_k[6] , 'x': 'dates', 'y': !self.types_y[6] , 'color': !self.types_c[6] , 'source': *source_files} - - {'kind': !self.types_k[7] , 'x': 'dates', 'y': !self.types_y[7] , 'color': !self.types_c[7] , 'source': *source_files} - - {'kind': !self.types_k[8] , 'x': 'dates', 'y': !self.types_y[8] , 'color': !self.types_c[8] , 'source': *source_files} - - {'kind': !self.types_k[9] , 'x': 'dates', 'y': !self.types_y[9] , 'color': !self.types_c[9] , 'source': *source_files} - - {'kind': !self.types_k[10] , 'x': 'dates', 'y': !self.types_y[10] , 'color': !self.types_c[10] , 'source': *source_files} -#File sizes figure -- !ColumnDataSource: &source_sizes - ref: "source_sizes" - data: - dates: [] - fits: [] - uncal: [] - cal: [] - rate: [] - rateint: [] - i2d: [] - nrc: [] - nrs: [] - nis: [] - mir: [] - fgs: [] -- !Range1d: &yr_sizes - ref: 
"yr_sizes" - start: 0 - end: 100 - bounds: !!python/tuple [0, 100] -- !Figure: &fig_sizes - ref: "fig_size_type" - title: "Total File Sizes by Type" - y_axis_label: 'GB' - x_range: *xr_all - y_range: *yr_sizes - elements: - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[0] , 'line_color': !self.types_c[0] , 'source': *source_sizes, 'legend': !self.types_l[0] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[1] , 'line_color': !self.types_c[1] , 'source': *source_sizes, 'legend': !self.types_l[1] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[2] , 'line_color': !self.types_c[2] , 'source': *source_sizes, 'legend': !self.types_l[2] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[3] , 'line_color': !self.types_c[3] , 'source': *source_sizes, 'legend': !self.types_l[3] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[4] , 'line_color': !self.types_c[4] , 'source': *source_sizes, 'legend': !self.types_l[4] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[5] , 'line_color': !self.types_c[5] , 'source': *source_sizes, 'legend': !self.types_l[5] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[6] , 'line_color': !self.types_c[6] , 'source': *source_sizes, 'legend': !self.types_l[6] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[7] , 'line_color': !self.types_c[7] , 'source': *source_sizes, 'legend': !self.types_l[7] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[8] , 'line_color': !self.types_c[8] , 'source': *source_sizes, 'legend': !self.types_l[8] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[9] , 'line_color': !self.types_c[9] , 'source': *source_sizes, 'legend': !self.types_l[9] } - - {'kind': 'line', 'x': 'dates', 'y': !self.types_y[10] , 'line_color': !self.types_c[10] , 'source': *source_sizes, 'legend': !self.types_l[10] } - - {'kind': !self.types_k[0] , 'x': 'dates', 'y': !self.types_y[0] , 'color': !self.types_c[0] , 'source': *source_sizes} - - {'kind': !self.types_k[1] , 'x': 'dates', 'y': !self.types_y[1] , 'color': !self.types_c[1] , 'source': *source_sizes} - - {'kind': !self.types_k[2] , 'x': 'dates', 'y': !self.types_y[2] , 'color': !self.types_c[2] , 'source': *source_sizes} - - {'kind': !self.types_k[3] , 'x': 'dates', 'y': !self.types_y[3] , 'color': !self.types_c[3] , 'source': *source_sizes} - - {'kind': !self.types_k[4] , 'x': 'dates', 'y': !self.types_y[4] , 'color': !self.types_c[4] , 'source': *source_sizes} - - {'kind': !self.types_k[5] , 'x': 'dates', 'y': !self.types_y[5] , 'color': !self.types_c[5] , 'source': *source_sizes} - - {'kind': !self.types_k[6] , 'x': 'dates', 'y': !self.types_y[6] , 'color': !self.types_c[6] , 'source': *source_sizes} - - {'kind': !self.types_k[7] , 'x': 'dates', 'y': !self.types_y[7] , 'color': !self.types_c[7] , 'source': *source_sizes} - - {'kind': !self.types_k[8] , 'x': 'dates', 'y': !self.types_y[8] , 'color': !self.types_c[8] , 'source': *source_sizes} - - {'kind': !self.types_k[9] , 'x': 'dates', 'y': !self.types_y[9] , 'color': !self.types_c[9] , 'source': *source_sizes} - - {'kind': !self.types_k[10] , 'x': 'dates', 'y': !self.types_y[10] , 'color': !self.types_c[10] , 'source': *source_sizes} -#Widget layout -- !gridplot: - ref: "filesystem_layout" - arg: [[[*fig_filecount, *fig_stats], [*fig_files, *fig_sizes]]] \ No newline at end of file diff --git a/jwql/website/apps/jwql/monitor_pages/yaml/monitor_mast_interface.yaml b/jwql/website/apps/jwql/monitor_pages/yaml/monitor_mast_interface.yaml deleted file mode 100644 index 810984eaa..000000000 --- 
a/jwql/website/apps/jwql/monitor_pages/yaml/monitor_mast_interface.yaml +++ /dev/null @@ -1,64 +0,0 @@ -# JWST figure -- !ColumnDataSource: &jwst_source - ref: "jwst_source" - data: - x: [] - counts: [] -- !HoverTool: &jwst_hover - ref: "jwst_hover" - tooltips: [('count', '@counts')] -- !FactorRange: &jwst_xrange - ref: "jwst_xrange" - factors: !self.jwst_groups -- !factor_cmap: &jwst_cmap - ref: "jwst_cmap" - arg: 'x' - palette: !Category20c[!self.jwst_bar_colors ] - factors: !self.jwst_datacols - start: 1 - end: 2 -- !Figure: &jwst_figure - ref: "jwst_figure" - x_range: *jwst_xrange - plot_height: 250 - tools: [*jwst_hover ] - elements: - - kind: 'vbar' - x: 'x' - top: 'counts' - width: 0.9 - source: *jwst_source - line_color: 'white' - fill_color: *jwst_cmap -# CAOM figure -- !ColumnDataSource: &caom_source - ref: "caom_source" - data: - x: [] - counts: [] -- !HoverTool: &caom_hover - ref: "caom_hover" - tooltips: [('count', '@counts')] -- !FactorRange: &caom_xrange - ref: "caom_xrange" - factors: !self.caom_groups -- !factor_cmap: &caom_cmap - ref: "caom_cmap" - arg: 'x' - palette: !Category20c[!self.caom_bar_colors ] - factors: !self.caom_datacols - start: 1 - end: 2 -- !Figure: &caom_figure - ref: "caom_figure" - x_range: *caom_xrange - plot_height: 250 - tools: [*caom_hover ] - elements: - - kind: 'vbar' - x: 'x' - top: 'counts' - width: 0.9 - source: *caom_source - line_color: 'white' - fill_color: *caom_cmap \ No newline at end of file diff --git a/jwql/website/apps/jwql/monitor_pages/yaml/monitor_readnoise_interface.yaml b/jwql/website/apps/jwql/monitor_pages/yaml/monitor_readnoise_interface.yaml deleted file mode 100644 index 4d2fd8a78..000000000 --- a/jwql/website/apps/jwql/monitor_pages/yaml/monitor_readnoise_interface.yaml +++ /dev/null @@ -1,189 +0,0 @@ -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Mean Readnoise vs Time Figures Amp1 -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &mean_readnoise_source_amp1 - ref: "mean_readnoise_source_amp1" - data: - time: [] - time_iso: [] - mean_rn: [] - filename: [] - nints: [] - ngroups: [] -- !Range1d: &mean_readnoise_xr_amp1 - ref: "mean_readnoise_xr_amp1" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &mean_readnoise_yr_amp1 - ref: "mean_readnoise_yr_amp1" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &mean_readnoise_figure_amp1 - ref: "mean_readnoise_figure_amp1" - title: "Amp 1" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Mean readnoise [DN]" - x_range: *mean_readnoise_xr_amp1 - y_range: *mean_readnoise_yr_amp1 - height: 800 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'mean_rn', 'size': 6, 'source': *mean_readnoise_source_amp1} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Mean Readnoise vs Time Figures Amp2 -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &mean_readnoise_source_amp2 - ref: "mean_readnoise_source_amp2" - data: - time: [] - time_iso: [] - mean_rn: [] - filename: [] - nints: [] - ngroups: [] -- !Range1d: &mean_readnoise_xr_amp2 - ref: "mean_readnoise_xr_amp2" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &mean_readnoise_yr_amp2 - ref: "mean_readnoise_yr_amp2" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &mean_readnoise_figure_amp2 - ref: "mean_readnoise_figure_amp2" - title: "Amp 2" - x_axis_label: "Date" - x_axis_type: "datetime" - 
y_axis_label: "Mean readnoise [DN]" - x_range: *mean_readnoise_xr_amp2 - y_range: *mean_readnoise_yr_amp2 - height: 800 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'mean_rn', 'size': 6, 'source': *mean_readnoise_source_amp2} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Mean Readnoise vs Time Figures Amp3 -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &mean_readnoise_source_amp3 - ref: "mean_readnoise_source_amp3" - data: - time: [] - time_iso: [] - mean_rn: [] - filename: [] - nints: [] - ngroups: [] -- !Range1d: &mean_readnoise_xr_amp3 - ref: "mean_readnoise_xr_amp3" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &mean_readnoise_yr_amp3 - ref: "mean_readnoise_yr_amp3" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &mean_readnoise_figure_amp3 - ref: "mean_readnoise_figure_amp3" - title: "Amp 3" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Mean readnoise [DN]" - x_range: *mean_readnoise_xr_amp3 - y_range: *mean_readnoise_yr_amp3 - height: 800 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'mean_rn', 'size': 6, 'source': *mean_readnoise_source_amp3} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Mean Readnoise vs Time Figures Amp4 -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &mean_readnoise_source_amp4 - ref: "mean_readnoise_source_amp4" - data: - time: [] - time_iso: [] - mean_rn: [] - filename: [] - nints: [] - ngroups: [] -- !Range1d: &mean_readnoise_xr_amp4 - ref: "mean_readnoise_xr_amp4" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &mean_readnoise_yr_amp4 - ref: "mean_readnoise_yr_amp4" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &mean_readnoise_figure_amp4 - ref: "mean_readnoise_figure_amp4" - title: "Amp 4" - x_axis_label: "Date" - x_axis_type: "datetime" - y_axis_label: "Mean readnoise [DN]" - x_range: *mean_readnoise_xr_amp4 - y_range: *mean_readnoise_yr_amp4 - height: 800 - width: 800 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'time', 'y': 'mean_rn', 'size': 6, 'source': *mean_readnoise_source_amp4} -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Readnoise Difference Image -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &diff_source - ref: "diff_source" - data: - dh: [1] - dw: [1] - image: [[[0,0], [0, 0]]] -- !Figure: &readnoise_diff_image - ref: "readnoise_diff_image" - title: 'Readnoise Difference (most recent dark - pipeline reffile)' - elements: - - {"kind": "image", "image": "image", "x": 0, "y": 0, "dh": 'dh', "dw": 'dh', "source": *diff_source} - tools: "" -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Readnoise Difference Histogram -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -- !ColumnDataSource: &diff_hist_source - ref: "diff_hist_source" - data: - n: [] - bin_centers: [] -- !Range1d: &diff_hist_xr - ref: "diff_hist_xr" - start: 0 - end: 1 - bounds: 'auto' -- !Range1d: &diff_hist_yr - ref: "diff_hist_yr" - start: 0 - end: 10 - bounds: 'auto' -- !Figure: &readnoise_diff_hist - ref: "readnoise_diff_hist" - x_axis_label: "Readnoise Difference [DN]" - y_axis_label: "Number of Pixels" - x_range: *diff_hist_xr - y_range: *diff_hist_yr 
- height: 250 - width: 300 - tools: "hover, wheel_zoom, pan, reset" - elements: - - {'kind': 'circle', 'x': 'bin_centers', 'y': 'n', 'size': 4, 'source': *diff_hist_source} - -# Document structure -# - !Document: -# - !row: -# - *mean_readnoise_figure \ No newline at end of file diff --git a/jwql/website/apps/jwql/monitor_pages/yaml/monitor_ta_detail_panel.yaml b/jwql/website/apps/jwql/monitor_pages/yaml/monitor_ta_detail_panel.yaml new file mode 100644 index 000000000..8249b9a5e --- /dev/null +++ b/jwql/website/apps/jwql/monitor_pages/yaml/monitor_ta_detail_panel.yaml @@ -0,0 +1,363 @@ +# Panel 2: individual selected visit +# Primary V2/V3 offsets figure +- !ColumnDataSource: &offsets_source + ref: "offsets_source_visit_{vid}" + data: + dv2: [] + dv3: [] + v2m: [] + v3m: [] + v2d: [] + v3d: [] + nrs: [] + ccs: [] + rcs: [] + gxt: [] + gyt: [] + bkg: [] + cl1: [] + rl1: [] + cbx: [] + cc1: [] + rc1: [] + cfx: [] + cdc: [] + rdc: [] + csc: [] + ysx: [] + xsx: [] + refstar_no: [] + selection_on_change: ['indices', !self.select_stamp ] +- !Range1d: &offsets_xr + ref: "offsets_xr_visit_{vid}" + start: 0 + end: 1 + bounds: !!python/tuple [0, 1] +- !Range1d: &offsets_yr + ref: "offsets_yr_visit_{vid}" + start: 0 + end: 1 + bounds: !!python/tuple [0, 1] +- !Figure: &offsets_visit_fig + ref: "offsets_fig_visit_{vid}" + title: "V2/V3 Offsets" + height: 700 + width: 700 +# x_axis_label: "V2 Offset (arcsec)" +# y_axis_label: "V3 Offset (arcsec)" + x_axis_location: null + y_axis_location: null + min_border: 10 + min_border_left: 50 + x_range: *offsets_xr + y_range: *offsets_yr + toolbar_location: "above" + tools: "tap,wheel_zoom" + elements: + - {{'kind': 'circle', 'x': 'dv2', 'y': 'dv3', 'source': *offsets_source, 'size': 6}} +# Residuals histograms +- !ColumnDataSource: &resid_source + ref: "resid_source_visit_{vid}" + data: + bottom: [] + v2_l: [] + v2_r: [] + v2_t: [] + v3_l: [] + v3_r: [] + v3_t: [] +- !Range1d: &resid_xr + ref: "resid_xr_visit_{vid}" + start: 0 + end: 20 + bounds: 'auto' +- !Range1d: &resid_yr + ref: "resid_yr_visit_{vid}" + start: 0 + end: 20 + bounds: 'auto' +- !Figure: &residuals_hist_v2 + ref: "resid_hist_v2_visit_{vid}" + toolbar_location: null +# title: "V2 Residuals" + x_axis_label: "V2 offset (arcsec)" + y_axis_label: "Number of stars" + y_axis_location: "right" + min_border: 10 + min_border_left: 50 + tools: "" + height: 200 + width: 700 + x_range: *offsets_xr + y_range: *resid_yr + elements: + - {{"kind": 'quad', "top": "v2_t", "left": "v2_l", "right": "v2_r", "bottom": "bottom", "source": *resid_source, "fill_color": "blue", "line_color": "white", "alpha": 0.7}} +- !Figure: &residuals_hist_v3 + ref: "resid_hist_v3_visit_{vid}" +# title: "V3 Residuals" + y_axis_label: "V3 offset (arcsec)" + x_axis_label: "Number of stars" + y_axis_location: "right" + min_border: 10 + tools: "" + height: 700 + width: 200 + toolbar_location: null + x_range: *resid_xr + y_range: *offsets_yr + elements: + - {{"kind": 'quad', "top": "v3_r", "left": "bottom", "right": "v3_t", "bottom": "v3_l", "source": *resid_source, "fill_color": "green", "line_color": "white", "alpha": 0.7}} +# Common objects for all postage stamp figures +- !ColumnDataSource: &stamp_source + ref: "stamp_source_visit_{vid}" + data: + x: [0] + y: [0] + dh: [1] + dw: [1] + slope1: [[[1,0], [0, 1]]] + slope2: [[[1,0], [0, 1]]] + crj: [[[1,0], [0, 1]]] + bkg: [[[1,0], [0, 1]]] + flat: [[[1,0], [0, 1]]] +- !ColumnDataSource: &checkbox_source + ref: "checkbox_source_visit_{vid}" + data: + l: [0] + r: [1] + t: [1] + b: [1] + x: 
[0] + y: [1] +- !Range1d: &stamp_xr + ref: "stamp_xr_visit_{vid}" + start: 0 + end: 1 + bounds: !!python/tuple [0, 1] +- !Range1d: &stamp_yr + ref: "stamp_yr_visit_{vid}" + start: 0 + end: 1 + bounds: !!python/tuple [0, 1] +# slope1 stamp figure +- !LinearColorMapper: &slope1_mapper + ref: "slope1_mapper_visit_{vid}" + palette: "Plasma256" + low: 0. + high: 1. +- !ColorBar: &slope1_cbar + color_mapper: *slope1_mapper + location: !!python/tuple [0, 0] +- !Figure: &slope1_fig + ref: "slope1_fig_visit_{vid}" + title: "slope1 stamp" + x_axis_label: "Col = SIAF det Y" + y_axis_label: "Row = SIAF det X" + x_range: *stamp_xr + y_range: *stamp_yr + tools: "" + toolbar_location: null + height: 300 + width: 400 + visible: False + elements: + - {{"kind": "image", "image": "slope1", "x": "x", "y": "y", "dh": "dh", "dw": "dw", "source": *stamp_source, "color_mapper": *slope1_mapper}} + - {{"kind": "layout", "obj": *slope1_cbar, "place": "right"}} + - {{"kind": "quad", "left": "l", "right": "r", "top": "t", "bottom": "b", "source": *checkbox_source, "fill_alpha": 0., "line_color": "cyan"}} + - {{"kind": "x", "x": "x", "y": "y", "source": *checkbox_source, "color": "cyan", "size": 5}} +# slope2 stamp figure +- !LinearColorMapper: &slope2_mapper + ref: "slope2_mapper_visit_{vid}" + palette: "Plasma256" + low: 0. + high: 1. +- !ColorBar: &slope2_cbar + color_mapper: *slope2_mapper + location: !!python/tuple [0, 0] +- !Figure: &slope2_fig + ref: "slope2_fig_visit_{vid}" + title: "slope2 stamp" + x_axis_label: "Col = SIAF det Y" + y_axis_label: "Row = SIAF det X" + tools: "" + toolbar_location: null + height: 300 + width: 400 + x_range: *stamp_xr + y_range: *stamp_yr + visible: False + elements: + - {{"kind": "image", "image": "slope2", "x": "x", "y": "y", "dh": "dh", "dw": "dw", "source": *stamp_source, "color_mapper": *slope2_mapper}} + - {{"kind": "layout", "obj": *slope2_cbar, "place": "right"}} + - {{"kind": "quad", "left": "l", "right": "r", "top": "t", "bottom": "b", "source": *checkbox_source, "fill_alpha": 0., "line_color": "cyan"}} + - {{"kind": "x", "x": "x", "y": "y", "source": *checkbox_source, "color": "cyan", "size": 5}} +# crj stamp figure +- !LinearColorMapper: &crj_mapper + ref: "crj_mapper_visit_{vid}" + palette: "Plasma256" + low: 0. + high: 1. +- !ColorBar: &crj_cbar + color_mapper: *crj_mapper + location: !!python/tuple [0, 0] +- !Figure: &crj_fig + ref: "crj_fig_visit_{vid}" + title: "crj stamp" + x_axis_label: "Col = SIAF det Y" + y_axis_label: "Row = SIAF det X" + tools: "" + toolbar_location: null + height: 300 + width: 400 + x_range: *stamp_xr + y_range: *stamp_yr + visible: False + elements: + - {{"kind": "image", "image": "crj", "x": "x", "y": "y", "dh": "dh", "dw": "dw", "source": *stamp_source, "color_mapper": *crj_mapper}} + - {{"kind": "layout", "obj": *crj_cbar, "place": "right"}} + - {{"kind": "quad", "left": "l", "right": "r", "top": "t", "bottom": "b", "source": *checkbox_source, "fill_alpha": 0., "line_color": "cyan"}} + - {{"kind": "x", "x": "x", "y": "y", "source": *checkbox_source, "color": "cyan", "size": 5}} +# bkg_subtracted stamp figure +- !LinearColorMapper: &bkg_mapper + ref: "bkg_mapper_visit_{vid}" + palette: "Plasma256" + low: 0. + high: 1. 
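A note on the brace style in this new per-visit YAML: the "ref" values carry a "{vid}" placeholder and the element dictionaries use doubled braces ("{{...}}"), which suggests the file text is passed through Python's str.format() once per selected visit before it is parsed, so that literal braces survive while the visit ID is substituted. Below is a minimal sketch of that assumed workflow; the helper name and file handling are hypothetical, and only the brace-doubling convention comes from the YAML itself:

```python
from pathlib import Path


def render_visit_yaml(template_path, visit_id):
    """Hypothetical helper: fill the {vid} placeholders for one visit."""
    text = Path(template_path).read_text()
    # str.format() substitutes "{vid}" and collapses "{{"/"}}" back to single
    # braces, leaving ordinary YAML mappings for the downstream parser.
    return text.format(vid=visit_id)


# Example usage with an assumed relative path and made-up visit ID
yaml_text = render_visit_yaml("monitor_ta_detail_panel.yaml", "V01234005001")
```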
+- !ColorBar: &bkg_cbar + color_mapper: *bkg_mapper + location: !!python/tuple [0, 0] +- !Figure: &bkg_fig + ref: "bkg_fig_visit_{vid}" + title: "bkg_subtracted stamp" + x_axis_label: "Col = SIAF det Y" + y_axis_label: "Row = SIAF det X" + tools: "" + toolbar_location: null + height: 300 + width: 400 + x_range: *stamp_xr + y_range: *stamp_yr + visible: False + elements: + - {{"kind": "image", "image": "bkg", "x": "x", "y": "y", "dh": "dh", "dw": "dw", "source": *stamp_source, "color_mapper": *bkg_mapper}} + - {{"kind": "layout", "obj": *bkg_cbar, "place": "right"}} + - {{"kind": "quad", "left": "l", "right": "r", "top": "t", "bottom": "b", "source": *checkbox_source, "fill_alpha": 0., "line_color": "cyan"}} + - {{"kind": "x", "x": "x", "y": "y", "source": *checkbox_source, "color": "cyan", "size": 5}} +# stamp_flat stamp figure +- !LinearColorMapper: &flat_mapper + ref: "flat_mapper_visit_{vid}" + palette: "Greys256" + low: 0. + high: 1. +- !ColorBar: &flat_cbar + color_mapper: *flat_mapper + location: !!python/tuple [0, 0] +- !Figure: &flat_fig + ref: "flat_fig_visit_{vid}" + title: "stamp_flat stamp" + x_axis_label: "Col = SIAF det Y" + y_axis_label: "Row = SIAF det X" + toolbar_location: null + tools: "" + height: 300 + width: 400 + x_range: *stamp_xr + y_range: *stamp_yr + visible: False + elements: + - {{"kind": "image", "image": "flat", "x": "x", "y": "y", "dh": "dh", "dw": "dw", "source": *stamp_source, "color_mapper": *flat_mapper}} + - {{"kind": "layout", "obj": *flat_cbar, "place": "right"}} + - {{"kind": "quad", "left": "l", "right": "r", "top": "t", "bottom": "b", "source": *checkbox_source, "fill_alpha": 0., "line_color": "cyan"}} + - {{"kind": "x", "x": "x", "y": "y", "source": *checkbox_source, "color": "cyan", "size": 5}} +# Div for long-form star output +- !Div: &output_div + ref: "output_div_visit_{vid}" +# Table display of stamp data +- !DataTable: &output_table + ref: "output_table_visit_{vid}" + sizing_mode: "stretch_width" + width_policy: "max" + height: 500 + source: *offsets_source + columns: + - !TableColumn: + field: "refstar_no" + title: "Star Number" + - !TableColumn: + field: "v2d" + title: "V2 desired" + - !TableColumn: + field: "v3d" + title: "V3 desired" + - !TableColumn: + field: "nrs" + title: "Detector" + - !TableColumn: + field: "ccs" + title: "Corner col" + - !TableColumn: + field: "rcs" + title: "Corner row" + - !TableColumn: + field: "gxt" + title: "GWA x tilt" + - !TableColumn: + field: "gyt" + title: "GWA y tilt" + - !TableColumn: + field: "bkg" + title: "Background measured" + - !TableColumn: + field: "cl1" + title: "Locate col" + - !TableColumn: + field: "rl1" + title: "Locate row" + - !TableColumn: + field: "cc1" + title: "Center col" + - !TableColumn: + field: "rc1" + title: "Center row" + - !TableColumn: + field: "cfx" + title: "Centroid flux" + - !TableColumn: + field: "cdc" + title: "Detector center col" + - !TableColumn: + field: "rdc" + title: "Detector center row" + - !TableColumn: + field: "csc" + title: "Centroid success" + - !TableColumn: + field: "v2m" + title: "V2 measured" + - !TableColumn: + field: "v3m" + title: "V3 measured" + - !TableColumn: + field: "ysx" + title: "Expected SIAF y" + - !TableColumn: + field: "xsx" + title: "Expected SIAF x" +- !Panel: + ref: "panel_visit_{vid}" + title: "{vid}" + tags: ["{vid}"] + #closable: true #uncomment this when the bug is fixed + child: + !column: + - !row: + - !gridplot: + children: + - [*offsets_visit_fig, *residuals_hist_v3] + - [*residuals_hist_v2, null] + merge_tools: False + - 
!layout: + - [*slope1_fig, *slope2_fig] + - [*crj_fig, *bkg_fig] + - [*flat_fig, *output_div] + - *output_table diff --git a/jwql/website/apps/jwql/monitor_pages/yaml/monitor_ta_interface.yaml b/jwql/website/apps/jwql/monitor_pages/yaml/monitor_ta_interface.yaml new file mode 100644 index 000000000..475840b37 --- /dev/null +++ b/jwql/website/apps/jwql/monitor_pages/yaml/monitor_ta_interface.yaml @@ -0,0 +1,169 @@ +# Panel 1: Trending all visits +# Common data source +- !ColumnDataSource: &offsets_trend_source + ref: "trend_source" + data: + visit: [] + oss_v2: [] + oss_v2_lo: [] + oss_v2_hi: [] + oss_v3: [] + oss_v3_lo: [] + oss_v3_hi: [] + rep_v2: [] + rep_v2_lo: [] + rep_v2_hi: [] + rep_v3: [] + rep_v3_lo: [] + rep_v3_hi: [] + rep_theta: [] #these aren't being used yet + rep_theta_lo: [] + rep_theta_hi: [] + oss_theta: [] + oss_theta_lo: [] + oss_theta_hi: [] + selection_on_change: ['indices', !self.select_visit ] +# V2/V3 offsets trending figure +- !Range1d: &v2v3_offsets_trend_xr + ref: "v2_offsets_trend_range" + start: 0 + end: 1 + bounds: !!python/tuple [0, 1] +- !Range1d: &v2v3_offsets_trend_yr + ref: "v3_offsets_trend_range" + start: 0 + end: 1 + bounds: !!python/tuple [0, 1] +# Whisker models for error bars +- !Whisker: &rep_v2_err + dimension: 'width' + source: *offsets_trend_source + base: 'rep_v3' + lower: 'rep_v2_lo' + upper: 'rep_v2_hi' +- !Whisker: &rep_v3_err + dimension: 'height' + source: *offsets_trend_source + base: 'rep_v2' + lower: 'rep_v3_lo' + upper: 'rep_v3_hi' +- !Whisker: &oss_v2_err + dimension: 'width' + source: *offsets_trend_source + base: 'oss_v3' + lower: 'oss_v2_lo' + upper: 'oss_v2_hi' +- !Whisker: &oss_v3_err + dimension: 'height' + source: *offsets_trend_source + base: 'oss_v2' + lower: 'oss_v3_lo' + upper: 'oss_v3_hi' +# Actual figure +- !Figure: &v2v3_offsets_trend_fig + ref: "v2v3_offsets_trend_fig" + title: "NIRSpec MSA Pointing" + height: 700 + width: 700 + x_axis_label: "V2 offset (arcsec)" + y_axis_label: "V3 offset (arcsec)" + x_range: *v2v3_offsets_trend_xr + y_range: *v2v3_offsets_trend_yr + tools: "tap,wheel_zoom" + elements: + - {'kind': 'circle', 'x': 'oss_v2', 'y': 'oss_v3', 'source': *offsets_trend_source, 'size': 6, 'color': 'orange', 'legend_label': 'OSS'} + - {'kind': 'circle', 'x': 'rep_v2', 'y': 'rep_v3', 'source': *offsets_trend_source, 'size': 6, 'color': 'green', 'legend_label': 'replica'} + - {'kind': 'layout', 'obj': *oss_v2_err} + - {'kind': 'layout', 'obj': *oss_v3_err} + - {'kind': 'layout', 'obj': *rep_v2_err} + - {'kind': 'layout', 'obj': *rep_v3_err} +# Offsets vs time trending figure +- !FactorRange: &visit_offsets_trend_xr + ref: "visit_offsets_trend_xr" + factors: [] +- !Range1d: &visit_offsets_trend_yr + ref: "visit_offsets_trend_yr" + start: 0 + end: 1 + bounds: !!python/tuple [0, 1] +# Whisker models for error bars +- !Whisker: &rep_v2_err_time + ref: "rep_v2_err_time" + dimension: 'height' + source: *offsets_trend_source + base: 'visit' + lower: 'rep_v2_lo' + upper: 'rep_v2_hi' +- !Whisker: &rep_v3_err_time + ref: "rep_v3_err_time" + dimension: 'height' + source: *offsets_trend_source + base: 'visit' + lower: 'rep_v3_lo' + upper: 'rep_v3_hi' +- !Whisker: &oss_v2_err_time + ref: "oss_v2_err_time" + dimension: 'height' + source: *offsets_trend_source + base: 'visit' + lower: 'oss_v2_lo' + upper: 'oss_v2_hi' +- !Whisker: &oss_v3_err_time + ref: "oss_v3_err_time" + dimension: 'height' + source: *offsets_trend_source + base: 'visit' + lower: 'oss_v3_lo' + upper: 'oss_v3_hi' +- !Figure: &time_offsets_trend_fig + ref: 
"time_offsets_trend_fig" + title: "NIRSpec MSA Pointing" + height: 700 + width: 700 + x_axis_label: "Visit ID" + y_axis_label: "Offset (arcsec)" + x_range: *visit_offsets_trend_xr + y_range: *visit_offsets_trend_yr + tools: "tap,wheel_zoom" + elements: + - {'kind': 'circle', 'x': 'visit', 'y': 'oss_v2', 'source': *offsets_trend_source, 'size': 6, 'color': 'red', 'legend_label': 'V2 (OSS)', "name": "oss_v2"} + - {'kind': 'circle', 'x': 'visit', 'y': 'oss_v3', 'source': *offsets_trend_source, 'size': 6, 'color': 'orange', 'legend_label': 'V3 (OSS)', "name": "oss_v3"} + - {'kind': 'circle', 'x': 'visit', 'y': 'rep_v2', 'source': *offsets_trend_source, 'size': 6, 'color': 'blue', 'legend_label': 'V2 (replica)', "name": "rep_v2"} + - {'kind': 'circle', 'x': 'visit', 'y': 'rep_v3', 'source': *offsets_trend_source, 'size': 6, 'color': 'green', 'legend_label': 'V3 (replica)', "name": "rep_v3"} + - {'kind': 'layout', 'obj': *oss_v2_err_time} + - {'kind': 'layout', 'obj': *oss_v3_err_time} + - {'kind': 'layout', 'obj': *rep_v2_err_time} + - {'kind': 'layout', 'obj': *rep_v3_err_time} +# Open detail tabs: +- !Select: &visit_select + ref: "visit_select" + value: "" + options: !self.available_visits + on_change: ["value", !self.select_visit ] +- !Button: &detail_button + ref: "detail_button" + label: "Open visit tab" + disabled: true + on_click: !self.open_visit_panel +# Tabs widget +- !Tabs: &msata_tabs + ref: "msata_tabs" + sizing_mode: "stretch_width" + tabs: + - !Panel: + title: "Trending" + child: + !column: + - !row: + - !Div: + text: "Select a visit:" + - *visit_select + - *detail_button + - !row: + - *v2v3_offsets_trend_fig + - *time_offsets_trend_fig + +# Document structure +- !Document: + - *msata_tabs + diff --git a/jwql/website/apps/jwql/monitor_views.py b/jwql/website/apps/jwql/monitor_views.py index 99cc4a460..3835f744b 100644 --- a/jwql/website/apps/jwql/monitor_views.py +++ b/jwql/website/apps/jwql/monitor_views.py @@ -4,6 +4,7 @@ ------- - Lauren Chambers + - Maria Pena-Guerrero Use --- @@ -29,13 +30,23 @@ import os +from bokeh.resources import CDN, INLINE +from django.http import HttpResponse, JsonResponse from django.shortcuts import render +import json from . 
import bokeh_containers +from jwql.website.apps.jwql import bokeh_containers +from jwql.website.apps.jwql.monitor_pages.monitor_readnoise_bokeh import ReadNoiseFigure from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE -from jwql.utils.utils import get_config +from jwql.utils.utils import get_config, get_base_url +from jwql.instrument_monitors.nirspec_monitors.ta_monitors import msata_monitor +from jwql.instrument_monitors.nirspec_monitors.ta_monitors import wata_monitor +from jwql.utils import monitor_utils -FILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem') + +CONFIG = get_config() +FILESYSTEM_DIR = os.path.join(CONFIG['jwql_dir'], 'filesystem') def bad_pixel_monitor(request, inst): @@ -53,20 +64,13 @@ def bad_pixel_monitor(request, inst): HttpResponse object Outgoing response sent to the webpage """ - - # Ensure the instrument is correctly capitalized - inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] - - tabs_components = bokeh_containers.bad_pixel_monitor_tabs(inst) - - template = "bad_pixel_monitor.html" + # Locate the html file for the instrument + template = f"{inst.lower()}_bad_pix_plots.html" context = { 'inst': inst, - 'tabs_components': tabs_components, } - # Return a HTTP response with the template and dictionary of variables return render(request, template, context) @@ -88,11 +92,37 @@ def bias_monitor(request, inst): # Ensure the instrument is correctly capitalized inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] + template = f"{inst.lower()}_bias_plots.html" + + context = { + 'inst': inst, + } + + # Return a HTTP response with the template and dictionary of variables + return render(request, template, context) + + +def cosmic_ray_monitor(request, inst): + """Generate the cosmic ray monitor page for a given instrument + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + inst : str + Name of JWST instrument + Returns + ------- + HttpResponse object + Outgoing response sent to the webpage + """ + + # Ensure the instrument is correctly capitalized + inst = inst.upper() - # Get the html and JS needed to render the bias tab plots - tabs_components = bokeh_containers.bias_monitor_tabs(inst) + tabs_components = bokeh_containers.cosmic_ray_monitor_tabs(inst) - template = "bias_monitor.html" + template = "cosmic_ray_monitor.html" context = { 'inst': inst, @@ -135,6 +165,42 @@ def dark_monitor(request, inst): return render(request, template, context) +def edb_monitor(request, inst): + """Generate the EDB telemetry monitor page for a given instrument + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + + inst : str + Name of JWST instrument + + Returns + ------- + HttpResponse object + Outgoing response sent to the webpage + """ + inst = inst.lower() + plot_dir = os.path.join(CONFIG["outputs"], "edb_telemetry_monitor", inst) + json_file = f'edb_{inst}_tabbed_plots.json' + + # Get the json data that contains the tabbed plots + with open(os.path.join(plot_dir, json_file), 'r') as fp: + data = json.dumps(json.loads(fp.read())) + + template = "edb_monitor.html" + + context = { + 'inst': JWST_INSTRUMENT_NAMES_MIXEDCASE[inst], + 'json_object': data, + 'resources': CDN.render() + } + + # Return a HTTP response with the template and dictionary of variables + return render(request, template, context) + + def readnoise_monitor(request, inst): """Generate the readnoise monitor page for a given instrument @@ -155,7 +221,7 @@ def readnoise_monitor(request, inst): inst = 
JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] # Get the html and JS needed to render the readnoise tab plots - tabs_components = bokeh_containers.readnoise_monitor_tabs(inst) + tabs_components = ReadNoiseFigure(inst).tab_components template = "readnoise_monitor.html" @@ -166,3 +232,101 @@ def readnoise_monitor(request, inst): # Return a HTTP response with the template and dictionary of variables return render(request, template, context) + + +def msata_monitoring(request): + """Container for MSATA monitor + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + + Returns + ------- + HttpResponse object + Outgoing response sent to the webpage + """ + # get the template and embed the plots + template = "msata_monitor.html" + + context = { + 'inst': 'NIRSpec', + 'base_url': get_base_url() + } + + # Return a HTTP response with the template and dictionary of variables + return render(request, template, context) + + +def msata_monitoring_ajax(request): + """Generate the MSATA monitor results to display in the monitor page + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + + Returns + ------- + JsonResponse object + Outgoing response sent to the webpage + """ + # retrieve existing monitor html content + monitor = msata_monitor.MSATA() + div, script1, script2 = monitor.read_existing_html() + + context = {'script1': script1, + 'script2': script2, + 'div': div} + + return JsonResponse(context, json_dumps_params={'indent': 2}) + + +def wata_monitoring(request): + """Container for WATA monitor + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + + Returns + ------- + HttpResponse object + Outgoing response sent to the webpage + """ + # get the template and embed the plots + template = "wata_monitor.html" + + context = { + 'inst': 'NIRSpec', + 'base_url': get_base_url() + } + + # Return a HTTP response with the template and dictionary of variables + return render(request, template, context) + + +def wata_monitoring_ajax(request): + """Generate the WATA monitor results to display in the monitor page + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + + Returns + ------- + JsonResponse object + Outgoing response sent to the webpage + """ + # retrieve existing monitor html content + monitor = wata_monitor.WATA() + div, script1, script2 = monitor.read_existing_html() + + context = {'script1': script1, + 'script2': script2, + 'div': div} + + return JsonResponse(context, json_dumps_params={'indent': 2}) diff --git a/jwql/website/apps/jwql/static/css/jwql.css b/jwql/website/apps/jwql/static/css/jwql.css index a5265f09e..eb567b25f 100644 --- a/jwql/website/apps/jwql/static/css/jwql.css +++ b/jwql/website/apps/jwql/static/css/jwql.css @@ -1,379 +1,278 @@ .anomaly_choice { - list-style: none; -} - -.APT_parameters { - width: 20% -} - -.banner { - position: absolute; - top: 55px; - width: 100%; - height: 3rem; - overflow: hidden; -} - -.banner img{ - position: absolute; - top: -9999px; - left: -9999px; - right: -9999px; - bottom: -9999px; - width: 100%; - margin: auto; -} - -/*Make normal buttons and highlighted outline buttons orange*/ -.btn-primary, .btn-primary.disabled, .btn-outline-primary:hover, -.btn-outline-primary.active { - background-color: #c85108 !important; - border-color: #c85108 !important; - color: white !important; - border-radius: 0px; - text-decoration: none; -} - -/*Make outline buttons and highlighted normal buttons 
white*/ -.btn-primary:hover, .btn-primary.active, .btn-outline-primary, -.show > .btn-primary.dropdown-toggle { - background-color: white !important; - border-color: #c85108 !important ; - color: #c85108 !important; - border-radius: 0px; - text-decoration: none; -} - -/*Stop them from glowing blue*/ -.btn.focus, .btn:active:focus, .btn.active:focus, .btn:active, -.btn.active, .show > .btn.dropdown-toggle:focus { - box-shadow: none !important; - text-decoration: none; -} - -[class*="col-"] { - padding-top: 1rem; - padding-bottom: 1rem; -/* margin: 1rem; - background-color: rgba(86, 61, 124, .15); - border: 1px solid rgba(86, 61, 124, .2);*/ -} - -.dashboard { - margin-left: 2%; - margin-right: 2%; -} - -/* Show the dropdown menu on hover */ -/* DO NOT how the dropdown menu on hover if the navbar is collapsed */ -@media only screen and (min-width: 1200px) { - li.dropdown:hover .dropdown-menu { - display: block; - } -} - -/* Make disabled dropdown items grey and unclickable */ -.disabled-dropdown { - color: #bec4d4 !important; - pointer-events: none; - cursor: default; -} - -/*Define dropdown menu colors*/ -.dropdown-item:hover{ - background-color: black; -} - -.dropdown-menu { - background-color: #2d353c; - border-radius: 0px; -} - -.dropdown-menu .dropdown-item { - color: white; -} - -.dropdown-menu .dropdown-heading { - color: #c85108 !important; - text-transform: uppercase; -} - -/*Stop the search box from glowing blue*/ -.form-control:focus { - box-shadow: none; - border-color: #cfd4da; -} - -/*Make sure the thumbnails are actually vertically centered*/ -.helper { - display: inline-block; - height: 100%; - vertical-align: middle; -} - -.help-tip { - text-align: center; - background-color: #D0D7D8; - color: black; - border-radius: 50%; - width: 20px; - height: 20px; - font-size: 12px; - line-height: 20px; - font-weight: bold; - opacity: 0.5; - display: inline-block; -} - -/*Stop the search box from glowing blue*/ -#homepage_filesearch #id_search { - width: 500px; - height: 100%; - padding: 0px; -} - -/* START structures for engineering_database page -*/ - -#mnemonic_name_search { + list-style: none; + } + + .APT_parameters { + width: 20% + } + + .banner { + position: absolute; + top: 55px; width: 100%; - height: 100%; - padding: 0px; -} - -.mnemonic_name_search_row { - display: flex; + height: 3rem; + overflow: hidden; + } + + .banner img{ + position: absolute; + top: -9999px; + left: -9999px; + right: -9999px; + bottom: -9999px; width: 100%; -} - -.mnemonic_name_search_col { - padding: 1em; - border: 1px solid #F2CE3A; - width: 100%; -} - -.mnemonic_name_search_col1 { - padding: 1em; - border: 1px solid #F2CE3A; - width: 40%; -} + margin: auto; + } + + /* Make sure bokeh widgets match default theme */ + .bk-root, .bk-btn { + font-family: 'Overpass', sans-serif !important; + font-size: 1rem !important; + flex-grow: 0 !important; + } + + /*Make normal buttons and highlighted outline buttons orange*/ + .btn-primary, .btn-primary.disabled, .btn-outline-primary:hover, + .btn-outline-primary.active, .bk-btn-primary { + background-color: #c85108 !important; + border-color: #c85108 !important; + color: white !important; + border-radius: 0px !important; + text-decoration: none; + } + + /*Make outline buttons and highlighted normal buttons white*/ + .btn-primary:hover, .btn-primary.active, .btn-outline-primary, + .show > .btn-primary.dropdown-toggle, .bk-btn-primary:hover { + background-color: white !important; + border-color: #c85108 !important ; + color: #c85108 !important; + border-radius: 0px 
!important; + text-decoration: none; + } + + /*Stop them from glowing blue*/ + .btn.focus, .btn:active:focus, .btn.active:focus, .btn:active, + .btn.active, .show > .btn.dropdown-toggle:focus { + box-shadow: none !important; + text-decoration: none; + } + + [class*="col-"] { + padding-top: 1rem; + padding-bottom: 1rem; + /* margin: 1rem; + background-color: rgba(86, 61, 124, .15); + border: 1px solid rgba(86, 61, 124, .2);*/ + } + + .dashboard { + margin-left: 2%; + margin-right: 2%; + } + + /* Show the dropdown menu on hover */ + /* DO NOT how the dropdown menu on hover if the navbar is collapsed */ + @media only screen and (min-width: 1200px) { + li.dropdown:hover .dropdown-menu { + display: block; + } + } + + /* Make disabled buttons opaque and unclickable */ + .disabled_button { + pointer-events: none; + opacity: 0.65; + } -.mnemonic_query_section { - width: 100%; - height: 100%; + /* Make disabled dropdown items grey and unclickable */ + .disabled-dropdown { + color: #bec4d4 !important; + pointer-events: none; + cursor: default; + } + + /* Make disabled sections opaque and unclickable */ + .disabled_section { + pointer-events: none; + opacity: 0.4; + } + + /*Define dropdown menu colors*/ + .dropdown-item:hover{ + background-color: black; + } + + .dropdown-menu { + background-color: #2d353c; + border-radius: 0px; + max-height: 400px; + overflow-y: auto; + } + + .dropdown-menu .dropdown-item { + color: white; + } + + .dropdown-menu .dropdown-heading { + color: #c85108 !important; + text-transform: uppercase; + } + + .explorer_options { + padding-left: 1rem; + padding-right: 1rem; + } + + /*Stop the search box from glowing blue*/ + .form-control:focus { + box-shadow: none; + border-color: #cfd4da; + } + + /*Make sure the thumbnails are actually vertically centered*/ + .helper { + display: inline-block; + height: 100%; + vertical-align: middle; + } + + .help-tip { + text-align: center; + background-color: #D0D7D8; + color: black; + border-radius: 50%; + width: 20px; + height: 20px; + font-size: 12px; + line-height: 20px; + font-weight: bold; + opacity: 0.5; + display: inline-block; + } + + /*Stop the search box from glowing blue*/ + #homepage_filesearch #id_search { + width: 500px; + height: 100%; + padding: 0px; + } + + /* START structures for engineering_database page + */ + + #mnemonic_name_search { + width: 100%; + height: 100%; + padding: 0px; + } + + .mnemonic_name_search_row { + display: flex; + width: 100%; + } + + .mnemonic_name_search_col { padding: 1em; - /*border:solid #000000;*/ border: 1px solid #F2CE3A; -} - -.mnemonic_exploration_section { width: 100%; - height: 100%; - overflow: auto; + } + + .mnemonic_name_search_col1 { padding: 1em; - /*border:solid #000000;*/ border: 1px solid #F2CE3A; - line-height: 15px -} - -.mnemonic_query_field { -float:left; -width:300px; -list-style-type: none; -display : inline; -} - -/* END structures for engineering_database page */ - - -#homepage_filesearch #id_search:focus { - box-shadow: none; - border-color: #cfd4da; -} - -/*Make the form fields be inline*/ -.homepage_form_fieldWrapper { - display: inline; -} - -#id_anomaly_choices { - list-style: none; - padding-left: 0; -} - -/*Don't let the search bar be super long*/ -.input-group { - width: 250px; -} - -/*Make the search icon look like a button*/ -.input-group-text { - background-color: #c85108 !important; - border-color: #c85108 !important; - color: white !important; - border-radius: 0px; -} - -/*Format the color background*/ -.instrument-color-fill { - display: none; - width: 100%; 
- height: 100%; - background-color: #c85108; - opacity: 1.0; - position: absolute; - top: 0%; - left: 0%; - z-index: 1; -} - -/*To make the instrument logos vertically centered*/ -.instrument_frame { - height: 180px; - width: 180px; - background-color: white; - border: 3px solid #c85108; - position: relative; - display: inline-block; -} - -/*Make H2 header smaller for select pages*/ -#instrument_main h2, .mnemonic_trending_main h2 { - font-size: 1.75rem; -} - -.instrument-name { - font-size: 35px; - color: white; - text-transform: uppercase; - display: none; - width: 100%; - height: 100%; - padding-top: 0.5rem; - padding-left: 0.5rem; - padding-bottom: 0.5rem; - padding-right: 0.5rem; - position: absolute; - top: 0%; - left: 0%; - /*text-align: left;*/ - z-index: 2; - vertical-align: middle; -} - -.instrument_panel { - text-align: center; -} - -.instrument_panel:hover .instrument-color-fill { - display: inline; -} - -.instrument_panel:hover .instrument-name { - display: inline-block; -} - -.instrument_select {m; - padding-top: 1rem; - padding-bottom: 2rem; - margin-right: 5rem; - margin-left: 5rem; -} - -.image_preview { - display: inline-block; -} - -#loading { - text-align:center; - margin: 0 auto; - width: 200px; - z-index: 1000; -} - -.monitor-name { - background-color: #c85108; + width: 40%; + } + + .mnemonic_query_section { + width: 100%; + height: 100%; + padding: 1em; + /*border:solid #000000;*/ + border: 1px solid #F2CE3A; + } + + .mnemonic_exploration_section { + width: 100%; + height: 100%; + overflow: auto; + padding: 1em; + /*border:solid #000000;*/ + border: 1px solid #F2CE3A; + line-height: 15px + } + + .mnemonic_query_field { + float:left; + width:300px; + list-style-type: none; + display : inline; + } + + /* END structures for engineering_database page */ + + + #homepage_filesearch #id_search:focus { + box-shadow: none; + border-color: #cfd4da; + } + + /*Make the form fields be inline*/ + .homepage_form_fieldWrapper { + display: inline; + } + + #id_anomaly_choices { + list-style: none; + padding-left: 0; + } + + /*Don't let the search bar be super long*/ + .input-group { + width: 250px; + } + + /*Make the search icon look like a button*/ + .input-group-text { + background-color: #c85108 !important; + border-color: #c85108 !important; + color: white !important; + border-radius: 0px; + } + + /*Format the color background*/ + .instrument-color-fill { + display: none; + width: 100%; + height: 100%; + background-color: #c85108; + opacity: 1.0; + position: absolute; + top: 0%; + left: 0%; + z-index: 1; + } + + /*To make the instrument logos vertically centered*/ + .instrument_frame { + height: 180px; + width: 180px; + background-color: white; + border: 3px solid #c85108; + position: relative; + display: inline-block; + } + + /*Make H2 header smaller for select pages*/ + #instrument_main h2, .mnemonic_trending_main h2 { + font-size: 1.75rem; + } + + .instrument-name { + font-size: 35px; color: white; - width: 100%; - height: 100%; -} - -/* Change color of dropdown links on hover */ -li:hover .nav-link, .navbar-brand:hover { - color: #fff !important; -} - -/* Define navbar color*/ -.navbar { - background-color: black; -} - -/*Define navbar font color and case*/ -.nav-link { - color: #bec4d4 !important; text-transform: uppercase; -} - -/* Set padding around JWST logo*/ -.navbar-left { - padding-left:10px; - padding-right:10px; -} - -/* Get rid of padding around GitHub logo */ -#github-link { - padding-bottom: 0px; - padding-top: 0px; -} - -.plot-container { - width: 100%; - height: 
600px; - position: relative; - display: inline-block; - border-color: #c85108 !important ; - border-style: solid; - border-radius: 0px; - border-width: 1px; -} - -.plot-header { - background-color: #c85108 !important; - border-color: #c85108 !important ; - color: #c85108 !important; - border-style: solid; - border-radius: 0px; - border-width: 1px; - width: 100%; -} - -/*Define the proposal thumbnails*/ -.proposal { - display: inline-block; - width: 8rem; - height: 8rem; - text-align: center; - position: relative; - display: inline-block; - margin: 0.1rem; -} - -.proposal img { - filter: grayscale(100%); -} - -.proposal-color-fill { - width: 100%; - height: 100%; - background-color: #356198; - opacity: 0.3; - position: absolute; - top: 0%; - left: 0%; - z-index: 1; -} - -.proposal-info { + display: none; width: 100%; height: 100%; padding-top: 0.5rem; @@ -383,169 +282,328 @@ li:hover .nav-link, .navbar-brand:hover { position: absolute; top: 0%; left: 0%; - text-align: left; - color: white; + /*text-align: left;*/ z-index: 2; - font-size: 0.75rem; -} - -.row { - margin-bottom: 1rem; -} - -.slider{ - -webkit-appearance: none; - width: 250px; - height: 15px; - background: #BEC4D4; - outline: none; -} - -/* slider style for Chrome/Safari/Opera/Edge */ -.slider::-webkit-slider-thumb { - -webkit-appearance: none; - appearance: none; - width: 15px; - height: 30px; - background: #C85108; - cursor: pointer; -} - -/* slider style for Firefox */ -.slider::-moz-range-thumb { - width: 15px; - height: 30px; - background: #C85108; - cursor: pointer; -} - -/* remove slider outline for Firefox */ -.slider::-moz-focus-outer { - border: 0; - } - -.row .row { - margin-top: 1rem; - margin-bottom: 0; -} - -/*Video for space 404 page*/ -#space_404 { - position: fixed; - object-fit: cover; - width: 100%; - height: 100%; - right: 0; - bottom: 0; - align: center; -} - -#space_404_text { - position: fixed; - background: rgba(0, 0, 0, 0.5); - color: white; - z-index: 100; - align: center; - padding: 2rem; - display: none; -} - -.thumbnail { - width: 8rem; - height: 8rem; + vertical-align: middle; + } + + .instrument_panel { text-align: center; - position: relative; + } + + .instrument_panel:hover .instrument-color-fill { + display: inline; + } + + .instrument_panel:hover .instrument-name { display: inline-block; - margin: 0.1rem; -} - -/*Format the color background*/ -.thumbnail-color-fill { - display: none; - width: 100%; - height: 100%; - background-color: #356198; - opacity: 0.3; - position: absolute; - top: 0%; - left: 0%; - z-index: 1; -} - -.thumbnail:hover { + } + + .instrument_select { + padding-top: 1rem; + padding-bottom: 2rem; + margin-right: 5rem; + margin-left: 5rem; + } + + .image_preview { + display: inline-block; + } + + #loading { + text-align:center; + margin: 0 auto; + width: 200px; + z-index: 1000; + } + + .monitor-name { + background-color: #c85108; + color: white; + width: 100%; + height: 100%; + } + + /* Change color of dropdown links on hover */ + li:hover .nav-link, .navbar-brand:hover { + color: #fff !important; + } + + /* Define navbar color*/ + .navbar { + background-color: black; + } + + /*Define navbar font color and case*/ + .nav-link { + color: #bec4d4 !important; + text-transform: uppercase; + } + + /* Set padding around JWST logo*/ + .navbar-left { + padding-left:10px; + padding-right:10px; + } + + /* Get rid of padding around GitHub logo */ + #github-link, #github-link-collapsed { + padding-bottom: 0px; + padding-top: 0px; + } + + .plot-container { + width: 100%; + height: 600px; + 
position: relative; + display: inline-block; + border-color: #c85108 !important ; + border-style: solid; + border-radius: 0px; + border-width: 1px; + } + + .plot-header { + background-color: #c85108 !important; + border-color: #c85108 !important ; + color: #c85108 !important; + border-style: solid; + border-radius: 0px; + border-width: 1px; + width: 100%; + } + + /*Define the proposal thumbnails*/ + .proposal { + display: inline-block; + width: 8rem; + height: 8rem; + text-align: center; + position: relative; + display: inline-block; + margin: 0.1rem; + } + + .proposal img { + filter: grayscale(100%); + } + + .proposal-color-fill { + width: 100%; + height: 100%; + background-color: #356198; + opacity: 0.3; + position: absolute; + top: 0%; + left: 0%; + z-index: 1; + } + + .proposal:hover { + cursor: pointer; + } + + .proposal:hover { + background-color: #356198; + opacity: 0.75; + } + + .proposal-info { + width: 100%; + height: 100%; + padding-top: 0.5rem; + padding-left: 0.5rem; + padding-bottom: 0.5rem; + padding-right: 0.5rem; + position: absolute; + top: 0%; + left: 0%; + text-align: left; + color: white; + z-index: 2; + font-size: 0.75rem; + } + + .row { + margin-bottom: 1rem; + } + + .slider{ + -webkit-appearance: none; + width: 250px; + height: 15px; + background: #BEC4D4; + outline: none; + } + + /* slider style for Chrome/Safari/Opera/Edge */ + .slider::-webkit-slider-thumb { + -webkit-appearance: none; + appearance: none; + width: 15px; + height: 30px; + background: #C85108; cursor: pointer; -} - -.thumbnail:hover .thumbnail-info, -.thumbnail:hover .thumbnail-color-fill { - display: inline; -} - -.thumbnail img { - max-width: 100%; - max-height: 100%; - width: auto; - height: auto; - vertical-align: middle; -} - -/*Format the proposal number and number of files*/ -.thumbnail-info { - display: none; + } + + /* slider style for Firefox */ + .slider::-moz-range-thumb { + width: 15px; + height: 30px; + background: #C85108; + cursor: pointer; + } + + /* remove slider outline for Firefox */ + .slider::-moz-focus-outer { + border: 0; + } + + .row .row { + margin-top: 1rem; + margin-bottom: 0; + } + + /*Video for space 404 page*/ + #space_404 { + position: fixed; + object-fit: cover; width: 100%; height: 100%; - padding-top: 0.5rem; - padding-left: 0.5rem; - padding-bottom: 0.5rem; - padding-right: 0.5rem; - position: absolute; - top: 0%; - left: 0%; - text-align: left; + right: 0; + bottom: 0; + align: center; + } + + #space_404_text { + position: fixed; + background: rgba(0, 0, 0, 0.5); + color: white; + z-index: 100; + align: center; + padding: 2rem; + display: none; + } + + .thumbnail { + width: 8rem; + height: 8rem; + text-align: center; + position: relative; + display: inline-block; + margin: 0.1rem; + } + + /*Format the color background*/ + .thumbnail-color-fill { + display: none; + width: 100%; + height: 100%; + background-color: #356198; + opacity: 0.3; + position: absolute; + top: 0%; + left: 0%; + z-index: 1; + } + + .thumbnail:hover { + cursor: pointer; + } + + .thumbnail:hover .thumbnail-info, + .thumbnail:hover .thumbnail-color-fill { + display: inline; + } + + .thumbnail img { + max-width: 100%; + max-height: 100%; + width: auto; + height: auto; + vertical-align: middle; + } + + /*Format the proposal number and number of files*/ + .thumbnail-info { + display: none; + width: 100%; + height: 100%; + padding-top: 0.5rem; + padding-left: 0.5rem; + padding-bottom: 0.5rem; + padding-right: 0.5rem; + position: absolute; + top: 0%; + left: 0%; + text-align: left; + color: white; + 
z-index: 2; + } + + .thumbnail-staff { + width: 15rem; + height: 15rem; + text-align: center; + position: relative; + display: inline-block; + margin: 0.1rem; + } + + /*Format thumbnail groups when active*/ + .thumbnail-group { + display: inline; + font-size: 0.75rem; + } + .thumbnail-group-active { + display: block; + width: 90%; + height: 90%; + border: 1px solid #2d353c; + box-shadow: 5px 5px #c85108, 10px 10px #bec4d4; + position: absolute; + font-size: 0.65rem; + } + + /*Format the version identifier text in bottom corner*/ + #version-div { + float: right; + width: 180px; + text-align: right; color: white; - z-index: 2; - font-size: 0.75rem; -} - -/*Format the version identifier text in bottom corner*/ -#version-div { - float: right; - width: 180px; - text-align: right; - color: white; - font-size: 12px -} - -/*Add underline for links*/ -a { - text-decoration: underline; -} - -/*Don't add underline for navbar and button links*/ -nav a, .btn { - text-decoration: none; -} - -body { - padding-top: 8rem; -} - -body { - font-family: 'Overpass', sans-serif !important; -} - -h1, h2, h3, h4, h5, h6 { - font-family: 'Oswald', sans-serif !important; -} -h1, h2, h3, h4 { - text-transform: uppercase; -} -h1 { - letter-spacing: 0.05em; -} - -ul.no-bullets { - list-style: none; - padding-left:10px; - line-height:25px; -} + font-size: 12px + } + + /*Add underline for links*/ + a { + text-decoration: underline; + } + + /*Don't add underline for navbar and button links*/ + nav a, .btn { + text-decoration: none; + } + + body { + padding-top: 8rem; + } + + body { + font-family: 'Overpass', sans-serif !important; + } + + h1, h2, h3, h4, h5, h6 { + font-family: 'Oswald', sans-serif !important; + } + h1, h2, h3, h4 { + text-transform: uppercase; + } + h1 { + letter-spacing: 0.05em; + } + + ul.no-bullets { + list-style: none; + padding-left:10px; + line-height:25px; + } diff --git a/jwql/website/apps/jwql/static/img/default_thumb.png b/jwql/website/apps/jwql/static/img/default_thumb.png new file mode 100644 index 000000000..cdb71f600 Binary files /dev/null and b/jwql/website/apps/jwql/static/img/default_thumb.png differ diff --git a/jwql/website/apps/jwql/static/img/dev-Clarke_Melanie_MESA.jpg b/jwql/website/apps/jwql/static/img/dev-Clarke_Melanie_MESA.jpg new file mode 100644 index 000000000..2864e7bec Binary files /dev/null and b/jwql/website/apps/jwql/static/img/dev-Clarke_Melanie_MESA.jpg differ diff --git a/jwql/website/apps/jwql/static/img/dev-Fix_Mees_MESA.png b/jwql/website/apps/jwql/static/img/dev-Fix_Mees_MESA.png new file mode 100644 index 000000000..9944f92c5 Binary files /dev/null and b/jwql/website/apps/jwql/static/img/dev-Fix_Mees_MESA.png differ diff --git a/jwql/website/apps/jwql/static/img/dev-Hilbert_Bryan_NIRCam.png b/jwql/website/apps/jwql/static/img/dev-Hilbert_Bryan_NIRCam.png new file mode 100644 index 000000000..0d9b7c613 Binary files /dev/null and b/jwql/website/apps/jwql/static/img/dev-Hilbert_Bryan_NIRCam.png differ diff --git a/jwql/website/apps/jwql/static/img/dev_Cooper_Rachel_NIRISS.png b/jwql/website/apps/jwql/static/img/dev_Cooper_Rachel_NIRISS.png new file mode 100644 index 000000000..9d9850fbe Binary files /dev/null and b/jwql/website/apps/jwql/static/img/dev_Cooper_Rachel_NIRISS.png differ diff --git a/jwql/website/apps/jwql/static/img/dev_Cracraft_Misty_MIRI.png b/jwql/website/apps/jwql/static/img/dev_Cracraft_Misty_MIRI.png new file mode 100644 index 000000000..1f8755605 Binary files /dev/null and b/jwql/website/apps/jwql/static/img/dev_Cracraft_Misty_MIRI.png differ 
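Illustrative aside (not part of this patch): the YAML interface files added above declare Bokeh objects (ColumnDataSource, Whisker, Figure, Panel) through YAML anchors and custom !-tags, presumably instantiated by jwql's bokeh_templating machinery at render time. As a rough sketch of what the "v2v3_offsets_trend_fig" entry in monitor_ta_interface.yaml encodes, the same trending figure can be written directly against the Bokeh API; the sample offset values and the output file name below are invented for illustration.

from bokeh.io import output_file, save
from bokeh.models import ColumnDataSource, Whisker
from bokeh.plotting import figure

# Column names mirror the "trend_source" data source in the YAML (values invented).
source = ColumnDataSource(data=dict(
    oss_v2=[0.010, -0.020], oss_v2_lo=[0.005, -0.025], oss_v2_hi=[0.015, -0.015],
    oss_v3=[0.030, 0.000], oss_v3_lo=[0.025, -0.005], oss_v3_hi=[0.035, 0.005],
    rep_v2=[0.012, -0.018], rep_v2_lo=[0.007, -0.023], rep_v2_hi=[0.017, -0.013],
    rep_v3=[0.028, 0.002], rep_v3_lo=[0.023, -0.003], rep_v3_hi=[0.033, 0.007],
))

# Figure options copied from the YAML entry.
fig = figure(title="NIRSpec MSA Pointing", height=700, width=700,
             x_axis_label="V2 offset (arcsec)", y_axis_label="V3 offset (arcsec)",
             tools="tap,wheel_zoom")

# OSS and replica offsets, as in the YAML 'elements' list.
fig.circle(x="oss_v2", y="oss_v3", source=source, size=6, color="orange", legend_label="OSS")
fig.circle(x="rep_v2", y="rep_v3", source=source, size=6, color="green", legend_label="replica")

# Horizontal (V2) and vertical (V3) error bars, matching the four Whisker entries.
fig.add_layout(Whisker(dimension="width", source=source, base="oss_v3", lower="oss_v2_lo", upper="oss_v2_hi"))
fig.add_layout(Whisker(dimension="height", source=source, base="oss_v2", lower="oss_v3_lo", upper="oss_v3_hi"))
fig.add_layout(Whisker(dimension="width", source=source, base="rep_v3", lower="rep_v2_lo", upper="rep_v2_hi"))
fig.add_layout(Whisker(dimension="height", source=source, base="rep_v2", lower="rep_v3_lo", upper="rep_v3_hi"))

output_file("msata_trend_sketch.html")  # arbitrary output name for this standalone sketch
save(fig)

The apparent intent of the YAML form is to keep this layout declarative, so the monitor class only needs to populate the data columns and bind the !self.* callbacks (visit selection and tab opening) rather than rebuild the figures in Python.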
diff --git a/jwql/website/apps/jwql/static/img/dev_Engesser_Mike_MIRI.png b/jwql/website/apps/jwql/static/img/dev_Engesser_Mike_MIRI.png new file mode 100644 index 000000000..6846b298c Binary files /dev/null and b/jwql/website/apps/jwql/static/img/dev_Engesser_Mike_MIRI.png differ diff --git a/jwql/website/apps/jwql/static/img/dev_Pena-Guerrero_Maria_NIRSpec.png b/jwql/website/apps/jwql/static/img/dev_Pena-Guerrero_Maria_NIRSpec.png new file mode 100644 index 000000000..8e3da5497 Binary files /dev/null and b/jwql/website/apps/jwql/static/img/dev_Pena-Guerrero_Maria_NIRSpec.png differ diff --git a/jwql/website/apps/jwql/static/img/dev_Sappington_Brad_MESA.png b/jwql/website/apps/jwql/static/img/dev_Sappington_Brad_MESA.png new file mode 100644 index 000000000..1362d3cc9 Binary files /dev/null and b/jwql/website/apps/jwql/static/img/dev_Sappington_Brad_MESA.png differ diff --git a/jwql/website/apps/jwql/static/img/dev_Sunnquist_Ben_NIRCam.png b/jwql/website/apps/jwql/static/img/dev_Sunnquist_Ben_NIRCam.png new file mode 100644 index 000000000..61a621e4b Binary files /dev/null and b/jwql/website/apps/jwql/static/img/dev_Sunnquist_Ben_NIRCam.png differ diff --git a/jwql/website/apps/jwql/static/img/dev_York_Brian_MESA.png b/jwql/website/apps/jwql/static/img/dev_York_Brian_MESA.png new file mode 100644 index 000000000..aaf27019b Binary files /dev/null and b/jwql/website/apps/jwql/static/img/dev_York_Brian_MESA.png differ diff --git a/jwql/website/apps/jwql/static/js/bootstrap.bundle.min.js b/jwql/website/apps/jwql/static/js/bootstrap.bundle.min.js index 7d50e873a..72a46cf98 100644 --- a/jwql/website/apps/jwql/static/js/bootstrap.bundle.min.js +++ b/jwql/website/apps/jwql/static/js/bootstrap.bundle.min.js @@ -1,7 +1,7 @@ /*! - * Bootstrap v4.0.0 (https://getbootstrap.com) + * Bootstrap v4.1.3 (https://getbootstrap.com/) * Copyright 2011-2018 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) */ -!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("jquery")):"function"==typeof define&&define.amd?define(["exports","jquery"],e):e(t.bootstrap={},t.jQuery)}(this,function(t,e){"use strict";function n(t,e){for(var n=0;n0?i:null}catch(t){return null}},reflow:function(t){return t.offsetHeight},triggerTransitionEnd:function(n){t(n).trigger(e.end)},supportsTransitionEnd:function(){return Boolean(e)},isElement:function(t){return(t[0]||t).nodeType},typeCheckConfig:function(t,e,n){for(var r in n)if(Object.prototype.hasOwnProperty.call(n,r)){var o=n[r],s=e[r],a=s&&i.isElement(s)?"element":(l=s,{}.toString.call(l).match(/\s([a-zA-Z]+)/)[1].toLowerCase());if(!new RegExp(o).test(a))throw new Error(t.toUpperCase()+': Option "'+r+'" provided type "'+a+'" but expected type "'+o+'".')}var l}};return e=("undefined"==typeof window||!window.QUnit)&&{end:"transitionend"},t.fn.emulateTransitionEnd=n,i.supportsTransitionEnd()&&(t.event.special[i.TRANSITION_END]={bindType:e.end,delegateType:e.end,handle:function(e){if(t(e.target).is(this))return e.handleObj.handler.apply(this,arguments)}}),i}(e=e&&e.hasOwnProperty("default")?e.default:e),L=(s="alert",l="."+(a="bs.alert"),c=(o=e).fn[s],h={CLOSE:"close"+l,CLOSED:"closed"+l,CLICK_DATA_API:"click"+l+".data-api"},f="alert",u="fade",d="show",p=function(){function t(t){this._element=t}var e=t.prototype;return e.close=function(t){t=t||this._element;var 
[The remainder of this bootstrap.bundle.min.js hunk is the vendored single-line minified Bootstrap/Popper bundle (the v4.0.0 line removed and the v4.1.3 line added); the minified payload is not human-readable and is not reproduced here.]
',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent"},f="show",u="out",d={HIDE:"hide"+o,HIDDEN:"hidden"+o,SHOW:"show"+o,SHOWN:"shown"+o,INSERTED:"inserted"+o,CLICK:"click"+o,FOCUSIN:"focusin"+o,FOCUSOUT:"focusout"+o,MOUSEENTER:"mouseenter"+o,MOUSELEAVE:"mouseleave"+o},p="fade",g="show",m=".tooltip-inner",_=".arrow",v="hover",E="focus",y="click",b="manual",T=function(){function s(t,e){if("undefined"==typeof Ot)throw new TypeError("Bootstrap tooltips require Popper.js (https://popper.js.org)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=t,this.config=this._getConfig(e),this.tip=null,this._setListeners()}var T=s.prototype;return T.enable=function(){this._isEnabled=!0},T.disable=function(){this._isEnabled=!1},T.toggleEnabled=function(){this._isEnabled=!this._isEnabled},T.toggle=function(e){if(this._isEnabled)if(e){var n=this.constructor.DATA_KEY,i=t(e.currentTarget).data(n);i||(i=new this.constructor(e.currentTarget,this._getDelegateConfig()),t(e.currentTarget).data(n,i)),i._activeTrigger.click=!i._activeTrigger.click,i._isWithActiveTrigger()?i._enter(null,i):i._leave(null,i)}else{if(t(this.getTipElement()).hasClass(g))return void this._leave(null,this);this._enter(null,this)}},T.dispose=function(){clearTimeout(this._timeout),t.removeData(this.element,this.constructor.DATA_KEY),t(this.element).off(this.constructor.EVENT_KEY),t(this.element).closest(".modal").off("hide.bs.modal"),this.tip&&t(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,this._activeTrigger=null,null!==this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},T.show=function(){var e=this;if("none"===t(this.element).css("display"))throw new Error("Please use show on visible elements");var n=t.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){t(this.element).trigger(n);var i=t.contains(this.element.ownerDocument.documentElement,this.element);if(n.isDefaultPrevented()||!i)return;var r=this.getTipElement(),o=k.getUID(this.constructor.NAME);r.setAttribute("id",o),this.element.setAttribute("aria-describedby",o),this.setContent(),this.config.animation&&t(r).addClass(p);var a="function"==typeof this.config.placement?this.config.placement.call(this,r,this.element):this.config.placement,l=this._getAttachment(a);this.addAttachmentClass(l);var c=!1===this.config.container?document.body:t(this.config.container);t(r).data(this.constructor.DATA_KEY,this),t.contains(this.element.ownerDocument.documentElement,this.tip)||t(r).appendTo(c),t(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new Ot(this.element,r,{placement:l,modifiers:{offset:{offset:this.config.offset},flip:{behavior:this.config.fallbackPlacement},arrow:{element:_},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(t){t.originalPlacement!==t.placement&&e._handlePopperPlacementChange(t)},onUpdate:function(t){e._handlePopperPlacementChange(t)}}),t(r).addClass(g),"ontouchstart"in document.documentElement&&t("body").children().on("mouseover",null,t.noop);var h=function(){e.config.animation&&e._fixTransition();var n=e._hoverState;e._hoverState=null,t(e.element).trigger(e.constructor.Event.SHOWN),n===u&&e._leave(null,e)};k.supportsTransitionEnd()&&t(this.tip).hasClass(p)?t(this.tip).one(k.TRANSITION_END,h).emulateTransitionEnd(s._TRANSITION_DURATION):h()}},T.hide=function(e){var 
n=this,i=this.getTipElement(),r=t.Event(this.constructor.Event.HIDE),o=function(){n._hoverState!==f&&i.parentNode&&i.parentNode.removeChild(i),n._cleanTipClass(),n.element.removeAttribute("aria-describedby"),t(n.element).trigger(n.constructor.Event.HIDDEN),null!==n._popper&&n._popper.destroy(),e&&e()};t(this.element).trigger(r),r.isDefaultPrevented()||(t(i).removeClass(g),"ontouchstart"in document.documentElement&&t("body").children().off("mouseover",null,t.noop),this._activeTrigger[y]=!1,this._activeTrigger[E]=!1,this._activeTrigger[v]=!1,k.supportsTransitionEnd()&&t(this.tip).hasClass(p)?t(i).one(k.TRANSITION_END,o).emulateTransitionEnd(150):o(),this._hoverState="")},T.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},T.isWithContent=function(){return Boolean(this.getTitle())},T.addAttachmentClass=function(e){t(this.getTipElement()).addClass("bs-tooltip-"+e)},T.getTipElement=function(){return this.tip=this.tip||t(this.config.template)[0],this.tip},T.setContent=function(){var e=t(this.getTipElement());this.setElementContent(e.find(m),this.getTitle()),e.removeClass(p+" "+g)},T.setElementContent=function(e,n){var i=this.config.html;"object"==typeof n&&(n.nodeType||n.jquery)?i?t(n).parent().is(e)||e.empty().append(n):e.text(t(n).text()):e[i?"html":"text"](n)},T.getTitle=function(){var t=this.element.getAttribute("data-original-title");return t||(t="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),t},T._getAttachment=function(t){return c[t.toUpperCase()]},T._setListeners=function(){var e=this;this.config.trigger.split(" ").forEach(function(n){if("click"===n)t(e.element).on(e.constructor.Event.CLICK,e.config.selector,function(t){return e.toggle(t)});else if(n!==b){var i=n===v?e.constructor.Event.MOUSEENTER:e.constructor.Event.FOCUSIN,r=n===v?e.constructor.Event.MOUSELEAVE:e.constructor.Event.FOCUSOUT;t(e.element).on(i,e.config.selector,function(t){return e._enter(t)}).on(r,e.config.selector,function(t){return e._leave(t)})}t(e.element).closest(".modal").on("hide.bs.modal",function(){return e.hide()})}),this.config.selector?this.config=r({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},T._fixTitle=function(){var t=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||"string"!==t)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},T._enter=function(e,n){var i=this.constructor.DATA_KEY;(n=n||t(e.currentTarget).data(i))||(n=new this.constructor(e.currentTarget,this._getDelegateConfig()),t(e.currentTarget).data(i,n)),e&&(n._activeTrigger["focusin"===e.type?E:v]=!0),t(n.getTipElement()).hasClass(g)||n._hoverState===f?n._hoverState=f:(clearTimeout(n._timeout),n._hoverState=f,n.config.delay&&n.config.delay.show?n._timeout=setTimeout(function(){n._hoverState===f&&n.show()},n.config.delay.show):n.show())},T._leave=function(e,n){var i=this.constructor.DATA_KEY;(n=n||t(e.currentTarget).data(i))||(n=new this.constructor(e.currentTarget,this._getDelegateConfig()),t(e.currentTarget).data(i,n)),e&&(n._activeTrigger["focusout"===e.type?E:v]=!1),n._isWithActiveTrigger()||(clearTimeout(n._timeout),n._hoverState=u,n.config.delay&&n.config.delay.hide?n._timeout=setTimeout(function(){n._hoverState===u&&n.hide()},n.config.delay.hide):n.hide())},T._isWithActiveTrigger=function(){for(var t in 
this._activeTrigger)if(this._activeTrigger[t])return!0;return!1},T._getConfig=function(n){return"number"==typeof(n=r({},this.constructor.Default,t(this.element).data(),n)).delay&&(n.delay={show:n.delay,hide:n.delay}),"number"==typeof n.title&&(n.title=n.title.toString()),"number"==typeof n.content&&(n.content=n.content.toString()),k.typeCheckConfig(e,n,this.constructor.DefaultType),n},T._getDelegateConfig=function(){var t={};if(this.config)for(var e in this.config)this.constructor.Default[e]!==this.config[e]&&(t[e]=this.config[e]);return t},T._cleanTipClass=function(){var e=t(this.getTipElement()),n=e.attr("class").match(a);null!==n&&n.length>0&&e.removeClass(n.join(""))},T._handlePopperPlacementChange=function(t){this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(t.placement))},T._fixTransition=function(){var e=this.getTipElement(),n=this.config.animation;null===e.getAttribute("x-placement")&&(t(e).removeClass(p),this.config.animation=!1,this.hide(),this.show(),this.config.animation=n)},s._jQueryInterface=function(e){return this.each(function(){var i=t(this).data(n),r="object"==typeof e&&e;if((i||!/dispose|hide/.test(e))&&(i||(i=new s(this,r),t(this).data(n,i)),"string"==typeof e)){if("undefined"==typeof i[e])throw new TypeError('No method named "'+e+'"');i[e]()}})},i(s,null,[{key:"VERSION",get:function(){return"4.0.0"}},{key:"Default",get:function(){return h}},{key:"NAME",get:function(){return e}},{key:"DATA_KEY",get:function(){return n}},{key:"Event",get:function(){return d}},{key:"EVENT_KEY",get:function(){return o}},{key:"DefaultType",get:function(){return l}}]),s}();return t.fn[e]=T._jQueryInterface,t.fn[e].Constructor=T,t.fn[e].noConflict=function(){return t.fn[e]=s,T._jQueryInterface},T}(e),Pt=function(t){var e="popover",n="bs.popover",o="."+n,s=t.fn[e],a=new RegExp("(^|\\s)bs-popover\\S+","g"),l=r({},Lt.Default,{placement:"right",trigger:"click",content:"",template:''}),c=r({},Lt.DefaultType,{content:"(string|element|function)"}),h="fade",f="show",u=".popover-header",d=".popover-body",p={HIDE:"hide"+o,HIDDEN:"hidden"+o,SHOW:"show"+o,SHOWN:"shown"+o,INSERTED:"inserted"+o,CLICK:"click"+o,FOCUSIN:"focusin"+o,FOCUSOUT:"focusout"+o,MOUSEENTER:"mouseenter"+o,MOUSELEAVE:"mouseleave"+o},g=function(r){var s,g;function m(){return r.apply(this,arguments)||this}g=r,(s=m).prototype=Object.create(g.prototype),s.prototype.constructor=s,s.__proto__=g;var _=m.prototype;return _.isWithContent=function(){return this.getTitle()||this._getContent()},_.addAttachmentClass=function(e){t(this.getTipElement()).addClass("bs-popover-"+e)},_.getTipElement=function(){return this.tip=this.tip||t(this.config.template)[0],this.tip},_.setContent=function(){var e=t(this.getTipElement());this.setElementContent(e.find(u),this.getTitle());var n=this._getContent();"function"==typeof n&&(n=n.call(this.element)),this.setElementContent(e.find(d),n),e.removeClass(h+" "+f)},_._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},_._cleanTipClass=function(){var e=t(this.getTipElement()),n=e.attr("class").match(a);null!==n&&n.length>0&&e.removeClass(n.join(""))},m._jQueryInterface=function(e){return this.each(function(){var i=t(this).data(n),r="object"==typeof e?e:null;if((i||!/destroy|hide/.test(e))&&(i||(i=new m(this,r),t(this).data(n,i)),"string"==typeof e)){if("undefined"==typeof i[e])throw new TypeError('No method named "'+e+'"');i[e]()}})},i(m,null,[{key:"VERSION",get:function(){return"4.0.0"}},{key:"Default",get:function(){return 
l}},{key:"NAME",get:function(){return e}},{key:"DATA_KEY",get:function(){return n}},{key:"Event",get:function(){return p}},{key:"EVENT_KEY",get:function(){return o}},{key:"DefaultType",get:function(){return c}}]),m}(Lt);return t.fn[e]=g._jQueryInterface,t.fn[e].Constructor=g,t.fn[e].noConflict=function(){return t.fn[e]=s,g._jQueryInterface},g}(e),xt=function(t){var e="scrollspy",n="bs.scrollspy",o="."+n,s=t.fn[e],a={offset:10,method:"auto",target:""},l={offset:"number",method:"string",target:"(string|element)"},c={ACTIVATE:"activate"+o,SCROLL:"scroll"+o,LOAD_DATA_API:"load"+o+".data-api"},h="dropdown-item",f="active",u={DATA_SPY:'[data-spy="scroll"]',ACTIVE:".active",NAV_LIST_GROUP:".nav, .list-group",NAV_LINKS:".nav-link",NAV_ITEMS:".nav-item",LIST_ITEMS:".list-group-item",DROPDOWN:".dropdown",DROPDOWN_ITEMS:".dropdown-item",DROPDOWN_TOGGLE:".dropdown-toggle"},d="offset",p="position",g=function(){function s(e,n){var i=this;this._element=e,this._scrollElement="BODY"===e.tagName?window:e,this._config=this._getConfig(n),this._selector=this._config.target+" "+u.NAV_LINKS+","+this._config.target+" "+u.LIST_ITEMS+","+this._config.target+" "+u.DROPDOWN_ITEMS,this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,t(this._scrollElement).on(c.SCROLL,function(t){return i._process(t)}),this.refresh(),this._process()}var g=s.prototype;return g.refresh=function(){var e=this,n=this._scrollElement===this._scrollElement.window?d:p,i="auto"===this._config.method?n:this._config.method,r=i===p?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),t.makeArray(t(this._selector)).map(function(e){var n,o=k.getSelectorFromElement(e);if(o&&(n=t(o)[0]),n){var s=n.getBoundingClientRect();if(s.width||s.height)return[t(n)[i]().top+r,o]}return null}).filter(function(t){return t}).sort(function(t,e){return t[0]-e[0]}).forEach(function(t){e._offsets.push(t[0]),e._targets.push(t[1])})},g.dispose=function(){t.removeData(this._element,n),t(this._scrollElement).off(o),this._element=null,this._scrollElement=null,this._config=null,this._selector=null,this._offsets=null,this._targets=null,this._activeTarget=null,this._scrollHeight=null},g._getConfig=function(n){if("string"!=typeof(n=r({},a,n)).target){var i=t(n.target).attr("id");i||(i=k.getUID(e),t(n.target).attr("id",i)),n.target="#"+i}return k.typeCheckConfig(e,n,l),n},g._getScrollTop=function(){return this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop},g._getScrollHeight=function(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)},g._getOffsetHeight=function(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height},g._process=function(){var t=this._getScrollTop()+this._config.offset,e=this._getScrollHeight(),n=this._config.offset+e-this._getOffsetHeight();if(this._scrollHeight!==e&&this.refresh(),t>=n){var i=this._targets[this._targets.length-1];this._activeTarget!==i&&this._activate(i)}else{if(this._activeTarget&&t0)return this._activeTarget=null,void this._clear();for(var r=this._offsets.length;r--;){this._activeTarget!==this._targets[r]&&t>=this._offsets[r]&&("undefined"==typeof this._offsets[r+1]||t=4)throw new Error("Bootstrap's JavaScript requires at least jQuery v1.9.1 but less than 
v4.0.0")}(e),t.Util=k,t.Alert=L,t.Button=P,t.Carousel=x,t.Collapse=R,t.Dropdown=Nt,t.Modal=kt,t.Popover=Pt,t.Scrollspy=xt,t.Tab=Rt,t.Tooltip=Lt,Object.defineProperty(t,"__esModule",{value:!0})}); +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports,require("jquery")):"function"==typeof define&&define.amd?define(["exports","jquery"],t):t(e.bootstrap={},e.jQuery)}(this,function(e,t){"use strict";function i(e,t){for(var n=0;nthis._items.length-1||e<0))if(this._isSliding)k(this._element).one(q.SLID,function(){return t.to(e)});else{if(n===e)return this.pause(),void this.cycle();var i=n=i.clientWidth&&n>=i.clientHeight}),u=0l[e]&&!i.escapeWithReference&&(n=Math.min(u[t],l[e]-("right"===e?u.width:u.height))),Ve({},t,n)}};return c.forEach(function(e){var t=-1!==["left","top"].indexOf(e)?"primary":"secondary";u=ze({},u,f[t](e))}),e.offsets.popper=u,e},priority:["left","right","top","bottom"],padding:5,boundariesElement:"scrollParent"},keepTogether:{order:400,enabled:!0,fn:function(e){var t=e.offsets,n=t.popper,i=t.reference,r=e.placement.split("-")[0],o=Math.floor,s=-1!==["top","bottom"].indexOf(r),a=s?"right":"bottom",l=s?"left":"top",c=s?"width":"height";return n[a]o(i[a])&&(e.offsets.popper[l]=o(i[a])),e}},arrow:{order:500,enabled:!0,fn:function(e,t){var n;if(!pt(e.instance.modifiers,"arrow","keepTogether"))return e;var i=t.element;if("string"==typeof i){if(!(i=e.instance.popper.querySelector(i)))return e}else if(!e.instance.popper.contains(i))return console.warn("WARNING: `arrow.element` must be child of its popper element!"),e;var r=e.placement.split("-")[0],o=e.offsets,s=o.popper,a=o.reference,l=-1!==["left","right"].indexOf(r),c=l?"height":"width",u=l?"Top":"Left",f=u.toLowerCase(),h=l?"left":"top",d=l?"bottom":"right",p=nt(i)[c];a[d]-ps[d]&&(e.offsets.popper[f]+=a[f]+p-s[d]),e.offsets.popper=Ge(e.offsets.popper);var m=a[f]+a[c]/2-p/2,g=Pe(e.instance.popper),_=parseFloat(g["margin"+u],10),v=parseFloat(g["border"+u+"Width"],10),y=m-e.offsets.popper[f]-_-v;return y=Math.max(Math.min(s[c]-p,y),0),e.arrowElement=i,e.offsets.arrow=(Ve(n={},f,Math.round(y)),Ve(n,h,""),n),e},element:"[x-arrow]"},flip:{order:600,enabled:!0,fn:function(p,m){if(at(p.instance.modifiers,"inner"))return p;if(p.flipped&&p.placement===p.originalPlacement)return p;var g=$e(p.instance.popper,p.instance.reference,m.padding,m.boundariesElement,p.positionFixed),_=p.placement.split("-")[0],v=it(_),y=p.placement.split("-")[1]||"",E=[];switch(m.behavior){case vt:E=[_,v];break;case yt:E=_t(_);break;case Et:E=_t(_,!0);break;default:E=m.behavior}return E.forEach(function(e,t){if(_!==e||E.length===t+1)return p;_=p.placement.split("-")[0],v=it(_);var n,i=p.offsets.popper,r=p.offsets.reference,o=Math.floor,s="left"===_&&o(i.right)>o(r.left)||"right"===_&&o(i.left)o(r.top)||"bottom"===_&&o(i.top)o(g.right),c=o(i.top)o(g.bottom),f="left"===_&&a||"right"===_&&l||"top"===_&&c||"bottom"===_&&u,h=-1!==["top","bottom"].indexOf(_),d=!!m.flipVariations&&(h&&"start"===y&&a||h&&"end"===y&&l||!h&&"start"===y&&c||!h&&"end"===y&&u);(s||f||d)&&(p.flipped=!0,(s||f)&&(_=E[t+1]),d&&(y="end"===(n=y)?"start":"start"===n?"end":n),p.placement=_+(y?"-"+y:""),p.offsets.popper=ze({},p.offsets.popper,rt(p.instance.popper,p.offsets.reference,p.placement)),p=st(p.instance.modifiers,p,"flip"))}),p},behavior:"flip",padding:5,boundariesElement:"viewport"},inner:{order:700,enabled:!1,fn:function(e){var 
t=e.placement,n=t.split("-")[0],i=e.offsets,r=i.popper,o=i.reference,s=-1!==["left","right"].indexOf(n),a=-1===["top","left"].indexOf(n);return r[s?"left":"top"]=o[n]-(a?r[s?"width":"height"]:0),e.placement=it(t),e.offsets.popper=Ge(r),e}},hide:{order:800,enabled:!0,fn:function(e){if(!pt(e.instance.modifiers,"hide","preventOverflow"))return e;var t=e.offsets.reference,n=ot(e.instance.modifiers,function(e){return"preventOverflow"===e.name}).boundaries;if(t.bottomn.right||t.top>n.bottom||t.rightdocument.documentElement.clientHeight;!this._isBodyOverflowing&&e&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!e&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},e._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},e._checkScrollbar=function(){var e=document.body.getBoundingClientRect();this._isBodyOverflowing=e.left+e.right
',trigger:"hover focus",title:"",delay:0,html:!(An={AUTO:"auto",TOP:"top",RIGHT:"right",BOTTOM:"bottom",LEFT:"left"}),selector:!(Dn={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(number|string)",container:"(string|element|boolean)",fallbackPlacement:"(string|array)",boundary:"(string|element)"}),placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent"},Nn="out",kn={HIDE:"hide"+wn,HIDDEN:"hidden"+wn,SHOW:(On="show")+wn,SHOWN:"shown"+wn,INSERTED:"inserted"+wn,CLICK:"click"+wn,FOCUSIN:"focusin"+wn,FOCUSOUT:"focusout"+wn,MOUSEENTER:"mouseenter"+wn,MOUSELEAVE:"mouseleave"+wn},xn="fade",Pn="show",Ln=".tooltip-inner",jn=".arrow",Hn="hover",Mn="focus",Fn="click",Wn="manual",Rn=function(){function i(e,t){if("undefined"==typeof Ct)throw new TypeError("Bootstrap tooltips require Popper.js (https://popper.js.org)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=e,this.config=this._getConfig(t),this.tip=null,this._setListeners()}var e=i.prototype;return e.enable=function(){this._isEnabled=!0},e.disable=function(){this._isEnabled=!1},e.toggleEnabled=function(){this._isEnabled=!this._isEnabled},e.toggle=function(e){if(this._isEnabled)if(e){var t=this.constructor.DATA_KEY,n=yn(e.currentTarget).data(t);n||(n=new this.constructor(e.currentTarget,this._getDelegateConfig()),yn(e.currentTarget).data(t,n)),n._activeTrigger.click=!n._activeTrigger.click,n._isWithActiveTrigger()?n._enter(null,n):n._leave(null,n)}else{if(yn(this.getTipElement()).hasClass(Pn))return void this._leave(null,this);this._enter(null,this)}},e.dispose=function(){clearTimeout(this._timeout),yn.removeData(this.element,this.constructor.DATA_KEY),yn(this.element).off(this.constructor.EVENT_KEY),yn(this.element).closest(".modal").off("hide.bs.modal"),this.tip&&yn(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,(this._activeTrigger=null)!==this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},e.show=function(){var t=this;if("none"===yn(this.element).css("display"))throw new Error("Please use show on visible elements");var e=yn.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){yn(this.element).trigger(e);var n=yn.contains(this.element.ownerDocument.documentElement,this.element);if(e.isDefaultPrevented()||!n)return;var i=this.getTipElement(),r=we.getUID(this.constructor.NAME);i.setAttribute("id",r),this.element.setAttribute("aria-describedby",r),this.setContent(),this.config.animation&&yn(i).addClass(xn);var o="function"==typeof this.config.placement?this.config.placement.call(this,i,this.element):this.config.placement,s=this._getAttachment(o);this.addAttachmentClass(s);var a=!1===this.config.container?document.body:yn(document).find(this.config.container);yn(i).data(this.constructor.DATA_KEY,this),yn.contains(this.element.ownerDocument.documentElement,this.tip)||yn(i).appendTo(a),yn(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new 
Ct(this.element,i,{placement:s,modifiers:{offset:{offset:this.config.offset},flip:{behavior:this.config.fallbackPlacement},arrow:{element:jn},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(e){e.originalPlacement!==e.placement&&t._handlePopperPlacementChange(e)},onUpdate:function(e){t._handlePopperPlacementChange(e)}}),yn(i).addClass(Pn),"ontouchstart"in document.documentElement&&yn(document.body).children().on("mouseover",null,yn.noop);var l=function(){t.config.animation&&t._fixTransition();var e=t._hoverState;t._hoverState=null,yn(t.element).trigger(t.constructor.Event.SHOWN),e===Nn&&t._leave(null,t)};if(yn(this.tip).hasClass(xn)){var c=we.getTransitionDurationFromElement(this.tip);yn(this.tip).one(we.TRANSITION_END,l).emulateTransitionEnd(c)}else l()}},e.hide=function(e){var t=this,n=this.getTipElement(),i=yn.Event(this.constructor.Event.HIDE),r=function(){t._hoverState!==On&&n.parentNode&&n.parentNode.removeChild(n),t._cleanTipClass(),t.element.removeAttribute("aria-describedby"),yn(t.element).trigger(t.constructor.Event.HIDDEN),null!==t._popper&&t._popper.destroy(),e&&e()};if(yn(this.element).trigger(i),!i.isDefaultPrevented()){if(yn(n).removeClass(Pn),"ontouchstart"in document.documentElement&&yn(document.body).children().off("mouseover",null,yn.noop),this._activeTrigger[Fn]=!1,this._activeTrigger[Mn]=!1,this._activeTrigger[Hn]=!1,yn(this.tip).hasClass(xn)){var o=we.getTransitionDurationFromElement(n);yn(n).one(we.TRANSITION_END,r).emulateTransitionEnd(o)}else r();this._hoverState=""}},e.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},e.isWithContent=function(){return Boolean(this.getTitle())},e.addAttachmentClass=function(e){yn(this.getTipElement()).addClass(Tn+"-"+e)},e.getTipElement=function(){return this.tip=this.tip||yn(this.config.template)[0],this.tip},e.setContent=function(){var e=this.getTipElement();this.setElementContent(yn(e.querySelectorAll(Ln)),this.getTitle()),yn(e).removeClass(xn+" "+Pn)},e.setElementContent=function(e,t){var n=this.config.html;"object"==typeof t&&(t.nodeType||t.jquery)?n?yn(t).parent().is(e)||e.empty().append(t):e.text(yn(t).text()):e[n?"html":"text"](t)},e.getTitle=function(){var e=this.element.getAttribute("data-original-title");return e||(e="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),e},e._getAttachment=function(e){return An[e.toUpperCase()]},e._setListeners=function(){var i=this;this.config.trigger.split(" ").forEach(function(e){if("click"===e)yn(i.element).on(i.constructor.Event.CLICK,i.config.selector,function(e){return i.toggle(e)});else if(e!==Wn){var t=e===Hn?i.constructor.Event.MOUSEENTER:i.constructor.Event.FOCUSIN,n=e===Hn?i.constructor.Event.MOUSELEAVE:i.constructor.Event.FOCUSOUT;yn(i.element).on(t,i.config.selector,function(e){return i._enter(e)}).on(n,i.config.selector,function(e){return i._leave(e)})}yn(i.element).closest(".modal").on("hide.bs.modal",function(){return i.hide()})}),this.config.selector?this.config=l({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},e._fixTitle=function(){var e=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||"string"!==e)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},e._enter=function(e,t){var n=this.constructor.DATA_KEY;(t=t||yn(e.currentTarget).data(n))||(t=new 
this.constructor(e.currentTarget,this._getDelegateConfig()),yn(e.currentTarget).data(n,t)),e&&(t._activeTrigger["focusin"===e.type?Mn:Hn]=!0),yn(t.getTipElement()).hasClass(Pn)||t._hoverState===On?t._hoverState=On:(clearTimeout(t._timeout),t._hoverState=On,t.config.delay&&t.config.delay.show?t._timeout=setTimeout(function(){t._hoverState===On&&t.show()},t.config.delay.show):t.show())},e._leave=function(e,t){var n=this.constructor.DATA_KEY;(t=t||yn(e.currentTarget).data(n))||(t=new this.constructor(e.currentTarget,this._getDelegateConfig()),yn(e.currentTarget).data(n,t)),e&&(t._activeTrigger["focusout"===e.type?Mn:Hn]=!1),t._isWithActiveTrigger()||(clearTimeout(t._timeout),t._hoverState=Nn,t.config.delay&&t.config.delay.hide?t._timeout=setTimeout(function(){t._hoverState===Nn&&t.hide()},t.config.delay.hide):t.hide())},e._isWithActiveTrigger=function(){for(var e in this._activeTrigger)if(this._activeTrigger[e])return!0;return!1},e._getConfig=function(e){return"number"==typeof(e=l({},this.constructor.Default,yn(this.element).data(),"object"==typeof e&&e?e:{})).delay&&(e.delay={show:e.delay,hide:e.delay}),"number"==typeof e.title&&(e.title=e.title.toString()),"number"==typeof e.content&&(e.content=e.content.toString()),we.typeCheckConfig(En,e,this.constructor.DefaultType),e},e._getDelegateConfig=function(){var e={};if(this.config)for(var t in this.config)this.constructor.Default[t]!==this.config[t]&&(e[t]=this.config[t]);return e},e._cleanTipClass=function(){var e=yn(this.getTipElement()),t=e.attr("class").match(Sn);null!==t&&t.length&&e.removeClass(t.join(""))},e._handlePopperPlacementChange=function(e){var t=e.instance;this.tip=t.popper,this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(e.placement))},e._fixTransition=function(){var e=this.getTipElement(),t=this.config.animation;null===e.getAttribute("x-placement")&&(yn(e).removeClass(xn),this.config.animation=!1,this.hide(),this.show(),this.config.animation=t)},i._jQueryInterface=function(n){return this.each(function(){var e=yn(this).data(bn),t="object"==typeof n&&n;if((e||!/dispose|hide/.test(n))&&(e||(e=new i(this,t),yn(this).data(bn,e)),"string"==typeof n)){if("undefined"==typeof e[n])throw new TypeError('No method named "'+n+'"');e[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return In}},{key:"NAME",get:function(){return En}},{key:"DATA_KEY",get:function(){return bn}},{key:"Event",get:function(){return kn}},{key:"EVENT_KEY",get:function(){return wn}},{key:"DefaultType",get:function(){return Dn}}]),i}(),yn.fn[En]=Rn._jQueryInterface,yn.fn[En].Constructor=Rn,yn.fn[En].noConflict=function(){return yn.fn[En]=Cn,Rn._jQueryInterface},Rn),Qi=(Bn="popover",Kn="."+(qn="bs.popover"),Qn=(Un=t).fn[Bn],Yn="bs-popover",Vn=new RegExp("(^|\\s)"+Yn+"\\S+","g"),zn=l({},Ki.Default,{placement:"right",trigger:"click",content:"",template:''}),Gn=l({},Ki.DefaultType,{content:"(string|element|function)"}),Jn="fade",Xn=".popover-header",$n=".popover-body",ei={HIDE:"hide"+Kn,HIDDEN:"hidden"+Kn,SHOW:(Zn="show")+Kn,SHOWN:"shown"+Kn,INSERTED:"inserted"+Kn,CLICK:"click"+Kn,FOCUSIN:"focusin"+Kn,FOCUSOUT:"focusout"+Kn,MOUSEENTER:"mouseenter"+Kn,MOUSELEAVE:"mouseleave"+Kn},ti=function(e){var t,n;function i(){return e.apply(this,arguments)||this}n=e,(t=i).prototype=Object.create(n.prototype),(t.prototype.constructor=t).__proto__=n;var r=i.prototype;return r.isWithContent=function(){return 
this.getTitle()||this._getContent()},r.addAttachmentClass=function(e){Un(this.getTipElement()).addClass(Yn+"-"+e)},r.getTipElement=function(){return this.tip=this.tip||Un(this.config.template)[0],this.tip},r.setContent=function(){var e=Un(this.getTipElement());this.setElementContent(e.find(Xn),this.getTitle());var t=this._getContent();"function"==typeof t&&(t=t.call(this.element)),this.setElementContent(e.find($n),t),e.removeClass(Jn+" "+Zn)},r._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},r._cleanTipClass=function(){var e=Un(this.getTipElement()),t=e.attr("class").match(Vn);null!==t&&0=this._offsets[r]&&("undefined"==typeof this._offsets[r+1]||e 1) { - document.getElementById("int_after").disabled = false; - } else { - document.getElementById("int_after").disabled = true; - } - - // Update the image download and header links - // document.getElementById("download_fits").href = '/static/filesystem/' + file_root.slice(0,7) + '/' + fits_filename + '.fits'; - // document.getElementById("download_jpg").href = jpg_filepath; + // Update the image exploration and header links document.getElementById("view_header").href = '/' + inst + '/' + file_root + '_' + type + '/header/'; - - // Disable the "left" button, since this will be showing integ0 - document.getElementById("int_before").disabled = true; - -}; + document.getElementById("explore_image").href = '/' + inst + '/' + file_root + '_' + type + '/explore_image/'; +} /** @@ -76,43 +153,47 @@ function change_filetype(type, file_root, num_ints, available_ints, inst) { * @param {Dict} num_ints - A dictionary whose keys are suffix types and whose * values are the number of integrations for that suffix * @param {Dict} available_ints - A dictionary whose keys are suffix types and whose - * values are the integration numbers of the available + * values are the integration numbers of the available * jpgs for that suffix * @param {String} method - How the integration change was initialized, either "button" or "slider" * @param {String} direction - The direction to switch to, either "left" (decrease) or "right" (increase). * Only relevant if method is "button". 
*/ -function change_int(file_root, num_ints, available_ints, method, direction = 'right') { +function change_integration(file_root, num_ints, available_ints, method, direction='right') { // Figure out the current image and integration - var suffix = document.getElementById("jpg_filename").innerHTML.split('_'); - var integration = Number(suffix[suffix.length - 1].replace('.jpg','').replace('integ','')) - var suffix = suffix[suffix.length - 2]; - var program = file_root.slice(0,7); + var suffix = document.getElementById("view_file_type").getAttribute('data-current-suffix'); + var integration = Number(document.getElementById("slider_val").innerText) - 1; + var program = parse_filename(file_root).program; // Find the total number of integrations for the current image - var num_ints = num_ints.replace(/'/g, '"'); - var num_ints = JSON.parse(num_ints)[suffix]; + num_ints = num_ints.replace(/'/g, '"'); + num_ints = JSON.parse(num_ints)[suffix]; // Get the available integration jpg numbers and the current integration index - var available_ints = available_ints.replace(/'/g, '"'); - var available_ints = JSON.parse(available_ints)[suffix]; + available_ints = available_ints.replace(/'/g, '"'); + available_ints = JSON.parse(available_ints)[suffix]; var current_index = available_ints.indexOf(integration); // Get the desired integration value + var new_integration; + var new_value; switch (method) { case "button": if ((integration == num_ints - 1 && direction == 'right')|| (integration == 0 && direction == 'left')) { return; } else if (direction == 'right') { - new_integration = available_ints[current_index + 1] + new_value = current_index + 1; + new_integration = available_ints[new_value]; } else if (direction == 'left') { - new_integration = available_ints[current_index - 1] + new_value = current_index - 1; + new_integration = available_ints[new_value]; } break; case "slider": - new_integration = available_ints[document.getElementById("slider_range").value - 1]; + new_value = document.getElementById("slider_range").value - 1; + new_integration = available_ints[new_value]; break; } @@ -128,51 +209,50 @@ function change_int(file_root, num_ints, available_ints, method, direction = 'ri document.getElementById("int_before").disabled = false; } - // Update the JPG filename - var jpg_filename = file_root + '_' + suffix + '_integ' + new_integration + '.jpg' - var jpg_filepath = '/static/preview_images/' + program + '/' + jpg_filename - document.getElementById("jpg_filename").innerHTML = jpg_filename; + var img_viewers = document.getElementsByClassName("image_preview_viewer"); + for (let i = 0; i < img_viewers.length; i++) { + var img = img_viewers[i]; - // Show the appropriate image - var img = document.getElementById("image_viewer") - img.src = jpg_filepath; - img.alt = jpg_filepath; + var jpg_filename; + var detector = img.getAttribute('data-detector'); + if (detector != null) { + // exposure view + jpg_filename = file_root + '_' + detector + '_' + suffix + '_integ' + new_integration + '.jpg'; + } else { + // image view + jpg_filename = file_root + '_' + suffix + '_integ' + new_integration + '.jpg'; + document.getElementById("jpg_filename").innerHTML = jpg_filename; + // if previous image had error, remove error sizing + img.classList.remove("thumbnail"); + } + + // Show the appropriate image + var jpg_filepath = '/static/preview_images/' + program + '/' + jpg_filename; + img.src = jpg_filepath; + img.alt = jpg_filepath; - // Update the jpg download link - // document.getElementById("download_jpg").href = 
jpg_filepath; + // Show/hide the viewer as appropriate for the image + if (detector != null) { + show_viewer(detector, jpg_filepath); + } + } // Update the slider values - document.getElementById("slider_range").value = new_integration + 1 - document.getElementById("slider_val").innerHTML = new_integration + 1 -}; + document.getElementById("slider_range").value = new_value + 1; + document.getElementById("slider_val").innerHTML = new_integration + 1; +} /** - * Determine what filetype to use for a thumbnail - * @param {String} thumbnail_dir - The path to the thumbnail directory - * @param {List} suffixes - A list of available suffixes for the file of interest - * @param {Integer} i - The index of the thumbnail - * @param {String} file_root - The rootname of the file corresponding to the thumbnail + * Clean garbage characters in input dictionary parameters passed as strings. + * @param {String} param_value - The parameter value to clean + * @returns {String} cleaned - The cleaned parameter value */ -function determine_filetype_for_thumbnail(thumbnail_dir, suffixes, i, file_root) { - - // Update the thumbnail to show the most processed filetype - var img = document.getElementById('thumbnail'+i); - if (suffixes.indexOf("cal") >= 0) { - var jpg_path = thumbnail_dir + file_root.slice(0,7) + '/' + file_root + '_cal_integ0.thumb'; - img.src = jpg_path; - } else if (suffixes.indexOf("rate") >= 0) { - var jpg_path = thumbnail_dir + file_root.slice(0,7) + '/' + file_root + '_rate_integ0.thumb'; - img.src = jpg_path; - } else if (suffixes.indexOf("uncal") >= 0) { - var jpg_path = thumbnail_dir + file_root.slice(0,7) + '/' + file_root + '_uncal_integ0.thumb'; - img.src = jpg_path; - } else if (suffixes.indexOf("dark") >= 0) { - var jpg_path = thumbnail_dir + file_root.slice(0,7) + '/' + file_root + '_dark_integ0.thumb'; - img.src = jpg_path; - }; - -}; +function clean_input_parameter(param_value) { + param_value = param_value.replace(/'/g, '"'); + param_value = param_value.replace(/'/g, '"'); + return param_value +} /** @@ -185,6 +265,7 @@ function determine_page_title(instrument, proposal) { var url = document.URL; var url_split = url.split('/'); var url_title = url_split[url_split.length - 2]; + var final_title; if (url_title == 'archive') { final_title = 'Archived ' + instrument + ' Images: Proposal ' + proposal } else if (url_title == 'unlooked') { @@ -196,9 +277,365 @@ function determine_page_title(instrument, proposal) { document.getElementById('title').innerHTML = final_title; if (document.title != final_title) { document.title = final_title; - }; - }; -}; + } + } +} + +/** + * Determine whether the page is archive or unlooked + * @param {String} instrument - The instrument of interest + * @param {Integer} proposal - The proposal of interest + * @param {Integer} observation - The observation number of interest + */ +function determine_page_title_obs(instrument, proposal, observation) { + // Determine if the URL is 'archive' or 'unlooked' + var url = document.URL; + var url_split = url.split('/'); + var url_title = url_split[url_split.length - 3]; + var final_title; + if (url_title == 'archive') { + final_title = 'Archived ' + instrument + ' Images: Proposal ' + proposal + ', Observation ' + observation + } else if (url_title == 'unlooked') { + final_title = 'Unlooked ' + instrument + ' Images'; + } else if (isNaN(url_title) == false) { + final_title = 'Archived ' + instrument + ' Images: Proposal ' + proposal + ', Observation ' + observation + } + + // Update the titles accordingly + if (typeof 
final_title !== 'undefined') { + document.getElementById('title').innerHTML = final_title; + if (document.title != final_title) { + document.title = final_title; + } + } +} + +/** + * adds/removes disabled_section class and clears value + * @param {string} element_id + * @param {boolean} set_disable + */ + function set_disabled_section (element_id, set_disable) { + + if (set_disable) { + document.getElementById(element_id).classList.add("disabled_section"); + } else { + document.getElementById(element_id).classList.remove("disabled_section"); + } +} +/** + * Interprets number of integrations/groups for the selected extension and disables input for calculating difference accordingly + * @param {Dict} integrations - A dictionary whose keys are extensions and whose + * values are the number of integrations for that suffix + * @param {Dict} groups - A dictionary whose keys are extensions and whose + * values are the number of groups for that suffix + */ +function explore_image_update_enable_options(integrations, groups) { + + // Check nr of integrations and groups of currently selected extension + var ext_name = get_radio_button_value("extension"); + + // Clean the input parameters and get our integrations/groups for this extension + var calc_difference = false; + integrations = integrations.replace(/'/g, '"'); + integrations = integrations.replace(/'/g, '"'); + integrations = JSON.parse(integrations)[ext_name]; + groups = groups.replace(/'/g, '"'); + groups = groups.replace(/'/g, '"'); + groups = JSON.parse(groups)[ext_name]; + + // Zero base our calculations + integrations -= 1 + groups -=1 + + // Set max values to those available + document.getElementById("integration1").max = integrations; + document.getElementById("integration2").max = integrations; + document.getElementById("group1").max = groups; + document.getElementById("group2").max = groups; + + + // If multiple integrations or groups. 
Allow difference calculations
+    // enable calculate_difference box
+    // enable subtrahend boxes
+    if (integrations > 0 || groups > 0) {
+        set_disabled_section("calcDifferenceForm", false);
+        calc_difference = document.getElementById("calcDifference").checked;
+
+    } else {
+        document.getElementById("calcDifference").checked = false;
+        set_disabled_section("calcDifferenceForm", true);
+    }
+
+    if (!calc_difference) {
+        document.getElementById("integration2").value = null;
+        document.getElementById("group2").value = null;
+    }
+    if (integrations < 1) {
+        document.getElementById("integration1").value = null;
+        document.getElementById("integration2").value = null;
+    }
+    if (groups < 1){
+        document.getElementById("group1").value = null;
+        document.getElementById("group2").value = null;
+    }
+    // Add/remove disable class to integration/group input if not multiple
+    set_disabled_section("integrationInput1", (integrations < 1));
+    set_disabled_section("groupInput1", (groups < 1));
+    set_disabled_section("integrationInput2", (!calc_difference || integrations < 1));
+    set_disabled_section("groupInput2", (!calc_difference || groups < 1));
+
+}
+
+
+/**
+ * getCookie
+ * taken from https://docs.djangoproject.com/en/4.1/howto/csrf/
+ * @param {String} name - The name of the cookie element you want to extract
+ * @returns value - value of the extracted cookie element
+ */
+function getCookie(name) {
+    let cookieValue = null;
+    if (document.cookie && document.cookie !== '') {
+        const cookies = document.cookie.split(';');
+        for (let i = 0; i < cookies.length; i++) {
+            const cookie = cookies[i].trim();
+            // Does this cookie string begin with the name we want?
+            if (cookie.substring(0, name.length + 1) === (name + '=')) {
+                cookieValue = decodeURIComponent(cookie.substring(name.length + 1));
+                break;
+            }
+        }
+    }
+    return cookieValue;
+}
+
+
+/**
+ * get_radio_button_value
+ * @param {String} element_name - The name of the radio buttons
+ * @returns value - value of checked radio button
+ */
+function get_radio_button_value(element_name) {
+    var element = document.getElementsByName(element_name);
+
+    for(var i = 0; i < element.length; i++) {
+        if(element[i].checked) {
+            return element[i].value;
+        }
+    }
+    return "";
+}
+
+/**
+ * Get value from a numerical text field
+ * @param {String} element_id - The element id
+ * @returns value - value of element id or "None" if empty or not a number
+*/
+function get_number_or_none(element_id) {
+
+    var limit = document.getElementById(element_id).value;
+    if (limit.length == 0 || isNaN(limit)) limit = "None";
+    return limit;
+}
+
+
+/**
+ * Group thumbnail display by exposure or file, save group type in session
+ * @param {String} group_type - The group type
+ * @param {String} base_url - The base URL for gathering data from the AJAX view.
+ */ +function group_by_thumbnails(group_type, base_url) { + + // Update dropdown menu text and update thumbnails for current setting + show_only('group', group_type, base_url); + + // Group divs to update display style + var group_divs = document.getElementsByClassName("thumbnail-group"); + // Thumbnail links to update to group or image pages + var thumbnail_links = document.getElementsByClassName("thumbnail-link"); + // Show count total and type to update + var img_total = document.getElementById('img_total'); + var img_type = document.getElementById('img_type'); + var group_by = document.getElementById('group_by') + + if (group_type == 'Exposure') { + img_total.innerText = group_by.getAttribute('data-ngroup'); + img_type.innerText = 'groups'; + for (let i = 0; i < group_divs.length; i++) { + group_divs[i].classList.add('thumbnail-group-active'); + thumbnail_links[i].href = thumbnail_links[i].getAttribute('data-group-href'); + } + } else { + img_total.innerText = group_by.getAttribute('data-nfile'); + img_type.innerText = 'activities'; + for (let i = 0; i < group_divs.length; i++) { + group_divs[i].classList.remove('thumbnail-group-active'); + thumbnail_links[i].href = thumbnail_links[i].getAttribute('data-image-href'); + } + } + + $.ajax({ + url: base_url + '/ajax/image_group/', + data: { + 'group_type': group_type + }, + error : function(response) { + console.log("session image group update failed"); + } + }); +} + + +/** + * Hide an image viewer + * @param {String} detector - The detector name for the image viewer + */ +function hide_file(detector) { + var img = document.getElementById("image_viewer_" + detector); + var div = document.getElementById(detector + "_view"); + var filename = document.getElementById(detector + "_filename"); + + // Hide the image and div + img.style.display = "none"; + div.style.display = "none"; + + // Disable the associated filename unless there + // are no previews available at all + var fallback_shown = document.getElementById(detector + "_view_fallback"); + if (fallback_shown.style.display == "none") { + filename.disabled = true; + } + + // Update the view/explore link as needed + update_view_explore_link(); +} + +/** + * Show an image viewer + * @param {String} detector - The detector name for the image viewer + */ +function unhide_file(detector) { + var img = document.getElementById("image_viewer_" + detector); + var div = document.getElementById(detector + "_view"); + var filename = document.getElementById(detector + "_filename"); + + // Show the image and div + img.style.display = "inline-block"; + div.style.display = "inline-block"; + + // Hide the fallback image and div + // These are never re-displayed: if any image loads for the detector, + // they will not show up. This is intended to cover the case where FITS files + // exist for the exposure, but no preview images have been generated yet. + document.getElementById("fallback_image_viewer_" + detector).style.display = "none"; + document.getElementById(detector + "_view_fallback").style.display = "none"; + + // Enable the associated filename + filename.disabled = false; + + // Update the view/explore link as needed + update_view_explore_link(); +} + + +/** + * Insert thumbnail images inside existing HTML img tags + * @param {List} updates - A list of updates to make, as [thumbnail_id, jpg_path]. 
+ */ +function insert_thumbnail_images(updates) { + // Update the thumbnail image source + for (var i = 0; i < updates.length; i++) { + var thumb_id = updates[i][0]; + var jpg_path = updates[i][1]; + set_thumbnail_image_source(thumb_id, jpg_path); + } +} + + +/** + * Check for a thumbnail image and add it to an img if it exists + * @param {Integer} thumb_id - The ID number for the thumbnail img + * @param {String} jpg_filepath - The image to show + */ +function set_thumbnail_image_source(thumb_id, jpg_path) { + $.get(jpg_path, function() { + var img = document.getElementById('thumbnail' + thumb_id); + img.src = jpg_path;}) +} + + +/** + * Parse observation information from a JWST file name. + * @param {String} filename - The file or group root name to parse + * @returns {Object} parsed - Dictionary containing 'proposal', 'obs_id', 'visit_id', 'program' + */ +function parse_filename(root_name) { + // eg. for root_name jw02589006001_04101_00001-seg001_nrs1: + // program = jw02589 + // proposal = 02589 + // obs_id = 006 + // visit_id = 001 + + // used for preview directories + var program = root_name.slice(0,7); + + // used for observation description fields + var proposal = root_name.slice(2, 7); + var obs_id = root_name.slice(7, 10); + var visit_id = root_name.slice(10, 13); + + const parsed_name = {program: program, proposal: proposal, + obs_id: obs_id, visit_id: visit_id}; + return parsed_name; +} + + +/** + * Reset the integration slider for a new file + * @param {Int} num_integration - The number of integration images available + * @param {Int} total_integration - The total number of integrations to display + */ +function reset_integration_slider(num_integration, total_integration) { + // Reset the slider values + document.getElementById("slider_range").value = 1; + document.getElementById("slider_range").max = num_integration; + document.getElementById("slider_val").innerHTML = 1; + document.getElementById("total_ints").innerHTML = total_integration; + + // Update the integration changing buttons + if (num_integration > 1) { + document.getElementById("int_after").disabled = false; + } else { + document.getElementById("int_after").disabled = true; + } + + // Disable the "left" button, since this will be showing integ0 + document.getElementById("int_before").disabled = true; +} + + +/** + * Check for a detector image and show or hide its viewer accordingly. 
+ * @param {String} detector - The detector name + * @param {String} jpg_filepath - The image to show + */ +function show_viewer(detector, jpg_filepath) { + $.get(jpg_filepath, function() {unhide_file(detector);}) + .fail(function() {hide_file(detector)}); +} + +/** + * If an image is not found, replace with temporary image sized to thumbnail + */ +function image_error(image, makeThumbnail=false) { + image.src = "/static/img/imagenotfound.png"; + /* Use thumbnail settings to keep it tidy */ + if (makeThumbnail) { + image.className = "thumbnail"; + } + return true; +} /** @@ -208,48 +645,49 @@ function search() { // Find all proposal elements var proposals = document.getElementsByClassName("proposal"); - var n_proposals = document.getElementsByClassName("proposal").length; // Determine the current search value var search_value = document.getElementById("search_box").value; // Determine whether or not to display each thumbnail var num_proposals_displayed = 0; - for (i = 0; i < proposals.length; i++) { + for (var i = 0; i < proposals.length; i++) { // Evaluate if the proposal number matches the search var j = i + 1 - var prop_name = document.getElementById("proposal" + j).getAttribute('proposal') + var prop_name = document.getElementById("proposal" + j).getAttribute('data-proposal') var prop_num = Number(prop_name) - if (prop_name.startsWith(search_value) || prop_num.toString().startsWith(search_value)) { proposals[i].style.display = "inline-block"; num_proposals_displayed++; } else { proposals[i].style.display = "none"; } - }; + } // If there are no proposals to display, tell the user if (num_proposals_displayed == 0) { document.getElementById('no_proposals_msg').style.display = 'inline-block'; } else { document.getElementById('no_proposals_msg').style.display = 'none'; - }; + } // Update the count of how many images are being shown - document.getElementById('img_show_count').innerHTML = 'Showing ' + num_proposals_displayed + '/' + n_proposals + ' proposals'; -}; + document.getElementById('img_shown').innerText = num_proposals_displayed; +} /** * Limit the displayed thumbnails based on filter criteria - * @param {String} filter_type - The filter type. Currently only "sort" is supported. + * @param {String} filter_type - The filter type. * @param {Integer} value - The filter value - * @param {List} dropdown_keys - A list of dropdown menu keys - * @param {Integer} num_fileids - The number of files that are available to display + * @param {String} base_url - The base URL for gathering data from the AJAX view. 
*/ -function show_only(filter_type, value, dropdown_keys, num_fileids) { +function show_only(filter_type, value, base_url) { + + var filter_div = document.getElementById('filter_by'); + var dropdown_keys = filter_div.getAttribute('data-dropdown-key-list'); + var thumbnail_class = filter_div.getAttribute('data-thumbnail-class'); // Get all filter options from {{dropdown_menus}} variable var all_filters = dropdown_keys.split(','); @@ -257,45 +695,89 @@ function show_only(filter_type, value, dropdown_keys, num_fileids) { // Update dropdown menu text document.getElementById(filter_type + '_dropdownMenuButton').innerHTML = value; - // Find all thumbnail elements - var thumbnails = document.getElementsByClassName("thumbnail"); + // Check for grouping setting for special handling + var group_option = document.getElementById('group_dropdownMenuButton') + var group = false; + if (group_option != null) { + group = (group_option.innerText == 'Exposure'); + } // Determine the current value for each filter var filter_values = []; - for (j = 0; j < all_filters.length; j++) { + for (var j = 0; j < all_filters.length; j++) { var filter_value = document.getElementById(all_filters[j] + '_dropdownMenuButton').innerHTML; filter_values.push(filter_value); } + // Find all thumbnail elements + var thumbnails = document.getElementsByClassName(thumbnail_class); + // Determine whether or not to display each thumbnail var num_thumbnails_displayed = 0; - for (i = 0; i < thumbnails.length; i++) { + var list_of_rootnames = ""; + var groups_shown = new Set(); + for (var i = 0; i < thumbnails.length; i++) { // Evaluate if the thumbnail meets all filter criteria var criteria = []; for (j = 0; j < all_filters.length; j++) { - var criterion = (filter_values[j].indexOf('All '+ all_filters[j] + 's') >=0) || (thumbnails[i].getAttribute(all_filters[j]) == filter_values[j]); + var filter_attribute = thumbnails[i].getAttribute('data-' + all_filters[j]); + var criterion = (filter_values[j].indexOf('All '+ all_filters[j] + 's') >=0) + || (filter_attribute.includes(filter_values[j])); criteria.push(criterion); - }; + } - // Only display if all filter criteria are met + // If data are grouped, check if a thumbnail for the group has already been displayed + var show_group = true; + if (group && groups_shown.has(thumbnails[i].getAttribute('data-group_root'))) { + show_group = false; + } + + // Only display if all criteria are met if (criteria.every(function(r){return r})) { - thumbnails[i].style.display = "inline-block"; - num_thumbnails_displayed++; + // if group has already been shown, do not show thumbnail, + // but do store the file root for navigation + if (show_group) { + thumbnails[i].style.display = "inline-block"; + num_thumbnails_displayed++; + if (group) { groups_shown.add(thumbnails[i].getAttribute('data-group_root')); } + } else { + thumbnails[i].style.display = "none"; + } + list_of_rootnames = list_of_rootnames + + thumbnails[i].getAttribute("data-file_root") + + '=' + thumbnails[i].getAttribute("data-exp_start") + ','; } else { thumbnails[i].style.display = "none"; } - }; - - // If there are no thumbnails to display, tell the user - if (num_thumbnails_displayed == 0) { - document.getElementById('no_thumbnails_msg').style.display = 'inline-block'; - } else { - document.getElementById('no_thumbnails_msg').style.display = 'none'; - }; + } + if (document.getElementById('no_thumbnails_msg') != null) { + // If there are no thumbnails to display, tell the user + if (num_thumbnails_displayed == 0) { + 
document.getElementById('no_thumbnails_msg').style.display = 'inline-block'; + } else { + document.getElementById('no_thumbnails_msg').style.display = 'none'; + } + } // Update the count of how many images are being shown - document.getElementById('img_show_count').innerHTML = 'Showing ' + num_thumbnails_displayed + '/' + num_fileids + ' activities' -}; + document.getElementById('img_shown').innerText = num_thumbnails_displayed; + if (num_thumbnails_displayed) { + // remove trailing ','. + list_of_rootnames = list_of_rootnames.slice(0, -1); + const csrftoken = getCookie('csrftoken'); + $.ajax({ + type: 'POST', + url: base_url + '/ajax/navigate_filter/', + headers: { "X-CSRFToken": csrftoken }, + data:{ + 'navigate_dict': list_of_rootnames + }, + error : function(response) { + console.log("navigate_filter update failed"); + } + }); + } +} /** @@ -312,33 +794,153 @@ function sort_by_proposals(sort_type) { tinysort(props, {order:'asc'}); } else if (sort_type == 'Descending') { tinysort(props, {order:'desc'}); + } else if (sort_type == 'Recent') { + // Sort by the most recent Observation Start + tinysort(props, {order:'desc', attr:'data-obs_time'}); } -}; +} /** - * Sort thumbnail display by a given sort type - * @param {String} sort_type - The sort type (e.g. file_root", "exp_start") + * Sort thumbnail display by a given sort type, save sort type in session for use in previous/next buttons + * @param {String} sort_type - The sort type by file name + * @param {String} base_url - The base URL for gathering data from the AJAX view. */ -function sort_by_thumbnails(sort_type) { +function sort_by_thumbnails(sort_type, base_url) { // Update dropdown menu text document.getElementById('sort_dropdownMenuButton').innerHTML = sort_type; - // Sort the thumbnails accordingly + // Sort the thumbnails accordingly. + // Note: Because thumbnails will sort relating to their current order (when the exp_start is the same between thumbnails), we need to do multiple sorts to guarantee consistency. + var thumbs = $('div#thumbnail-array>div') - if (sort_type == 'Name') { - tinysort(thumbs, {attr:'file_root'}); - } else if (sort_type == 'Default') { - tinysort(thumbs, {selector: 'img', attr:'id'}); - } else if (sort_type == 'Exposure Start Time') { - tinysort(thumbs, {attr:'exp_start'}); + if (sort_type == 'Descending') { + tinysort(thumbs, {attr:'data-file_root', order:'desc'}); + } else if (sort_type == 'Recent') { + tinysort(thumbs, {attr:'data-exp_start', order:'desc'}, {attr:'data-file_root', order:'asc'}); + } else if (sort_type == 'Oldest') { + tinysort(thumbs, {attr:'data-exp_start', order:'asc'}, {attr:'data-file_root', order:'asc'}); + } else { + // Default to 'Ascending' + tinysort(thumbs, {attr:'data-file_root', order:'asc'}); } -}; + $.ajax({ + url: base_url + '/ajax/image_sort/', + data: { + 'sort_type': sort_type + }, + error : function(response) { + console.log("session image sort update failed"); + } + }); +} /** - * Updates various compnents on the archive page + * Toggle a viewed button when pressed. + * Ajax call to update RootFileInfo model with toggled value + * + * @param {String} file_root - The rootname of the file corresponding to the thumbnail + * @param {String} base_url - The base URL for gathering data from the AJAX view. + */ +function toggle_viewed(file_root, base_url) { + // Toggle the button immediately so user isn't confused + // (ajax result will confirm choice or fix on failure) + var elem = document.getElementById("viewed"); + update_viewed_button(elem.value == "New" ? 
true : false); + elem.disabled=true; + + // Ajax Call to update RootFileInfo model with "viewed" info + $.ajax({ + url: base_url + '/ajax/viewed/' + file_root, + success: function(data){ + // Update button with actual value (paranoia update, should not yield visible change) + update_viewed_button(data["marked_viewed"]); + elem.disabled=false; + }, + error : function(response) { + // If update fails put button back to original state + update_viewed_button(elem.value == "New" ? false : true); + elem.disabled=false; + + } + }); +} + + +/** + * Set the viewed status for a group of files. + * Ajax call to update RootFileInfo model with toggled value + * + * @param {String} group_root - The rootname of the exposure group + * @param {String} base_url - The base URL for gathering data from the AJAX view. + */ +function toggle_viewed_group(group_root, base_url) { + // Toggle the button immediately so user isn't confused + var elem = document.getElementById("viewed"); + var to_viewed = elem.value.includes('New'); + update_viewed_button(to_viewed, true); + elem.disabled=true; + + // Ajax Call to update RootFileInfo model with "viewed" info + $.ajax({ + url: base_url + '/ajax/viewed_group/' + group_root + '/' + (to_viewed ? 'viewed' : 'new'), + success: function(data){ + // Update button with actual value (paranoia update, should not yield visible change) + update_viewed_button(data["marked_viewed"], true); + elem.disabled=false; + }, + error : function(response) { + // If update fails put button back to original state + update_viewed_button(!to_viewed, true); + elem.disabled=false; + + } + }); +} + + +/** + * Download filtered data report + * @param {String} inst - The instrument in use + * @param {String} base_url - The base URL for gathering data from the AJAX view. + */ +function download_report(inst, base_url) { + var elem = document.getElementById('download_report_button'); + elem.disabled = true; + + // Get sort value + var sort_option = document.getElementById('sort_dropdownMenuButton').innerText; + var options = '?sort_as=' + sort_option.toLowerCase(); + + // Get search value - use as proposal.startswith + var search_value = document.getElementById("search_box").value; + if (search_value != '') { + options += '&proposal=' + search_value; + } + + // Get all filter values + var filter_div = document.getElementById('thumbnail-filter'); + var filters = filter_div.getElementsByClassName('dropdown-toggle'); + + for (var i=0; i < filters.length; i++) { + var name = filters[i].id.split('_dropdownMenuButton')[0]; + var status = filters[i].innerText.toLowerCase(); + if (!status.includes('all')) { + options += '&' + name + '=' + status; + } + } + var report_url = '/' + inst + '/report' + options; + console.log('Redirecting to: ' + report_url); + + // Redirect to download content + window.location = base_url + report_url; + elem.disabled = false; +} + +/** + * Updates various components on the archive page * @param {String} inst - The instrument of interest (e.g. "FGS") * @param {String} base_url - The base URL for gathering data from the AJAX view. 
*/ @@ -348,22 +950,33 @@ function update_archive_page(inst, base_url) { success: function(data){ // Update the number of proposals displayed - num_proposals = data.thumbnails.proposals.length; + var num_proposals = data.thumbnails.proposals.length; update_show_count(num_proposals, 'proposals') + update_filter_options(data, base_url, 'proposal'); // Add content to the proposal array div for (var i = 0; i < data.thumbnails.proposals.length; i++) { // Parse out useful variables - prop = data.thumbnails.proposals[i]; - thumb = data.thumbnails.thumbnail_paths[i]; - n = data.thumbnails.num_files[i]; + var prop = data.thumbnails.proposals[i]; + var min_obsnum = data.min_obsnum[i]; + var thumb = data.thumbnails.thumbnail_paths[i]; + var n = data.thumbnails.num_files[i]; + var viewed = data.thumbnails.viewed[i]; + var exp_types = data.thumbnails.exp_types[i]; + var obs_time = data.thumbnails.obs_time[i]; + var cat_type = data.thumbnails.cat_types[i]; // Build div content - content = '
'; - content += ''; + content += ''; content += '' - content += ''; + content += ''; content += '
'; content += '
'; content += '

' + prop + '

'; @@ -376,23 +989,162 @@ function update_archive_page(inst, base_url) { // Replace loading screen with the proposal array div document.getElementById("loading").style.display = "none"; document.getElementById("proposal-array").style.display = "block"; - }; + } }}); -}; +} + + +/** + * Updates various components on the MSATA page + * @param {String} inst - The instrument of interest (e.g. "FGS") + * @param {String} base_url - The base URL for gathering data from the AJAX view. + */ +function update_msata_page(base_url) { + $.ajax({ + url: base_url + '/ajax/nirspec/msata/', + success: function(data){ + + // Build div content + var content = data["div"]; + content += data["script1"]; + content += data["script2"]; + + /* Add the content to the div + * Note: - -
-
-

Dynamic Anomaly Form

- {{ csrf_input }} -

Use this form to query the archive for anomalies for any instrument or combination of instruments. - Enter the instruments for which you would like to view the archive and other constraints. - Feel free to leave fields blank if you do not want to further constrain the database search. - Note, however, that it may increase the run time if you search a wider array of images. -

- - -
-
-
- -
- {% for option in form.instrument %} -
- {{ option }} -
- {% endfor %} -
-
- -
-
-
-
-
- FGS Aperture(s) - {% for field in form.fgs_aper %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- FGS Detector - {% for field in form.fgs_detector %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- FGS Read Pattern - {% for field in form.fgs_readpatt %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- FGS Exposure Type - {% for field in form.fgs_exptype %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- FGS Anomalies - {% for field in form.fgs_anomalies %} -
- {{ field }} -
- {% endfor %} -
-
-
-
- -
-
-
-
-
- MIRI Subarray(s) - {% for field in form.miri_aper %} -
- {{ field }} -
- {% endfor %} -
-
-
-
- MIRI Filter(s) - {% for field in form.miri_filt %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- MIRI Detector - {% for field in form.miri_detector %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- MIRI Read Pattern - {% for field in form.miri_readpatt %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- MIRI Exposure Type - {% for field in form.miri_exptype %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- MIRI Anomalies - {% for field in form.miri_anomalies %} -
- {{ field }} -
- {% endfor %} -
-
-
-
- -
-
-
- NIRCam Aperture(s) - {% for field in form.nircam_aper %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRCam Filter(s) - {% for field in form.nircam_filt %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRCam Detector - {% for field in form.nircam_detector %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRCam Read Pattern - {% for field in form.nircam_readpatt %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRCam Exposure Type - {% for field in form.nircam_exptype %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRCam Anomalies - {% for field in form.nircam_anomalies %} -
- {{ field }} -
- {% endfor %} -
-
-
-
- -
-
-
- NIRISS Aperture(s) - {% for field in form.niriss_aper %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRISS Filter(s) - {% for field in form.niriss_filt %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRISS Read Pattern - {% for field in form.niriss_readpatt %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRISS Exposure Type - {% for field in form.niriss_exptype %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRISS Anomalies - {% for field in form.niriss_anomalies %} -
- {{ field }} -
- {% endfor %} -
-
-
-
- -
-
-
- NIRSpec Aperture(s) - {% for field in form.nirspec_aper %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRSpec Filter(s) - {% for field in form.nirspec_filt %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRSpec Detector - {% for field in form.nirspec_detector %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRSpec Grating - {% for field in form.nirspec_grating %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRSpec Read Pattern - {% for field in form.nirspec_readpatt %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRSpec Exposure Type - {% for field in form.nirspec_exptype %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
- NIRSpec Anomalies - {% for field in form.nirspec_anomalies %} -
- {{ field }} -
- {% endfor %} -
-
-
-
-
-
- -
-{% endblock %} \ No newline at end of file diff --git a/jwql/website/apps/jwql/templates/api_landing.html b/jwql/website/apps/jwql/templates/api_landing.html index a80922f9d..5218ce734 100644 --- a/jwql/website/apps/jwql/templates/api_landing.html +++ b/jwql/website/apps/jwql/templates/api_landing.html @@ -23,131 +23,41 @@

Explore the JWQL API


What is the JWQL API?


- JWQL allows users to access the data available through our programmatic Application Programming Interface (API). In short, an API - accepts requests for services from a system, gathers the items in the request, and returns a response to the user. A popular example used to describe the + An Application Programming Interface (API) accepts requests for services from a system, gathers the items in the request, and returns a response to the user. A + popular example used to describe the functionality of an API is describing the role of a waiter in a restaurant. When you enter a restaurant and sit at your table, you are typically provided a menu containing all of the drink and food options. But how do you communicate your food and drink order to the kitchen staff? This is where the waiter (API) comes in, you provide the waiter with your order (request), they will communicate your order to the kitchen staff (system), and bring your food and drinks when they are prepared (the response). The waiter (API) disguises all of the steps that are necessary to getting your final product delivered (cooking the food, counting stock, knowing recipes, etc.) but, is the link to the user and the response they wish to obtain from a system. -

-
-
+

+ JWQL allows users to access some JWST proposal and filename information through our web-based API. Note that JWQL does + not have a programmatic API due to security concerns and the authorization layer between the app and the outside world. The services listed + below will return lists of proposal numbers or filenames in JSON format when the appropriate URL is entered into the browser. -
-

Examples


-

- Here we showcase a few examples of a few of the APIs that we offer. To see a full list of supported APIs click here. -


-
Obtaining Filenames Associated with a Rootname
-

- A user is interested in all of the filenames associated with a specific JWST rootname.

-
-                
-                    from urllib import request
-                    import json
-
-                    # Build the URL https://jwql.stsci.edu/api/roontname/service/
-                    url = 'https://jwql.stsci.edu/api/jw93025001001_02102_00001_nrca2/filenames/'
-                    req = request.urlopen(url)
-                    filenames = json.loads(req.read().decode())
-
-                    print(filenames)
-
-                    {'filenames': ['jw93025001001_02102_00001_nrca2_calints.fits',
-                                    'jw93025001001_02102_00001_nrca2_rate.fits',
-                                    'jw93025001001_02102_00001_nrca2_rateints.fits',
-                                    'jw93025001001_02102_00001_nrca2_trapsfilled.fits',
-                                    'jw93025001001_02102_00001_nrca2_uncal.fits']}
-                    
-             
- -
Obtaining Thumbnails with Instrument Name
-

- A user is interested in all of the thumbnail associated with the Fine Guider Sensors (FGS). -

-
-                    
-                    from urllib import request
-                    import json
-
-                    # Build the URL https://jwql.stsci.edu/api/instrument/service/
-                    url = 'https://jwql.stsci.edu/api/fgs/thumbnails/'
-                    req = request.urlopen(url)
-                    thumbnails = json.loads(req.read().decode())
-
-                    print(thumbnails)
-
-                    {'thumbnails': ['jw97012001003_02101_00001_guider1_trapsfilled_integ2.thumb',
-                                    'jw97012001001_02101_00001_guider1_rateints_integ1.thumb',
-                                    'jw97012001002_02101_00001_guider1_trapsfilled_integ2.thumb',
-                                    'jw97012001004_02101_00001_guider1_rateints_integ0.thumb',
-                                    'jw97012001003_02101_00001_guider1_rateints_integ0.thumb',
-                                    'jw97012001001_02101_00001_guider1_cal_integ0.thumb',
-                                    'jw97012001001_02101_00001_guider1_rateints_integ0.thumb',
-                                    'jw97012001004_02101_00001_guider1_rate_integ0.thumb',
-                                    'jw97012001004_02101_00001_guider1_trapsfilled_integ0.thumb',
-                                    'jw97012001001_02101_00001_guider1_trapsfilled_integ1.thumb',
-                                    'jw97012001004_02101_00001_guider1_trapsfilled_integ1.thumb',
-                                    ...,
-                                    ...]}
-                        
-                    
- -
Obtaining Filenames with Proposal ID
-

- A user is interested in all of the thumbnail associated with the Fine Guider Sensors (FGS). -

-
-                        
-                        from urllib import request
-                        import json
-
-                        # Build the URL https://jwql.stsci.edu/api/proposal/service/
-                        url = 'https://jwql.stsci.edu/api/86600/filenames/'
-                        req = request.urlopen(url)
-                        filenames = json.loads(req.read().decode())
-
-                        print(filenames)
-
-                        {'filenames': ['jw86600001001_02101_00001_guider1_cal.fits',
-                                       'jw86600001001_02101_00001_guider1_rate.fits',
-                                       'jw86600001001_02101_00001_guider1_trapsfilled.fits',
-                                       'jw86600001001_02101_00001_guider1_uncal.fits',
-                                       'jw86600001001_02101_00002_guider1_cal.fits',
-                                       'jw86600001001_02101_00002_guider1_rate.fits',
-                                       'jw86600001001_02101_00002_guider1_trapsfilled.fits',
-                                       'jw86600001001_02101_00002_guider1_uncal.fits',
-                                       'jw86600001001_02101_00003_guider1_cal.fits',
-                                       'jw86600001001_02101_00003_guider1_rate.fits',
-                                       'jw86600001001_02101_00003_guider1_trapsfilled.fits',
-                                       'jw86600001001_02101_00003_guider1_uncal.fits',
-                                       'jw86600001001_02101_00004_guider1_cal.fits',
-                                       'jw86600001001_02101_00004_guider1_rate.fits',
-                                       'jw86600001001_02101_00004_guider1_trapsfilled.fits',
-                                       ...,
-                                       ...]}
-                        
-                    
+

List of Available Services


  • All Proposals (https://jwql.stsci.edu/api/proposals/)
  • Instrument Proposals (https://jwql.stsci.edu/api/<instrument>/proposals/)
  • -
  • Preview Images by Instrument (https://jwql.stsci.edu/api/<instrument>/preview_images/)
  • -
  • Thumbnails by Instrument (https://jwql.stsci.edu/api/<instrument>/thumbnails/)
  • -
  • Filenames by Proposal (https://jwql.stsci.edu/api/<instrument>/proposals/)
  • +
  • Filenames by Proposal (https://jwql.stsci.edu/api/<proposal>/filenames/)
  • Preview Images by Proposal (https://jwql.stsci.edu/api/<proposal>/preview_images/)
  • +
  • Thumbnails by Proposal (https://jwql.stsci.edu/api/<proposal>/thumbnails/)
  • Filenames by Rootname (https://jwql.stsci.edu/api/<rootname>/filenames/)
  • Preview Images by Rootname (https://jwql.stsci.edu/api/<rootname>/preview_images/)
  • Thumbnails by Rootname (https://jwql.stsci.edu/api/<rootname>/thumbnails/)
  • +
  • Look Status by Instrument (https://jwql.stsci.edu/api/<instrument>/looks/)
  • +
  • Viewed Data by Instrument (https://jwql.stsci.edu/api/<instrument>/looks/viewed/)
  • +
  • New Data by Instrument (https://jwql.stsci.edu/api/<instrument>/looks/new/)
  • +
-

Where <instrument>, <rootname>, <proposal> are the values for the instrument name, rootname and proposal ID respectivly

+

where <instrument>, <rootname>, <proposal> are the values for the instrument name, rootname and proposal ID respectively.

-{% endblock %} \ No newline at end of file +{% endblock %} diff --git a/jwql/website/apps/jwql/templates/archive.html b/jwql/website/apps/jwql/templates/archive.html index 25dc66f08..da93c43cb 100644 --- a/jwql/website/apps/jwql/templates/archive.html +++ b/jwql/website/apps/jwql/templates/archive.html @@ -17,14 +17,13 @@

Archived {{ inst }} Images

- -

+

-
+
@@ -32,17 +31,23 @@

Archived {{ inst }} Images

- +
- Sort by: - - + Sort by:
+ +
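The "Sort by" menu above hands the selection to the sort_by_thumbnails() JavaScript helper earlier in this diff, which reorders the thumbnails client-side and then calls /ajax/image_sort/ so the choice persists across pages. A minimal sketch of what that endpoint could do, assuming it only stores the value in the Django session (the real save_image_sort_ajax view is not shown in this diff); views.py later reads the value back with request.session.get('image_sort', 'Recent'):

    from django.http import JsonResponse

    def save_image_sort_sketch(request):
        # Remember the chosen ordering so later pages can reuse it.
        sort_type = request.GET.get('sort_type', 'Recent')
        request.session['image_sort'] = sort_type
        return JsonResponse({'image_sort': sort_type})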
+ +
+ Download as:
+
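The "Download as CSV" control is wired to the download_report() JavaScript helper shown earlier in this diff, which redirects to /<inst>/report with the current sort, search-box value, and filter selections encoded as query parameters. A hedged illustration of the resulting URL: 'sort_as' and 'proposal' come from that helper, the instrument and values are invented, and any further keys are simply the lower-cased names of whichever filter dropdowns are active:

    from urllib.parse import urlencode

    params = {'sort_as': 'recent', 'proposal': '2732'}   # illustrative values only
    report_url = '/nircam/report/?' + urlencode(params)
    # -> '/nircam/report/?sort_as=recent&proposal=2732'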
@@ -72,4 +77,4 @@

Archived {{ inst }} Images

-{% endblock %} \ No newline at end of file +{% endblock %} diff --git a/jwql/website/apps/jwql/templates/base.html b/jwql/website/apps/jwql/templates/base.html index d3256e49f..54bb04ba3 100644 --- a/jwql/website/apps/jwql/templates/base.html +++ b/jwql/website/apps/jwql/templates/base.html @@ -27,6 +27,11 @@ + + + + + {% block preamble %} {% endblock %} @@ -47,7 +52,7 @@ title="JWQL Logo"> JWQL {% if inst != "" %} - {{ inst }} + {{ inst }} {% endif %} @@ -84,6 +89,10 @@ + +
+ + + +{% endblock %} diff --git a/jwql/website/apps/jwql/templates/explore_image.html b/jwql/website/apps/jwql/templates/explore_image.html new file mode 100644 index 000000000..62ac88c9b --- /dev/null +++ b/jwql/website/apps/jwql/templates/explore_image.html @@ -0,0 +1,135 @@ +{% extends "base.html" %} + +{% block preamble %} + + Explore {{ inst }} Image - JWQL + +{% endblock %} + +{% block content %} + +
+ +

Explore Mode

+

{{ file_root }}_{{ filetype }}.fits

+

+ View Image + View Proposal + Download File +

+

+ +
+ +
+ +
+
+
+
+
+
+
+
+
+
+
+
+ Loading ... +
+
+ + +
+ + + +

Data Settings

+
+
+ + +
+ +
+ Extension:   + {% for extension in extensions %} + {% if extension == 'SCI' %} + {{ extension }}      + {% else %} + {{ extension }}      + {% endif %} + {% endfor %} +
+ +
+
+ + +
+
+ + +
+
+
+ + +
+
+
+ + +
+
+ + +
+
+
+

+ Apply Settings +

+
+ + +
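When the Data Settings above are applied, the page requests a re-rendered preview from the explore_image_ajax route added in urls.py later in this diff, encoding the scaling flag, extension, and integration/group limits as path segments. A sketch of that URL shape with an invented rootname and settings; exactly how the page JavaScript assembles the request is not shown here:

    inst, file_root, filetype = 'nircam', 'jw02732001005_02103_00005_nrcb1', 'cal'
    settings = 'plot_true/ext_SCI/int1_0/grp1_0/int2_1/grp2_1/'
    ajax_url = f'/ajax/{inst}/{file_root}_{filetype}/explore_image/{settings}'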
+ + +
+ +
+
Submit Anomaly
+ + + {% if form.errors %} +
+ {% for field in form %} + {% for error in field.errors %} +
+ {{ error|escape }} +
+ {% endfor %} + {% endfor %} +
+ {% endif %} + + + {{ csrf_input }} + + + {% for field in form %} + {% for subwidget in field.subwidgets %} +
  • {{subwidget}}
  • + {% endfor %} + {% endfor %} + + +
    +
    + +
    + + + +{% endblock %} diff --git a/jwql/website/apps/jwql/templates/home.html b/jwql/website/apps/jwql/templates/home.html index e2c7c2009..0e4ed8a3a 100644 --- a/jwql/website/apps/jwql/templates/home.html +++ b/jwql/website/apps/jwql/templates/home.html @@ -24,9 +24,9 @@

    The JWST Quicklook Application

    -  Logo link to {{ inst }}
    {{ inst }}
    @@ -41,9 +41,6 @@

    The JWST Quicklook Application


    The JWST Quicklook Application (JWQL) is a database-driven web application and automation framework for use by the JWST instrument teams to monitor the health and stability of the JWST instruments. Visit our about page to learn more about the project.

    - - The JWQL application is currently under heavy development. The 1.0 release is expected in the fall of 2021. -

    Find a JWST Proposal or File

    @@ -84,7 +81,7 @@

    Find a JWST Proposal or File

    Query the Archive

    - Enter Query Form + Enter Query Form
    diff --git a/jwql/website/apps/jwql/templates/instrument.html b/jwql/website/apps/jwql/templates/instrument.html index e1b348762..ed5ce89e9 100644 --- a/jwql/website/apps/jwql/templates/instrument.html +++ b/jwql/website/apps/jwql/templates/instrument.html @@ -75,7 +75,7 @@

    Monitors

    - +
    @@ -93,7 +93,7 @@

    Documentation


    - +
    diff --git a/jwql/website/apps/jwql/templates/jwql_query.html b/jwql/website/apps/jwql/templates/jwql_query.html new file mode 100644 index 000000000..6274c5a83 --- /dev/null +++ b/jwql/website/apps/jwql/templates/jwql_query.html @@ -0,0 +1,559 @@ +{% extends "base.html" %} + +{% block preamble %} + + + + + Dynamic Query Form - JWQL + + +{% endblock %} + +{% block content %} + + + +
    +

    Dynamic Query Form

    +

    Use this form to query the archive for data from any instrument or combination of instruments.

    +

Select at least one instrument, then add any other constraints you need. + Feel free to leave fields blank if you do not want to further constrain the database search. +

    +
    + +
    +
    + {{ csrf_input }} + +
    +
    +
    +
    + Date Range +
    + + +
    +
    +
    + +
    +
    + Instruments +
    + {% for option in form.instrument %} +
    + {{ option }} +
    + {% endfor %} +
    +
    +
    + Proposal Category +
    + {% for option in form.proposal_category %} +
    + {{ option }} +
    + {% endfor %} +
    +
    +
    + Look Status +
    + {% for option in form.look_status %} +
    + {{ option }} +
    + {% endfor %} +
    +
    +
    + Sort Type +
    + {% for option in form.sort_type %} +
    + {{ option }} +
    + {% endfor %} +
    +
    +
    + Results Per Page +
    + {% for option in form.num_per_page %} +
    + {{ option }} +
    + {% endfor %} +
    +
    +
    + + +
    +
    + FGS Anomalies +
    + {% for field in form.fgs_anomalies %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + FGS Apertures +
    + {% for field in form.fgs_aper %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + FGS Detectors +
    + {% for field in form.fgs_detector %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + FGS Exposure Types +
    + {% for field in form.fgs_exptype %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + FGS Read Patterns +
    + {% for field in form.fgs_readpatt %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    +
    + + +
    +
    + MIRI Anomalies +
    + {% for field in form.miri_anomalies %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + MIRI Detectors +
    + {% for field in form.miri_detector %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + MIRI Exposure Types +
    + {% for field in form.miri_exptype %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + MIRI Filters +
    + {% for field in form.miri_filt %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + MIRI Read Patterns +
    + {% for field in form.miri_readpatt %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + MIRI Subarrays +
    + {% for field in form.miri_subarray %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + + +
    +
    + NIRCam Anomalies +
    + {% for field in form.nircam_anomalies %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRCam Detectors +
    + {% for field in form.nircam_detector %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRCam Exposure Types +
    + {% for field in form.nircam_exptype %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRCam Filters +
    + {% for field in form.nircam_filt %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRCam Pupils +
    + {% for field in form.nircam_pupil %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRCam Read Patterns +
    + {% for field in form.nircam_readpatt %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRCam Subarrays +
    + {% for field in form.nircam_subarray %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    +
    + + +
    +
    + NIRISS Anomalies +
    + {% for field in form.niriss_anomalies %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRISS Detectors +
    + {% for field in form.niriss_detector %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRISS Exposure Types +
    + {% for field in form.niriss_exptype %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRISS Filters +
    + {% for field in form.niriss_filt %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRISS Pupils +
    + {% for field in form.niriss_pupil %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRISS Read Patterns +
    + {% for field in form.niriss_readpatt %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRISS Subarrays +
    + {% for field in form.niriss_subarray %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    +
    + + +
    +
    + NIRSpec Anomalies +
    + {% for field in form.nirspec_anomalies %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRSpec Apertures +
    + {% for field in form.nirspec_aper %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRSpec Detectors +
    + {% for field in form.nirspec_detector %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRSpec Exposure Types +
    + {% for field in form.nirspec_exptype %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRSpec Filters +
    + {% for field in form.nirspec_filt %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRSpec Gratings +
    + {% for field in form.nirspec_grating %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    + +
    + NIRSpec Read Patterns +
    + {% for field in form.nirspec_readpatt %} +
    + {{ field }} +
    + {% endfor %} +
    +
    +
    +
    +
    +
    + + +
    +
    +
    + +
    +{% endblock %} diff --git a/jwql/website/apps/jwql/templates/log_view.html b/jwql/website/apps/jwql/templates/log_view.html new file mode 100644 index 000000000..7418cb2a7 --- /dev/null +++ b/jwql/website/apps/jwql/templates/log_view.html @@ -0,0 +1,73 @@ +{% extends "base.html" %} + +{% block preamble %} + + Monitor Log Viewer - JWQL + +{% endblock %} + +{% block content %} + +
    +

    Explore JWQL monitoring logs through the web browser.


    + +

+ This page allows users to load monitoring logs into the web browser. This feature + obtains logs from all JWQL servers (ops, dev, and test). + +

    +

    + +
    +
    + {{ csrf_input }} + + +

    Select JWQL Monitoring Log

    +
    + + + {% for log_name in all_logs %} + + {% endfor %} + +
    + +
    + + +


    + + + + {% if log_text %} +

    {{ log_name|safe }}


    + +
    +                    {{ log_text }}
    +                
    + {% endif %} + + + + + +
    +
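The dropdown above is populated from an all_logs list assembled server-side. The log_view view itself is not part of this diff, so the following is only a sketch of one plausible way to gather the files, assuming a 'log_dir' key in the JWQL configuration (get_config is already imported in views.py):

    import glob
    import os

    from jwql.utils.utils import get_config

    log_dir = get_config()['log_dir']   # assumed configuration key
    all_logs = sorted(glob.glob(os.path.join(log_dir, '*', '*.log')),
                      key=os.path.getmtime, reverse=True)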
    + +{% endblock %} \ No newline at end of file diff --git a/jwql/website/apps/jwql/templates/msata_monitor.html b/jwql/website/apps/jwql/templates/msata_monitor.html new file mode 100644 index 000000000..b9038c1f5 --- /dev/null +++ b/jwql/website/apps/jwql/templates/msata_monitor.html @@ -0,0 +1,59 @@ +{% extends "base.html" %} + +{% block preamble %} + + {{ inst }} MSATA Monitor- JWQL + +{% endblock %} + +{% block content %} + +
    + +

    {{ inst }} MSATA Monitor

    + +
    +
    +

    This monitor uses all MSATA data available. + The plots are updated regularly from new data in MAST.

    + +

    Instructions

    +

    For general MSATA monitoring, you only need to look at the first two plots + below: 1. MSATA Status and 2. MSATA Least Squares Residual V2-V3 Offsets.

    +

Plot 1 shows the successful Visit IDs (blue points, which always correspond + to the second position - 1/2 shutter moved from the first), the unsuccessful + ones (red points), and the in-progress images (gray points, which always + correspond to the first position of the TA). MSATA can do up to two tries, + so you may see 4 entries for the same program ID in the gray points.

    +

Plot 2 shows the V2-V3 least-squares residual offsets: the closer to the + purple cross (the half facet coordinates) the better. The default limits of + the plot are the acceptable values. If you see any point at the edges or + outside the limits of (-0.5, 0.5) for both axes, please let any of the + NIRSpec TA experts know: Tracy Beck, Charles Proffit, and/or Tony Keyes.

    +
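The empty plot container below is filled by the update_msata_page() JavaScript earlier in this diff, which requests /ajax/nirspec/msata/ and injects data["div"], data["script1"], and data["script2"] into the page. A minimal sketch of a view returning a payload of that shape using Bokeh's components(); it is only an illustration, not the project's msata_monitoring_ajax implementation:

    from bokeh.embed import components
    from bokeh.plotting import figure
    from django.http import JsonResponse

    def msata_payload_sketch(request):
        fig = figure(title='MSATA Status')   # stand-in for the real monitor figure
        script, div = components(fig)
        # update_msata_page() concatenates data["div"] + data["script1"] + data["script2"]
        return JsonResponse({'div': div, 'script1': script, 'script2': ''})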
    + + +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    + Loading ... +
    +
    + + +
    + + + +
    + +{% endblock %} diff --git a/jwql/website/apps/jwql/templates/query_submit.html b/jwql/website/apps/jwql/templates/query_submit.html index 6fd4b12f3..585e55111 100644 --- a/jwql/website/apps/jwql/templates/query_submit.html +++ b/jwql/website/apps/jwql/templates/query_submit.html @@ -19,14 +19,19 @@

    Images of Queried Instruments

    -

    +

    + + Return To Query Form +
    +
    -
    -
    +
    +
    +
    @@ -48,14 +53,19 @@

    Images of Queried Instruments

    -
    - + +
    +
    + +
    + + Return To Query Form + - - -{% endblock %} + +{% endblock %} diff --git a/jwql/website/apps/jwql/templates/thumbnails.html b/jwql/website/apps/jwql/templates/thumbnails_per_obs.html similarity index 59% rename from jwql/website/apps/jwql/templates/thumbnails.html rename to jwql/website/apps/jwql/templates/thumbnails_per_obs.html index 6f3751ef8..84e0bc540 100644 --- a/jwql/website/apps/jwql/templates/thumbnails.html +++ b/jwql/website/apps/jwql/templates/thumbnails_per_obs.html @@ -14,18 +14,23 @@ {% block content %}
    + +
    +

    {{ inst }} Images

    - + -

    +

    -
    -
    +
    +
    +
    +

    @@ -69,8 +74,22 @@

    Proposal Information for {{ prop }}

    -
    - - - +
    + + {% if obs in obs_list %} + {% set index = obs_list.index(obs) %} + {% if index != 0 %} + < Previous + {% endif %} + + {% if obs_list.index(obs) != obs_list|length - 1 %} + Next > + {% endif %} + {% endif %} + +
    + + + + {% endblock %} diff --git a/jwql/website/apps/jwql/templates/view_exposure.html b/jwql/website/apps/jwql/templates/view_exposure.html new file mode 100644 index 000000000..a96c12153 --- /dev/null +++ b/jwql/website/apps/jwql/templates/view_exposure.html @@ -0,0 +1,238 @@ +{% extends "base.html" %} + +{% block preamble %} + + View {{ inst }} Exposure - JWQL + +{% endblock %} + +{% block content %} + +
    + +

    Exposure {{ group_root }}

    + + +
    +
    Proposal:
    +
    Observation:
    +
    Visit:
    + + + +
    + + {% if group_root in group_root_list %} + {% set index = group_root_list.index(group_root) %} + {% if index != 0 %} + < Previous + {% endif %} + + {% if index != group_root_list|length - 1 %} + Next > + {% endif %} + {% endif %} + +
    +
    + + + View File Type: + + i +
    +
    + {% for suffix in suffixes %} + {{ suffix }}      + {% endfor %} +
    +
    + + +
    + View Header + Explore Image +
    for
    + +
    + +
    +
    + +
    + + + +

    Integration: /

    +
    +
    +
    + {% if inst == 'NIRCam' %} +
    + + {% for detector1 in ['nrca2', 'nrca4', 'nrcb3', 'nrcb1'] %} + {% if detector1 in detectors %} +
    + + + + + +
    +
    + + + Preview image not found + + +
    + {% endif %} + {% endfor %} +
    +
    + + {% for detector2 in ['nrca1', 'nrca3', 'nrcb4', 'nrcb2'] %} + {% if detector2 in detectors %} +
    + + + + + +
    +
    + + + Preview image not found + + +
    + {% endif %} + {% endfor %} +
    +
    + + {% for detector3 in ['nrcalong', 'nrcblong'] %} + {% if detector3 in detectors %} +
    + + + + + +
    +
    + + + Preview image not found + + +
    + {% endif %} + {% endfor %} +
    + {% else %} +
    + + {% for detector in detectors %} +
    + + + + + +
    +
    + + + Preview image not found + + +
    + {% endfor %} +
    + {% endif %} + +
    + +
    + +
    +
    Submit Anomaly for Group
    +
    + + {% if form.errors %} +
    + {% for field in form %} + {% for error in field.errors %} +
    + {{ error|escape }} +
    + {% endfor %} + {% endfor %} +
    + {% endif %} + + + {{ csrf_input }} + + + {% for field in form %} +
      + {% for subwidget in field.subwidgets %} +
    • {{subwidget}}
    • + {% endfor %} +
    + {% endfor %} + +
    +
    +
    +
    + + {% if marked_viewed %} + + {% else %} + + {% endif %} + + {% if 'rate' in suffixes %} + + {% elif 'dark' in suffixes %} + + {% elif 'uncal' in suffixes %} + + {% elif suffixes|length == 1 %} + + {% else %} + Unable to show image for: {{suffixes}} + {% endif %} + +
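The Viewed/New button above posts to /ajax/viewed_group/<group_root>/<status>, and the single-image pages use /ajax/viewed/<file_root>; both update the RootFileInfo model imported in views.py. A sketch of the kind of update the single-image toggle implies; the model field names are assumptions, and only the 'marked_viewed' response key is taken from the page JavaScript:

    from django.http import JsonResponse
    from .models import RootFileInfo   # imported the same way in views.py

    def toggle_viewed_sketch(request, file_root):
        info = RootFileInfo.objects.get(root_name=file_root)   # field name assumed
        info.viewed = not info.viewed                          # field name assumed
        info.save()
        return JsonResponse({'marked_viewed': info.viewed})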
    + +{% endblock %} diff --git a/jwql/website/apps/jwql/templates/view_header.html b/jwql/website/apps/jwql/templates/view_header.html index 5ee0d7474..76a1c8a5c 100644 --- a/jwql/website/apps/jwql/templates/view_header.html +++ b/jwql/website/apps/jwql/templates/view_header.html @@ -13,7 +13,7 @@

    {{ filename }}_{{file_type}}

    View Image - View Proposal + View Proposal

    diff --git a/jwql/website/apps/jwql/templates/view_image.html b/jwql/website/apps/jwql/templates/view_image.html index 4d70fa0ff..4355eb878 100644 --- a/jwql/website/apps/jwql/templates/view_image.html +++ b/jwql/website/apps/jwql/templates/view_image.html @@ -14,22 +14,23 @@

    {{ file_root }}

    -
    Proposal:
    +
    Proposal:
    Observation:
    Visit:
    Detector:
    +
    FITS Filename:
    JPG Filename:

    - View File Type: + View File Type: i
    -
    + {% for suffix in suffixes %} - {{ suffix }}      + {{ suffix }}      {% endfor %}

    @@ -37,6 +38,7 @@

    {{ file_root }}

    View Header + Explore Image

    @@ -45,18 +47,32 @@

    {{ file_root }}

    - {{ file_root }}_cal_integ0.jpg
    - - - + + +

    Integration: /

    + +
    + + {% if file_root in file_root_list %} + {% set index = file_root_list.index(file_root) %} + {% if index != 0 %} + < Previous + {% endif %} + + {% if index != file_root_list|length - 1 %} + Next > + {% endif %} + {% endif %} + +
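The Previous/Next buttons above walk an ordered file_root_list supplied by the view. The ordering itself is not shown in this diff, but the navigate_filter AJAX call earlier saves a {rootname: expstart} mapping in the session, so one plausible sketch for deriving the neighbours is the following (assumptions: the 'navigation_data' session key and an exposure-start ordering):

    def surrounding_roots(request, file_root):
        # navigation_data is saved by save_page_navigation_data_ajax (see views.py below)
        nav = request.session.get('navigation_data', {})
        ordered = sorted(nav, key=nav.get)   # order by exposure start time
        idx = ordered.index(file_root)
        previous_root = ordered[idx - 1] if idx > 0 else None
        next_root = ordered[idx + 1] if idx + 1 < len(ordered) else None
        return previous_root, next_root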
    @@ -83,9 +99,11 @@
    Submit Anomaly
    {% for field in form %} +
      {% for subwidget in field.subwidgets %}
    • {{subwidget}}
    • {% endfor %} +
    {% endfor %} @@ -93,15 +111,20 @@
    Submit Anomaly
    + {% if marked_viewed %} + + {% else %} + + {% endif %} - {% if 'cal' in suffixes %} - - {% elif 'rate' in suffixes %} - + {% if 'rate' in suffixes %} + + {% elif 'dark' in suffixes %} + {% elif 'uncal' in suffixes %} - + {% elif suffixes|length == 1 %} - + {% else %} Unable to show image for: {{suffixes}} {% endif %} diff --git a/jwql/website/apps/jwql/templates/wata_monitor.html b/jwql/website/apps/jwql/templates/wata_monitor.html new file mode 100644 index 000000000..142a43c69 --- /dev/null +++ b/jwql/website/apps/jwql/templates/wata_monitor.html @@ -0,0 +1,55 @@ +{% extends "base.html" %} + +{% block preamble %} + + {{ inst }} WATA Monitor- JWQL + +{% endblock %} + +{% block content %} + +
    + +

    {{ inst }} WATA Monitor

    + +
    +
    +

    This monitor uses all WATA data available. + The plots are updated regularly from new data in MAST.

    + +

    Instructions

    +

    For general WATA monitoring, you only need to look at the first two plots + below: 1. WATA Status and 2. WATA Residual V2-V3 Offsets.

    +

    Plot 1 shows the successful Visit IDs (blue points) and the unsuccessful + ones (red points).

    +

Plot 2 shows the V2-V3 residual offsets: the closer to (0,0) the better. The + default limits of the plot are the acceptable values. If you see any point + at the edges or outside the limits of (-0.5, 0.5) for both axes, please let + any of the NIRSpec TA experts know: Tracy Beck, Charles Proffit, and/or Tony Keyes.

    +
    + + +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    + Loading ... +
    +
    + + +
    + + + +
    + +{% endblock %} diff --git a/jwql/website/apps/jwql/tests/test_context_processors.py b/jwql/website/apps/jwql/tests/test_context_processors.py index c3cc90f9d..1d4fc5951 100644 --- a/jwql/website/apps/jwql/tests/test_context_processors.py +++ b/jwql/website/apps/jwql/tests/test_context_processors.py @@ -36,6 +36,14 @@ class TestBaseContext(TestCase): def test_base_context(self): """Tests the ``base_context`` function.""" + # These lines are needed in order to use the Django models in a standalone + # script (as opposed to code run as a result of a webpage request). If these + # lines are not run, the script will crash when attempting to import the + # Django models in the line below. + from django import setup + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") + setup() + client = Client() request = client.get('{}/about/'.format(get_base_url())) request.COOKIES = {} @@ -43,6 +51,6 @@ def test_base_context(self): assert isinstance(context, dict) - keys = ['inst_list', 'tools', 'user', 'version'] + keys = ['inst_list', 'tools', 'version'] for key in keys: assert key in context diff --git a/jwql/website/apps/jwql/urls.py b/jwql/website/apps/jwql/urls.py index 823d91c63..afe91f441 100644 --- a/jwql/website/apps/jwql/urls.py +++ b/jwql/website/apps/jwql/urls.py @@ -13,6 +13,8 @@ - Matthew Bourque - Johannes Sahlmann - Teagan King + - Bryan Hilbert + - Maria Pena-Guerrero Use --- @@ -56,49 +58,76 @@ # Home path('', views.home, name='home'), - # MIRI-specific views - path('miri/miri_data_trending/', views.miri_data_trending, name='miri_data_trending'), - # NIRSpec-specific views - path('nirspec/nirspec_data_trending/', views.nirspec_data_trending, name='nirspec_data_trending'), + path('nirspec/msata_monitor/', monitor_views.msata_monitoring, name='msata_monitor'), + path('nirspec/wata_monitor/', monitor_views.wata_monitoring, name='wata_monitor'), # Common monitor views re_path(r'^(?P({}))/dark_monitor/$'.format(instruments), monitor_views.dark_monitor, name='dark_monitor'), re_path(r'^(?P({}))/bad_pixel_monitor/$'.format(instruments), monitor_views.bad_pixel_monitor, name='bad_pixel_monitor'), re_path(r'^(?P({}))/bias_monitor/$'.format(instruments), monitor_views.bias_monitor, name='bias_monitor'), re_path(r'^(?P({}))/readnoise_monitor/$'.format(instruments), monitor_views.readnoise_monitor, name='readnoise_monitor'), + re_path(r'^(?P({}))/edb_monitor/$'.format(instruments), monitor_views.edb_monitor, name='edb_monitor'), + re_path(r'^(?P({}))/cosmic_ray_monitor/$'.format(instruments), monitor_views.cosmic_ray_monitor, name='cosmic_ray_monitor'), # Main site views path('about/', views.about, name='about'), - path('anomaly_query/', views.anomaly_query, name='anomaly_query'), + path('jwql_query/', views.jwql_query, name='jwql_query'), path('api/', views.api_landing, name='api'), path('dashboard/', views.dashboard, name='dashboard'), path('download_table/', views.export, name='download_table'), path('edb/', views.engineering_database, name='edb'), path('jwqldb/', views.jwqldb_table_viewer, name='jwqldb'), path('jwqldb/', views.jwqldb_table_viewer, name='jwqldb_table_viewer'), + path('log_view/', views.log_view, name='log_view'), path('query_submit/', views.query_submit, name='query_submit'), re_path(r'^(?P({}))/$'.format(instruments), views.instrument, name='instrument'), re_path(r'^(?P({}))/archive/$'.format(instruments), views.archived_proposals, name='archive'), re_path(r'^(?P({}))/unlooked/$'.format(instruments), views.unlooked_images, name='unlooked'), - 
re_path(r'^(?P({}))/(?P[\w]+)/$'.format(instruments), views.view_image, name='view_image'), + re_path(r'^(?P({}))/report/$'.format(instruments), views.download_report, name='download_report'), + re_path(r'^(?P({}))/(?P[\w-]+)/$'.format(instruments), views.view_image, name='view_image'), + re_path(r'^(?P({}))/(?P.+)_(?P.+)/explore_image/'.format(instruments), views.explore_image, name='explore_image'), re_path(r'^(?P({}))/(?P.+)_(?P.+)/header/'.format(instruments), views.view_header, name='view_header'), - re_path(r'^(?P({}))/archive/(?P[\d]{{1,5}})/$'.format(instruments), views.archive_thumbnails, name='archive_thumb'), + re_path(r'^(?P({}))/archive/(?P[\d]{{1,5}})/obs(?P[\d]{{1,3}})/$'.format(instruments), + views.archive_thumbnails_per_observation, name='archive_thumb_per_obs'), + re_path(r'^(?P({}))/exposure/(?P[\w-]+)/$'.format(instruments), views.view_exposure, name='view_exposure'), # AJAX views re_path('ajax/query_submit/', views.archive_thumbnails_query_ajax, name='archive_thumb_query_ajax'), re_path(r'^ajax/(?P({}))/archive/$'.format(instruments), views.archived_proposals_ajax, name='archive_ajax'), - re_path(r'^ajax/(?P({}))/archive/(?P[\d]{{1,5}})/$'.format(instruments), views.archive_thumbnails_ajax, name='archive_thumb_ajax'), + re_path(r'^ajax/(?P({}))/(?P.+)_(?P.+)/explore_image/$'.format(instruments), views.explore_image_ajax, + name='explore_image_ajax'), + re_path(r'^ajax/(?P({}))/(?P.+)_(?P.+)/explore_image/plot_(?P(true|false))/ext_(?P.+)/int1_(?P.+)/grp1_(?P.+)/int2_(?P.+)/grp2_(?P.+)/$'.format(instruments), + views.explore_image_ajax, name='explore_image_ajax'), + re_path(r'^ajax/(?P({}))/archive/(?P[\d]{{1,5}})/obs(?P[\d]{{1,3}})/$'.format(instruments), + views.archive_thumbnails_ajax, name='archive_thumb_ajax'), + re_path(r'^ajax/viewed/(?P.+)/$', views.toggle_viewed_ajax, name='toggle_viewed_ajax'), + re_path(r'^ajax/viewed_group/(?P.+)/(?P(viewed|new|Viewed|New))/$', + views.set_viewed_ajax, name='set_viewed_ajax'), + re_path(r'^ajax/image_group/$', views.save_image_group_ajax, name='save_image_group_ajax'), + re_path(r'^ajax/image_sort/$', views.save_image_sort_ajax, name='save_image_sort_ajax'), + re_path(r'^ajax/navigate_filter/$', views.save_page_navigation_data_ajax, name='save_page_navigation_data_ajax'), + re_path('ajax/nirspec/msata/', monitor_views.msata_monitoring_ajax, name='msata_ajax'), + re_path('ajax/nirspec/wata/', monitor_views.wata_monitoring_ajax, name='wata_ajax'), # REST API views path('api/proposals/', api_views.all_proposals, name='all_proposals'), - re_path(r'^api/(?P({}))/proposals/$'.format(instruments), api_views.instrument_proposals, name='instrument_proposals'), - re_path(r'^api/(?P({}))/preview_images/$'.format(instruments), api_views.preview_images_by_instrument, name='preview_images_by_instrument'), - re_path(r'^api/(?P({}))/thumbnails/$'.format(instruments), api_views.thumbnails_by_instrument, name='thumbnails_by_instrument'), - re_path(r'^api/(?P[\d]{1,5})/filenames/$', api_views.filenames_by_proposal, name='filenames_by_proposal'), - re_path(r'^api/(?P[\d]{1,5})/preview_images/$', api_views.preview_images_by_proposal, name='preview_images_by_proposal'), - re_path(r'^api/(?P[\d]{1,5})/thumbnails/$', api_views.thumbnails_by_proposal, name='preview_images_by_proposal'), - re_path(r'^api/(?P[\w]+)/filenames/$', api_views.filenames_by_rootname, name='filenames_by_rootname'), - re_path(r'^api/(?P[\w]+)/preview_images/$', api_views.preview_images_by_rootname, name='preview_images_by_rootname'), - re_path(r'^api/(?P[\w]+)/thumbnails/$', 
api_views.thumbnails_by_rootname, name='thumbnails_by_rootname'), + re_path(r'^api/(?P({}))/proposals/$'.format(instruments), + api_views.instrument_proposals, name='instrument_proposals'), + re_path(r'^api/(?P[\d]{1,5})/filenames/$', + api_views.filenames_by_proposal, name='filenames_by_proposal'), + re_path(r'^api/(?P[\d]{1,5})/preview_images/$', + api_views.preview_images_by_proposal, name='preview_images_by_proposal'), + re_path(r'^api/(?P[\d]{1,5})/thumbnails/$', + api_views.thumbnails_by_proposal, name='preview_images_by_proposal'), + re_path(r'^api/(?P[\w]+)/filenames/$', + api_views.filenames_by_rootname, name='filenames_by_rootname'), + re_path(r'^api/(?P[\w]+)/preview_images/$', + api_views.preview_images_by_rootname, name='preview_images_by_rootname'), + re_path(r'^api/(?P[\w]+)/thumbnails/$', + api_views.thumbnail_by_rootname, name='thumbnail_by_rootname'), + re_path(r'^api/(?P({}))/looks/$'.format(instruments), + api_views.instrument_looks, name='instrument_looks'), + re_path(r'^api/(?P({}))/looks/(?P(viewed|new))/$'.format(instruments), + api_views.instrument_looks, name='instrument_looks_by_status'), ] diff --git a/jwql/website/apps/jwql/views.py b/jwql/website/apps/jwql/views.py index 34c2ddfec..3fc36eaa7 100644 --- a/jwql/website/apps/jwql/views.py +++ b/jwql/website/apps/jwql/views.py @@ -14,6 +14,11 @@ - Johannes Sahlmann - Teagan King - Mees Fix + - Bryan Hilbert + - Maria Pena-Guerrero + - Bradley Sappington + - Melanie Clarke + Use --- @@ -38,44 +43,54 @@ """ import csv +import datetime +import json +import glob +import logging import os +import operator +import socket from bokeh.layouts import layout from bokeh.embed import components +from django.core.paginator import Paginator from django.http import HttpResponse, JsonResponse -from django.contrib import messages from django.shortcuts import redirect, render +from sqlalchemy import inspect from jwql.database.database_interface import load_connection -from jwql.utils import anomaly_query_config -from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE, MONITORS, URL_DICT -from jwql.utils.utils import filesystem_path, get_base_url, get_config, query_unformat +from jwql.utils import monitor_utils +from jwql.utils.interactive_preview_image import InteractivePreviewImg +from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE, URL_DICT, QUERY_CONFIG_TEMPLATE, QueryConfigKeys +from jwql.utils.utils import filename_parser, get_base_url, get_config, get_rootnames_for_instrument_proposal, query_unformat from .data_containers import build_table -from .data_containers import data_trending from .data_containers import get_acknowledgements -from .data_containers import get_current_flagged_anomalies +from .data_containers import get_available_suffixes +from .data_containers import get_anomaly_form from .data_containers import get_dashboard_components from .data_containers import get_edb_components -from .data_containers import get_filenames_by_instrument, mast_query_filenames_by_instrument +from .data_containers import get_explorer_extension_names from .data_containers import get_header_info from .data_containers import get_image_info -from .data_containers import get_proposal_info -from .data_containers import get_thumbnails_all_instruments -from .data_containers import nirspec_trending +from .data_containers import get_instrument_looks +from .data_containers import get_rootnames_from_query from .data_containers import random_404_page from .data_containers import text_scrape from .data_containers import 
thumbnails_ajax from .data_containers import thumbnails_query_ajax -from .forms import InstrumentAnomalySubmitForm -from .forms import AnomalyQueryForm +from .forms import JwqlQueryForm from .forms import FileSearchForm +if not os.environ.get("READTHEDOCS"): + from .models import RootFileInfo +from astropy.io import fits -def anomaly_query(request): +def jwql_query(request): """The anomaly query form page""" - form = AnomalyQueryForm(request.POST or None) + form = JwqlQueryForm(request.POST or None) + form.fields['sort_type'].initial = request.session.get('image_sort', 'Recent') if request.method == 'POST': if form.is_valid(): @@ -88,9 +103,12 @@ def anomaly_query(request): query_configs[instrument]['exptypes'] = [query_unformat(i) for i in form.cleaned_data['{}_exptype'.format(instrument)]] query_configs[instrument]['readpatts'] = [query_unformat(i) for i in form.cleaned_data['{}_readpatt'.format(instrument)]] query_configs[instrument]['gratings'] = [query_unformat(i) for i in form.cleaned_data['{}_grating'.format(instrument)]] + query_configs[instrument]['subarrays'] = [query_unformat(i) for i in form.cleaned_data['{}_subarray'.format(instrument)]] + query_configs[instrument]['pupils'] = [query_unformat(i) for i in form.cleaned_data['{}_pupil'.format(instrument)]] query_configs[instrument]['anomalies'] = [query_unformat(i) for i in form.cleaned_data['{}_anomalies'.format(instrument)]] - all_filters, all_apers, all_detectors, all_exptypes, all_readpatts, all_gratings, all_anomalies = {}, {}, {}, {}, {}, {}, {} + all_filters, all_apers, all_detectors, all_exptypes = {}, {}, {}, {} + all_readpatts, all_gratings, all_subarrays, all_pupils, all_anomalies = {}, {}, {}, {}, {} for instrument in query_configs: all_filters[instrument] = query_configs[instrument]['filters'] all_apers[instrument] = query_configs[instrument]['apertures'] @@ -98,30 +116,39 @@ def anomaly_query(request): all_exptypes[instrument] = query_configs[instrument]['exptypes'] all_readpatts[instrument] = query_configs[instrument]['readpatts'] all_gratings[instrument] = query_configs[instrument]['gratings'] + all_subarrays[instrument] = query_configs[instrument]['subarrays'] + all_pupils[instrument] = query_configs[instrument]['pupils'] all_anomalies[instrument] = query_configs[instrument]['anomalies'] - anomaly_query_config.INSTRUMENTS_CHOSEN = form.cleaned_data['instrument'] - anomaly_query_config.ANOMALIES_CHOSEN_FROM_CURRENT_ANOMALIES = all_anomalies - anomaly_query_config.APERTURES_CHOSEN = all_apers - anomaly_query_config.FILTERS_CHOSEN = all_filters - anomaly_query_config.EXPTIME_MIN = str(form.cleaned_data['exp_time_min']) - anomaly_query_config.EXPTIME_MAX = str(form.cleaned_data['exp_time_max']) - anomaly_query_config.DETECTORS_CHOSEN = all_detectors - anomaly_query_config.EXPTYPES_CHOSEN = all_exptypes - anomaly_query_config.READPATTS_CHOSEN = all_readpatts - anomaly_query_config.GRATINGS_CHOSEN = all_gratings - + parameters = QUERY_CONFIG_TEMPLATE.copy() + parameters[QueryConfigKeys.INSTRUMENTS] = form.cleaned_data['instrument'] + parameters[QueryConfigKeys.LOOK_STATUS] = form.cleaned_data['look_status'] + parameters[QueryConfigKeys.DATE_RANGE] = form.cleaned_data['date_range'] + parameters[QueryConfigKeys.PROPOSAL_CATEGORY] = form.cleaned_data['proposal_category'] + parameters[QueryConfigKeys.SORT_TYPE] = form.cleaned_data['sort_type'] + parameters[QueryConfigKeys.ANOMALIES] = all_anomalies + parameters[QueryConfigKeys.APERTURES] = all_apers + parameters[QueryConfigKeys.FILTERS] = all_filters + 
parameters[QueryConfigKeys.DETECTORS] = all_detectors + parameters[QueryConfigKeys.EXP_TYPES] = all_exptypes + parameters[QueryConfigKeys.READ_PATTS] = all_readpatts + parameters[QueryConfigKeys.GRATINGS] = all_gratings + parameters[QueryConfigKeys.SUBARRAYS] = all_subarrays + parameters[QueryConfigKeys.PUPILS] = all_pupils + + # save the query config settings to a session + request.session['query_config'] = parameters return redirect('/query_submit') context = {'form': form, 'inst': ''} - template = 'anomaly_query.html' + template = 'jwql_query.html' return render(request, template, context) -def miri_data_trending(request): - """Generate the ``MIRI DATA-TRENDING`` page +def about(request): + """Generate the ``about`` page Parameters ---------- @@ -134,26 +161,16 @@ def miri_data_trending(request): Outgoing response sent to the webpage """ - template = "miri_data_trending.html" - variables, dash = data_trending() - - context = { - 'dashboard': dash, - 'inst': '', # Leave as empty string or instrument name; Required for navigation bar - 'inst_list': JWST_INSTRUMENT_NAMES_MIXEDCASE, # Do not edit; Required for navigation bar - 'tools': MONITORS, # Do not edit; Required for navigation bar - 'user': None # Do not edit; Required for authentication - } - - # append variables to context - context.update(variables) + template = 'about.html' + acknowledgements = get_acknowledgements() + context = {'acknowledgements': acknowledgements, + 'inst': ''} - # Return a HTTP response with the template and dictionary of variables return render(request, template, context) -def nirspec_data_trending(request): - """Generate the ``MIRI DATA-TRENDING`` page +def api_landing(request): + """Generate the ``api`` page Parameters ---------- @@ -166,64 +183,40 @@ def nirspec_data_trending(request): Outgoing response sent to the webpage """ - template = "nirspec_data_trending.html" - variables, dash = nirspec_trending() - - context = { - 'dashboard': dash, - 'inst': '', # Leave as empty string or instrument name; Required for navigation bar - 'inst_list': JWST_INSTRUMENT_NAMES_MIXEDCASE, # Do not edit; Required for navigation bar - 'tools': MONITORS, # Do not edit; Required for navigation bar - 'user': None # Do not edit; Required for authentication - } - - # append variables to context - context.update(variables) + template = 'api_landing.html' + context = {'inst': ''} - # Return a HTTP response with the template and dictionary of variables return render(request, template, context) -def about(request): - """Generate the ``about`` page - - Parameters - ---------- - request : HttpRequest object - Incoming request from the webpage - - Returns - ------- - HttpResponse object - Outgoing response sent to the webpage +def save_page_navigation_data_ajax(request): """ - - template = 'about.html' - acknowledgements = get_acknowledgements() - context = {'acknowledgements': acknowledgements, - 'inst': ''} - - return render(request, template, context) - - -def api_landing(request): - """Generate the ``api`` page + Takes a bracketless string of rootnames and expstarts, and saves it as a session dictionary Parameters ---------- - request : HttpRequest object + request: HttpRequest object Incoming request from the webpage + Returns ------- HttpResponse object Outgoing response sent to the webpage """ - template = 'api_landing.html' - context = {'inst': ''} - - return render(request, template, context) + # a string of the form " 'rootname1'='expstart1', 'rootname2'='expstart2', ..." 
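    # Illustration (values invented): a POST body of
    #     navigate_dict = "jw02732001005_02103_00005_nrcb1=59810.432,jw02732001005_02103_00006_nrcb1=59810.441"
    # is stored in the session below as
    #     {'jw02732001005_02103_00005_nrcb1': 59810.432,
    #      'jw02732001005_02103_00006_nrcb1': 59810.441}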
+ if request.method == 'POST': + navigate_dict = request.POST.get('navigate_dict') + # Save session in form {rootname:expstart} + rootname_expstarts = dict() + for item in navigate_dict.split(','): + rootname, expstart = item.split("=") + rootname_expstarts[rootname] = float(expstart) + request.session['navigation_data'] = rootname_expstarts + + context = {'item': request.session['navigation_data']} + return JsonResponse(context, json_dumps_params={'indent': 2}) def archived_proposals(request, inst): @@ -267,47 +260,19 @@ def archived_proposals_ajax(request, inst): JsonResponse object Outgoing response sent to the webpage """ - # Ensure the instrument is correctly capitalized - inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] - filesystem = get_config()['filesystem'] + # Read in the json file created by data_containers.create_archived_proposals_context + # and use as the context + output_dir = get_config()['outputs'] + context_file = os.path.join(output_dir, 'archive_page', f'{inst}_archive_context.json') - # Get list of all files for the given instrument - filename_query = mast_query_filenames_by_instrument(inst) - filenames_public = get_filenames_by_instrument(inst, restriction='public', query_response=filename_query) - filenames_proprietary = get_filenames_by_instrument(inst, restriction='proprietary', query_response=filename_query) - - # Determine locations to the files - filenames = [] - for filename in filenames_public: - try: - relative_filepath = filesystem_path(filename, check_existence=False) - full_filepath = os.path.join(filesystem, 'public', relative_filepath) - filenames.append(full_filepath) - except ValueError: - print('Unable to determine filepath for {}'.format(filename)) - for filename in filenames_proprietary: - try: - relative_filepath = filesystem_path(filename, check_existence=False) - full_filepath = os.path.join(filesystem, 'proprietary', relative_filepath) - filenames.append(full_filepath) - except ValueError: - print('Unable to determine filepath for {}'.format(filename)) - - # Gather information about the proposals for the given instrument - proposal_info = get_proposal_info(filenames) - - context = {'inst': inst, - 'num_proposals': proposal_info['num_proposals'], - 'thumbnails': {'proposals': proposal_info['proposals'], - 'thumbnail_paths': proposal_info['thumbnail_paths'], - 'num_files': proposal_info['num_files']}} + with open(context_file, 'r') as obj: + context = json.load(obj) return JsonResponse(context, json_dumps_params={'indent': 2}) -def archive_thumbnails(request, inst, proposal): - """Generate the page listing all archived images in the database - for a certain proposal +def archive_thumbnails_ajax(request, inst, proposal, observation=None): + """Generate the page listing archived images by proposal. 
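Since `archived_proposals_ajax` above now only loads a JSON file pre-generated by `data_containers.create_archived_proposals_context`, the file's contents are presumably the same context dictionary the removed code used to assemble inline; a hedged sketch (the output directory, instrument, and values are placeholders inferred from the old code):

```python
import json
import os

# Hypothetical location of a pre-generated archive context file
output_dir = '/path/to/outputs'
context_file = os.path.join(output_dir, 'archive_page', 'nircam_archive_context.json')

with open(context_file, 'r') as obj:
    context = json.load(obj)

# Expected (illustrative) shape, mirroring the dictionary the old inline code built:
# {'inst': 'NIRCam',
#  'num_proposals': 123,
#  'thumbnails': {'proposals': [...], 'thumbnail_paths': [...], 'num_files': [...]}}
```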
Parameters ---------- @@ -317,27 +282,26 @@ def archive_thumbnails(request, inst, proposal): Name of JWST instrument proposal : str Number of observing proposal + observation : str + Observation number within the proposal Returns ------- - HttpResponse object + JsonResponse object Outgoing response sent to the webpage """ # Ensure the instrument is correctly capitalized inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] - proposal_meta = text_scrape(proposal) - - template = 'thumbnails.html' - context = {'inst': inst, - 'prop': proposal, - 'prop_meta': proposal_meta, - 'base_url': get_base_url()} + data = thumbnails_ajax(inst, proposal, obs_num=observation) + data['thumbnail_sort'] = request.session.get("image_sort", "Recent") + data['thumbnail_group'] = request.session.get("image_group", "Exposure") - return render(request, template, context) + save_page_navigation_data(request, data) + return JsonResponse(data, json_dumps_params={'indent': 2}) -def archive_thumbnails_ajax(request, inst, proposal): +def archive_thumbnails_per_observation(request, inst, proposal, observation): """Generate the page listing all archived images in the database for a certain proposal @@ -349,23 +313,48 @@ def archive_thumbnails_ajax(request, inst, proposal): Name of JWST instrument proposal : str Number of observing proposal + observation : str + Observation number within the proposal Returns ------- - JsonResponse object + HttpResponse object Outgoing response sent to the webpage """ # Ensure the instrument is correctly capitalized inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] + proposal_meta = text_scrape(proposal) - data = thumbnails_ajax(inst, proposal) + # Get a list of all observation numbers for the proposal + # This will be used to create buttons for observation-specific + # pages + rootnames = get_rootnames_for_instrument_proposal(inst, proposal) + all_obs = [] + for root in rootnames: + try: + all_obs.append(filename_parser(root)['observation']) + except KeyError: + pass + + obs_list = sorted(list(set(all_obs))) + + sort_type = request.session.get('image_sort', 'Recent') + group_type = request.session.get('image_group', 'Exposure') + template = 'thumbnails_per_obs.html' + context = {'base_url': get_base_url(), + 'inst': inst, + 'obs': observation, + 'obs_list': obs_list, + 'prop': proposal, + 'prop_meta': proposal_meta, + 'sort': sort_type, + 'group': group_type} - return JsonResponse(data, json_dumps_params={'indent': 2}) + return render(request, template, context) def archive_thumbnails_query_ajax(request): - """Generate the page listing all archived images in the database - for a certain proposal + """Generate the page listing archived images by query parameters. 
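As a quick, self-contained illustration of how `archive_thumbnails_per_observation` above collects observation numbers (the rootnames are hypothetical and the import path is assumed; the `'observation'` key is the one the loop above relies on):

```python
from jwql.utils.utils import filename_parser  # import path assumed

# Hypothetical rootnames from one proposal, spanning two observations
rootnames = ['jw01068001001_02102_00001_nrcb1',
             'jw01068002001_02102_00001_nrcb1']

all_obs = []
for root in rootnames:
    try:
        all_obs.append(filename_parser(root)['observation'])  # e.g. '001', '002'
    except KeyError:
        pass  # skip rootnames the parser cannot classify

obs_list = sorted(set(all_obs))  # e.g. ['001', '002']
```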
Parameters ---------- @@ -382,26 +371,46 @@ def archive_thumbnails_query_ajax(request): Outgoing response sent to the webpage """ - # Ensure the instrument is correctly capitalized - instruments_list = [] - for instrument in anomaly_query_config.INSTRUMENTS_CHOSEN: - instrument = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()] - instruments_list.append(instrument) - - parameters = anomaly_query_config.PARAMETERS - - # when parameters only contains nirspec as instrument, thumbnails still end up being all niriss data - thumbnails = get_thumbnails_all_instruments(parameters) - - anomaly_query_config.THUMBNAILS = thumbnails - - data = thumbnails_query_ajax(thumbnails) - + parameters = request.session.get("query_config", QUERY_CONFIG_TEMPLATE.copy()) + filtered_rootnames = get_rootnames_from_query(parameters) + + paginator = Paginator(filtered_rootnames, + parameters[QueryConfigKeys.NUM_PER_PAGE]) + page_number = request.GET.get("page", 1) + page_obj = paginator.get_page(page_number) + + data = thumbnails_query_ajax(page_obj.object_list) + data['thumbnail_sort'] = parameters[QueryConfigKeys.SORT_TYPE] + data['thumbnail_group'] = request.session.get("image_group", "Exposure") + + # add top level parameters for summarizing + data['query_config'] = {} + for key in parameters: + value = parameters[key] + if isinstance(value, dict): + for subkey in value: + subvalue = value[subkey] + if subvalue: + data['query_config'][f'{key}_{subkey}'] = subvalue + elif value: + data['query_config'][key] = value + + # pass pagination info + if page_obj.has_previous(): + data['previous_page'] = page_obj.previous_page_number() + data['current_page'] = page_obj.number + if page_obj.has_next(): + data['next_page'] = page_obj.next_page_number() + data['total_pages'] = paginator.num_pages + data['total_files'] = paginator.count + + request.session['image_sort'] = parameters[QueryConfigKeys.SORT_TYPE] + save_page_navigation_data(request, data) return JsonResponse(data, json_dumps_params={'indent': 2}) def dashboard(request): - """Generate the dashbaord page + """Generate the dashboard page Parameters ---------- @@ -419,12 +428,16 @@ def dashboard(request): db = get_dashboard_components(request) pie_graph = db.dashboard_instrument_pie_chart() files_graph = db.dashboard_files_per_day() + useage_graph = db.dashboard_disk_usage() + directories_usage_graph, central_store_usage_graph = db.dashboard_central_store_data_volume() filetype_bar = db.dashboard_filetype_bar_chart() table_columns, table_values = db.dashboard_monitor_tracking() grating_plot = db.dashboard_exposure_count_by_filter() anomaly_plot = db.dashboard_anomaly_per_instrument() - plot = layout([[files_graph], [pie_graph, filetype_bar], + plot = layout([[files_graph, useage_graph], + [directories_usage_graph, central_store_usage_graph], + [pie_graph, filetype_bar], [grating_plot, anomaly_plot]], sizing_mode='stretch_width') script, div = components(plot) @@ -440,6 +453,43 @@ def dashboard(request): return render(request, template, context) +def download_report(request, inst): + """Download data report by look status. + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage. + inst : str + The JWST instrument of interest. 
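For context on the pagination fields assembled above in `archive_thumbnails_query_ajax`, this is a minimal standalone sketch of Django's `Paginator` (the item list and page size are made up; the view itself takes the page size from `QueryConfigKeys.NUM_PER_PAGE`):

```python
from django.core.paginator import Paginator

rootnames = [f'rootname_{i:03d}' for i in range(95)]  # hypothetical query results
paginator = Paginator(rootnames, 25)                   # 25 items per page

page_obj = paginator.get_page(2)
page_obj.object_list         # rootnames 25-49, the slice handed to thumbnails_query_ajax above
page_obj.number              # 2
page_obj.has_previous()      # True -> page_obj.previous_page_number() == 1
page_obj.has_next()          # True -> page_obj.next_page_number() == 3
paginator.num_pages          # 4
paginator.count              # 95
```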
+ + Returns + ------- + response : HttpResponse object + Outgoing response sent to the webpage + """ + # check for filter criteria passed in request + kwargs = dict() + for filter_name in ['look', 'exp_type', 'cat_type', 'proposal', 'sort_as']: + kwargs[filter_name] = request.GET.get(filter_name) + + # get all observation looks from file info model + # and join with observation descriptors + keys, looks = get_instrument_looks(inst, **kwargs) + + today = datetime.datetime.now().strftime('%Y%m%d') + filename = f'{inst.lower()}_report_{today}.csv' + response = HttpResponse(content_type='text/csv') + response['Content-Disposition'] = f'attachment; filename="{filename}"' + + writer = csv.writer(response) + writer.writerow(keys) + for row in looks: + writer.writerow(row.values()) + + return response + + def engineering_database(request): """Generate the EDB page. @@ -551,7 +601,7 @@ def instrument(request, inst): return render(request, template, context) -def jwqldb_table_viewer(request): +def jwqldb_table_viewer(request, tablename_param=None): """Generate the JWQL Table Viewer view. Parameters @@ -571,7 +621,10 @@ def jwqldb_table_viewer(request): try: tablename = request.POST['db_table_select'] except KeyError: - tablename = None + if tablename_param: + tablename = tablename_param + else: + tablename = None if tablename is None: table_meta = None @@ -579,7 +632,7 @@ def jwqldb_table_viewer(request): table_meta = build_table(tablename) _, _, engine, _ = load_connection(get_config()['connection_string']) - all_jwql_tables = engine.table_names() + all_jwql_tables = inspect(engine).get_table_names() if 'django_migrations' in all_jwql_tables: all_jwql_tables.remove('django_migrations') # No necessary information. @@ -620,6 +673,51 @@ def jwqldb_table_viewer(request): return render(request, template, context) +def log_view(request): + """Access JWQL monitoring logs from the web app. 
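The table-listing change in `jwqldb_table_viewer` above replaces the long-deprecated `engine.table_names()` (removed in SQLAlchemy 2.0, consistent with the `sqlalchemy==2.0.21` pin later in this diff) with the inspector API; a minimal sketch using a throwaway in-memory database:

```python
from sqlalchemy import create_engine, inspect, text

engine = create_engine('sqlite:///:memory:')  # throwaway database for illustration
with engine.begin() as conn:
    conn.execute(text('CREATE TABLE demo (id INTEGER PRIMARY KEY)'))

# SQLAlchemy 2.0-compatible way to list tables, as used above
all_tables = inspect(engine).get_table_names()  # ['demo']
```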
+ + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + + Returns + ------- + HttpResponse object + Outgoing response sent to the webpage + """ + + template = 'log_view.html' + log_path = get_config()['log_dir'] + log_name = request.POST.get('log_submit', None) + + hostname = socket.gethostname() + + if 'dljwql' in hostname: + server = 'dev' + elif 'tljwql' in hostname: + server = 'test' + else: + server = 'ops' + + full_log_paths = sorted(glob.glob(os.path.join(log_path, server, '*', '*')), reverse=True) + full_log_paths = [log for log in full_log_paths if not os.path.basename(log).startswith('.')] + log_dictionary = {os.path.basename(path): path for path in full_log_paths} + + if log_name: + with open(log_dictionary[log_name]) as f: + log_text = f.read() + else: + log_text = None + + context = {'inst': '', + 'all_logs': log_dictionary, + 'log_text': log_text, + 'log_name': log_name} + + return render(request, template, context) + + def not_found(request, *kwargs): """Generate a ``not_found`` page @@ -657,22 +755,14 @@ def query_submit(request): """ template = 'query_submit.html' - - parameters = {} - parameters['instruments'] = anomaly_query_config.INSTRUMENTS_CHOSEN - parameters['apertures'] = anomaly_query_config.APERTURES_CHOSEN - parameters['filters'] = anomaly_query_config.FILTERS_CHOSEN - parameters['detectors'] = anomaly_query_config.DETECTORS_CHOSEN - parameters['exposure_types'] = anomaly_query_config.EXPTYPES_CHOSEN - parameters['read_patterns'] = anomaly_query_config.READPATTS_CHOSEN - parameters['gratings'] = anomaly_query_config.GRATINGS_CHOSEN - parameters['anomalies'] = anomaly_query_config.ANOMALIES_CHOSEN_FROM_CURRENT_ANOMALIES - - anomaly_query_config.PARAMETERS = parameters - + sort_type = request.session.get('image_sort', 'Recent') + group_type = request.session.get('image_group', 'Exposure') + page_number = request.GET.get("page", 1) context = {'inst': '', - 'base_url': get_base_url() - } + 'base_url': get_base_url(), + 'sort': sort_type, + 'group': group_type, + 'page': page_number} return render(request, template, context) @@ -731,8 +821,8 @@ def view_header(request, inst, filename, filetype): return render(request, template, context) -def view_image(request, inst, file_root, rewrite=False): - """Generate the image view page +def explore_image(request, inst, file_root, filetype): + """Generate the explore image page. Parameters ---------- @@ -741,9 +831,9 @@ def view_image(request, inst, file_root, rewrite=False): inst : str Name of JWST instrument file_root : str - FITS filename of selected image in filesystem - rewrite : bool, optional - Regenerate the jpg preview of `file` if it already exists? + FITS file_root of selected image in filesystem + filetype : str + Type of file (e.g. 
``uncal``) Returns ------- @@ -753,34 +843,433 @@ def view_image(request, inst, file_root, rewrite=False): # Ensure the instrument is correctly capitalized inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] + template = 'explore_image.html' + + # get explorable extensions from header + extensions = get_explorer_extension_names(file_root, filetype) + + fits_file = file_root + '_' + filetype + '.fits' + # Get image info containing all paths to fits files + image_info_list = get_image_info(file_root) + # Find index of our fits file + fits_index = next(ix for ix, fits_path in enumerate(image_info_list['all_files']) if fits_file in fits_path) + # get full path of fits file to open and extract extension info + full_fits_file = image_info_list['all_files'][fits_index] + extension_ints = {} + extension_groups = {} + + # gather extension group/integration information to send + if os.path.isfile(full_fits_file): + with fits.open(full_fits_file) as hdulist: + for exten in extensions: + dims = hdulist[exten].shape + if len(dims) == 4: + extension_ints[exten], extension_groups[exten], ny, nx = dims + elif len(dims) == 3: + extension_groups[exten] = 0 + extension_ints[exten], ny, nx = dims + else: + extension_ints[exten] = 0 + extension_groups[exten] = 0 + else: + raise FileNotFoundError(f'WARNING: {full_fits_file} does not exist!') - template = 'view_image.html' - image_info = get_image_info(file_root, rewrite) + form = get_anomaly_form(request, inst, file_root) - # Determine current flagged anomalies - current_anomalies = get_current_flagged_anomalies(file_root, inst) + context = {'inst': inst, + 'file_root': file_root, + 'filetype': filetype, + 'extensions': extensions, + 'extension_groups': extension_groups, + 'extension_ints': extension_ints, + 'base_url': get_base_url(), + 'form': form} - # Create a form instance - form = InstrumentAnomalySubmitForm(request.POST or None, instrument=inst.lower(), initial={'anomaly_choices': current_anomalies}) + return render(request, template, context) - # If this is a POST request and the form is filled out, process the form data - if request.method == 'POST' and 'anomaly_choices' in dict(request.POST): - anomaly_choices = dict(request.POST)['anomaly_choices'] - if form.is_valid(): - form.update_anomaly_table(file_root, 'unknown', anomaly_choices) - messages.success(request, "Anomaly submitted successfully") + +def explore_image_ajax(request, inst, file_root, filetype, line_plots='false', low_lim=None, high_lim=None, + ext_name="SCI", int1_nr=None, grp1_nr=None, int2_nr=None, grp2_nr=None): + """Generate the page listing all archived images in the database + for a certain proposal + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + inst : str + Name of JWST instrument + file_root : str + FITS file_root of selected image in filesystem + filetype : str + Type of file (e.g. ``uncal``) + line_plots : str + If 'true', column and row plots will be computed and shown with the image. + low_lim : str + Signal value to use as the lower limit of the displayed image. If "None", it will be calculated using the ZScale function + high_lim : str + Signal value to use as the upper limit of the displayed image. If "None", it will be calculated using the ZScale function + ext_name : str + Extension to implement in interactive preview image ("SCI", "DQ", "GROUPDQ", "PIXELDQ", "ERR"...) 
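The integration/group bookkeeping in `explore_image` above keys off the dimensionality of each FITS extension; here is a hedged sketch of that convention (the file name is hypothetical, and the assumed 4-D axis order is integrations, groups, y, x, as in a typical JWST ramp file):

```python
from astropy.io import fits

filename = 'jw01068001001_02102_00001_nrcb1_uncal.fits'  # hypothetical file
with fits.open(filename) as hdulist:
    dims = hdulist['SCI'].shape
    if len(dims) == 4:          # e.g. (2, 10, 2048, 2048): ints, groups, y, x
        n_ints, n_groups, ny, nx = dims
    elif len(dims) == 3:        # e.g. (2, 2048, 2048): ints, y, x (no group axis)
        n_groups = 0
        n_ints, ny, nx = dims
    else:                       # 2-D images carry neither axis
        n_ints, n_groups = 0, 0
```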
+ + Returns + ------- + JsonResponse object + Outgoing response sent to the webpage + """ + # Ensure the instrument is correctly capitalized + inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] + + # Get image info containing all paths to fits files + image_info_list = get_image_info(file_root) + + # Save fits file name to use for bokeh image + fits_file = file_root + '_' + filetype + '.fits' + # Find index of our fits file + fits_index = next(ix for ix, fits_path in enumerate(image_info_list['all_files']) if fits_file in fits_path) + + # get full path of fits file to send to InteractivePreviewImg + full_fits_file = image_info_list['all_files'][fits_index] + # sent floats not strings to init + if low_lim == "None": + low_lim = None + if high_lim == "None": + high_lim = None + if int1_nr == "None": + int1_nr = None + if grp1_nr == "None": + grp1_nr = None + if int2_nr == "None": + int2_nr = None + if grp2_nr == "None": + grp2_nr = None + + if low_lim is not None: + low_lim = float(low_lim) + if high_lim is not None: + high_lim = float(high_lim) + + group = None + integ = None + if (grp1_nr): + if (grp2_nr): + group = [int(grp1_nr), int(grp2_nr)] else: - messages.error(request, "Failed to submit anomaly") + group = int(grp1_nr) + if (int1_nr): + if (int2_nr): + integ = [int(int1_nr), int(int2_nr)] + else: + integ = int(int1_nr) + + if str(line_plots).strip().lower() == 'true': + line_plots = True + else: + line_plots = False + + int_preview_image = InteractivePreviewImg( + full_fits_file, low_lim=low_lim, high_lim=high_lim, extname=ext_name, + group=group, integ=integ, line_plots=line_plots) + + context = {'inst': "inst", + 'script': int_preview_image.script, + 'div': int_preview_image.div} + + return JsonResponse(context, json_dumps_params={'indent': 2}) + + +def save_image_group_ajax(request): + """Save the latest selected group type in the session. + + Parameters + ---------- + request : HttpRequest + The incoming request. + + Returns + ------- + JsonResponse + Object containing the group value as set in the session (key: 'item'). + """ + image_group = request.GET['group_type'] + request.session['image_group'] = image_group + context = {'item': request.session['image_group']} + return JsonResponse(context, json_dumps_params={'indent': 2}) + + +def save_image_sort_ajax(request): + """Save the latest selected sort type in the session. + + Parameters + ---------- + request : HttpRequest + The incoming request. + + Returns + ------- + JsonResponse + Object containing the sort value as set in the session (key: 'item'). + """ + # a string of the form " 'rootname1'='expstart1', 'rootname2'='expstart2', ..." + image_sort = request.GET['sort_type'] + + request.session['image_sort'] = image_sort + context = {'item': request.session['image_sort']} + return JsonResponse(context, json_dumps_params={'indent': 2}) + + +def save_page_navigation_data(request, data): + """ + Save the data from the current page in the session. + + Enables navigating to the next or previous page. Current sort options + are Ascending/Descending, and Recent/Oldest. + + Parameters + ---------- + request: HttpRequest object + data: dictionary + the data dictionary to be returned from the calling view function + nav_by_date_range: boolean + when viewing an image, will the next/previous buttons be sorted by date? 
(the other option is rootname) + """ + navigate_data = {} + for rootname in data['file_data']: + navigate_data[rootname] = data['file_data'][rootname]['expstart'] + + request.session['navigation_data'] = navigate_data + return + + +def set_viewed_ajax(request, group_root, status): + """Update the model's "viewed" field for a group of files + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + group_root : str + Group root name, matching filename roots up to + but not including the detector. + status : {'new', 'viewed'} + Value to set: 'new' for viewed=False, 'viewed' for viewed=True. + + Returns + ------- + JsonResponse object + Outgoing response sent to the webpage + """ + viewed = (str(status).strip().lower() == 'viewed') + + root_file_info = RootFileInfo.objects.filter( + root_name__startswith=group_root) + for root_file in root_file_info: + root_file.viewed = viewed + root_file.save() + + # check actual status as set + marked_viewed = all([rf.viewed for rf in root_file_info]) # Build the context - context = {'inst': inst, - 'prop_id': file_root[2:7], + context = {'marked_viewed': marked_viewed} + return JsonResponse(context, json_dumps_params={'indent': 2}) + + +def toggle_viewed_ajax(request, file_root): + """Update the model's "mark_viewed" field and save in the database + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + file_root : str + FITS file_root of selected image in filesystem + + Returns + ------- + JsonResponse object + Outgoing response sent to the webpage + """ + root_file_info = RootFileInfo.objects.get(root_name=file_root) + root_file_info.viewed = not root_file_info.viewed + root_file_info.save() + + # Build the context + context = {'marked_viewed': root_file_info.viewed} + return JsonResponse(context, json_dumps_params={'indent': 2}) + + +def view_exposure(request, inst, group_root): + """Generate the exposure view page. + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage. + inst : str + Name of JWST instrument. + group_root : str + Exposure group, matching file root names up to but not + including the detector. + + Returns + ------- + HttpResponse object + Outgoing response sent to the webpage + """ + + # Ensure the instrument is correctly capitalized + inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] + + template = 'view_exposure.html' + image_info = get_image_info(group_root) + + # Get the proposal id and obsnum from the group root name + prop_id = group_root[2:7] + obsnum = group_root[7:10] + + # Get available suffixes in a consistent order. + suffixes = get_available_suffixes(image_info['suffixes'], + return_untracked=False) + + # Get the anomaly submission form + form = get_anomaly_form(request, inst, group_root) + + # if we get to this page without any navigation data, + # previous/next buttons will be hidden + navigation_data = request.session.get('navigation_data', {}) + + # For time based sorting options, sort to "Recent" first to create sorting consistency when times are the same. 
+ # This is consistent with how Tinysort is utilized in jwql.js->sort_by_thumbnails + sort_type = request.session.get('image_sort', 'Recent') + if sort_type in ['Descending']: + matching_rootfiles = sorted(navigation_data, reverse=True) + elif sort_type in ['Recent']: + navigation_data = dict(sorted(navigation_data.items())) + navigation_data = dict(sorted(navigation_data.items(), key=operator.itemgetter(1), reverse=True)) + matching_rootfiles = list(navigation_data.keys()) + elif sort_type in ['Oldest']: + navigation_data = dict(sorted(navigation_data.items())) + navigation_data = dict(sorted(navigation_data.items(), key=operator.itemgetter(1))) + matching_rootfiles = list(navigation_data.keys()) + else: + matching_rootfiles = sorted(navigation_data) + + # pick out group names from matching root files + group_root_list = [] + for rootname in matching_rootfiles: + try: + other_group_root = filename_parser(rootname)['group_root'] + except ValueError: + continue + if other_group_root not in group_root_list: + group_root_list.append(other_group_root) + + # Get our current views RootFileInfo model and send our "viewed/new" information + root_file_info = RootFileInfo.objects.filter(root_name__startswith=group_root) + viewed = all([rf.viewed for rf in root_file_info]) + + # Build the context + context = {'base_url': get_base_url(), + 'group_root_list': group_root_list, + 'inst': inst, + 'prop_id': prop_id, + 'obsnum': obsnum, + 'group_root': group_root, + 'suffixes': suffixes, + 'num_ints': image_info['num_ints'], + 'available_ints': image_info['available_ints'], + 'total_ints': image_info['total_ints'], + 'detectors': sorted(image_info['detectors']), + 'form': form, + 'marked_viewed': viewed} + + return render(request, template, context) + + +def view_image(request, inst, file_root): + """Generate the image view page + + Parameters + ---------- + request : HttpRequest object + Incoming request from the webpage + inst : str + Name of JWST instrument + file_root : str + FITS filename of selected image in filesystem + + Returns + ------- + HttpResponse object + Outgoing response sent to the webpage + """ + + # Ensure the instrument is correctly capitalized + inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()] + + template = 'view_image.html' + image_info = get_image_info(file_root) + + # Put suffixes in a consistent order. Check if any of the + # suffixes are not in the list that specifies order. + suffixes, untracked_suffixes = get_available_suffixes( + image_info['suffixes'], return_untracked=True) + + if len(untracked_suffixes) > 0: + module = os.path.basename(__file__).strip('.py') + monitor_utils.initialize_instrument_monitor(module) + logging.warning((f'In view_image(), for {inst}, {file_root}, ' + f'the following suffixes are present in the data, ' + f'but not in EXPOSURE_PAGE_SUFFIX_ORDER in ' + f'constants.py: {untracked_suffixes} ' + 'Please add them, so that they will appear in a ' + 'consistent order on the webpage.')) + + form = get_anomaly_form(request, inst, file_root) + + prop_id = file_root[2:7] + + # if we get to this page without any navigation data (i.e. direct link), + # just use the file_root with no expstart time + # navigate_data is dict of format rootname:expstart + navigation_data = request.session.get('navigation_data', {file_root: 0}) + + # For time based sorting options, sort to "Recent" first to create + # sorting consistency when times are the same. 
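Both the exposure-level view above and the image-level view continuing here apply the same two-pass sort to the navigation data: an alphabetical pass on rootnames first, then a stable sort on `expstart`, so entries with identical times keep a deterministic order. A tiny worked sketch with made-up values:

```python
import operator

# Hypothetical navigation data: rootname -> expstart (MJD); two entries share a time
navigation_data = {'b_rootname': 59700.2, 'c_rootname': 59700.1, 'a_rootname': 59700.2}

# Pass 1: sort by rootname so ties on expstart resolve consistently
navigation_data = dict(sorted(navigation_data.items()))
# Pass 2: stable sort by expstart, most recent first ("Recent")
navigation_data = dict(sorted(navigation_data.items(), key=operator.itemgetter(1), reverse=True))

list(navigation_data)  # ['a_rootname', 'b_rootname', 'c_rootname']
```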
+ # This is consistent with how Tinysort is utilized in + # jwql.js->sort_by_thumbnails + sort_type = request.session.get('image_sort', 'Recent') + if sort_type in ['Descending']: + file_root_list = sorted(navigation_data, reverse=True) + elif sort_type in ['Recent']: + navigation_data = dict(sorted(navigation_data.items())) + navigation_data = dict(sorted(navigation_data.items(), + key=operator.itemgetter(1), reverse=True)) + file_root_list = list(navigation_data.keys()) + elif sort_type in ['Oldest']: + navigation_data = dict(sorted(navigation_data.items())) + navigation_data = dict(sorted(navigation_data.items(), + key=operator.itemgetter(1))) + file_root_list = list(navigation_data.keys()) + else: + file_root_list = sorted(navigation_data) + + # Get our current views RootFileInfo model and send our "viewed/new" information + root_file_info = RootFileInfo.objects.get(root_name=file_root) + + # Build the context + context = {'base_url': get_base_url(), + 'file_root_list': file_root_list, + 'inst': inst, + 'prop_id': prop_id, + 'obsnum': file_root[7:10], 'file_root': file_root, - 'jpg_files': image_info['all_jpegs'], - 'fits_files': image_info['all_files'], - 'suffixes': image_info['suffixes'], + 'suffixes': suffixes, 'num_ints': image_info['num_ints'], 'available_ints': image_info['available_ints'], - 'form': form} + 'total_ints': image_info['total_ints'], + 'form': form, + 'marked_viewed': root_file_info.viewed} return render(request, template, context) diff --git a/jwql/website/jwql_proj/settings.py b/jwql/website/jwql_proj/settings.py index b7e512c5d..6dc070ca7 100644 --- a/jwql/website/jwql_proj/settings.py +++ b/jwql/website/jwql_proj/settings.py @@ -34,7 +34,8 @@ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # SECURITY WARNING: keep the secret key used in production secret! -SECRET_KEY = get_config()['django_secret_key'] +if not os.environ.get("READTHEDOCS"): + SECRET_KEY = get_config()['django_secret_key'] # SECURITY WARNING: don't run with debug turned on in production! 
DEBUG = True @@ -86,7 +87,8 @@ 'OPTIONS': { 'context_processors': [ 'django.contrib.auth.context_processors.auth', - 'django.contrib.messages.context_processors.messages' + 'django.contrib.messages.context_processors.messages', + 'django.template.context_processors.request' ], }, }, @@ -106,10 +108,7 @@ # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.sqlite3', - 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), - }, + 'default': get_config()['django_database'] } # Password validation @@ -151,6 +150,15 @@ STATIC_URL = '/static/' STATICFILES_DIRS = [ - os.path.join(BASE_DIR, "static/"), + os.path.join(BASE_DIR, "apps", "jwql", "static/"), get_config()['jwql_dir'] ] + +# Use integer for auto primary key, as was default before django 3.2 +DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' + +# Add trusted origins for CSRF origin checking +CSRF_TRUSTED_ORIGINS = ['https://jwql.stsci.edu', + 'https://jwql-test.stsci.edu', + 'https://jwql-dev.stsci.edu', + 'https://127.0.0.1'] diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..24f701717 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,75 @@ +[project] +name = "jwql" +description = "The James Webb Space Telescope Quicklook Project" +readme = "README.md" +authors = [ + { name = "Matthew Bourque" }, + { name = "Lauren Chambers" }, + { name = "Misty Cracraft" }, + { name = "Mike Engesser" }, + { name = "Mees Fix" }, + { name = "Joe Filippazzo" }, + { name = "Bryan Hilbert" }, +] +keywords = ["astronomy", "python"] +classifiers = ["Programming Language :: Python"] +dependencies = [ + "asdf", + "astropy", + "astroquery", + "bokeh<3", + "crds", + "cryptography", + "django", + "inflection", + "jinja2", + "jsonschema", + "jwst", + "jwst_reffiles", + "matplotlib", + "numpy", + "numpydoc", + "pandas", + "psycopg2-binary", + "pysiaf", + "pyvo", + "scipy", + "sqlalchemy", + "stdatamodels", + "wtforms", +] +dynamic = ["version"] + +[project.optional-dependencies] +test = [ + "pytest", + "pytest-cov", + "pytest-mock", +] +docs = [ + "sphinx", + "sphinx_rtd_theme", + "stsci_rtd_theme", +] + +[project.license] +file = "LICENSE" +content-type = "text/plain" + +[build-system] +requires = ["setuptools>=68.0.0", "numpy", "wheel", "setuptools_scm"] +build-backend = "setuptools.build_meta" + +[tool.setuptools] +include-package-data = true + +[tool.setuptools.packages.find] +namespaces = false + +[tool.setuptools_scm] + +[tool.pytest] +junit_family = "xunit2" + +[tool.pytest.ini_options] +norecursedirs = ["jwql/website/apps/jwql/static"] \ No newline at end of file diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index d2120289b..000000000 --- a/pytest.ini +++ /dev/null @@ -1,2 +0,0 @@ -[pytest] -junit_family = xunit2 diff --git a/requirements.txt b/requirements.txt index bce27ba5f..6a7f670cd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,34 +1,40 @@ -asdf==2.11.0 -astropy==5.0.2 -astroquery==0.4.5 -bandit==1.7.4 -beautifulsoup4==4.10.0 -bokeh==2.4.2 -codecov==2.1.12 -crds==11.10.0 -cryptography==36.0.0 -django==3.2.5 -flake8==4.0.1 +astropy==5.3.3 +astroquery==0.4.6 +bandit==1.7.5 +beautifulsoup4==4.12.2 +bokeh==2.4.3 +celery==5.3.4 +cryptography==41.0.4 +django==4.2.5 inflection==0.5.1 -ipython==8.1.0 -jinja2==3.0.3 -jsonschema==4.4.0 -jwst==1.4.3 -matplotlib==3.5.1 -nodejs==10.13.0 -numpy==1.22.2 -numpydoc==1.2 -pandas==1.4.1 -psycopg2==2.9.3 -pysiaf==0.15.0 -pytest==7.0.1 -pytest-cov==3.0.0 -scipy==1.8.0 -sphinx==4.4.0 
-sphinx_rtd_theme==1.0.0 -sqlalchemy==1.4.31 -stdatamodels==0.4.0 -stsci_rtd_theme==0.0.2 -twine==3.8.0 +ipython==8.16.1 +jinja2==3.1.2 +jsonschema==4.19.1 +jwst==1.12.3 +matplotlib==3.8.0 +nodejs==20.8.0 +numpy==1.25.2 +numpydoc==1.5.0 +pandas==2.1.1 +psycopg2-binary==2.9.7 +pysiaf==0.20.0 +pysqlite3==0.5.2 +pytest==7.4.2 +pytest-cov==4.1.0 +pytest-mock==3.11.1 +pyvo==1.4.2 +pyyaml==6.0.1 +redis==5.0.0 +ruff==0.0.292 +scipy==1.9.3 +selenium==4.13.0 +setuptools==68.2.2 +sphinx==7.2.6 +sphinx_rtd_theme==1.3.0 +sqlalchemy==2.0.21 +stdatamodels==1.8.3 +stsci_rtd_theme==1.0.0 +twine==4.0.2 +vine==5.0.0 wtforms==3.0.1 git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles diff --git a/rtd_requirements.txt b/rtd_requirements.txt index 98926a73c..abac6b128 100644 --- a/rtd_requirements.txt +++ b/rtd_requirements.txt @@ -1,12 +1,15 @@ -cython==0.29.28 -django==3.2.5 -docutils==0.18.1 -flake8==4.0.1 -jwst==1.4.3 -jwql==1.0.0 -pygments==2.11.2 -pytest==7.0.1 -sphinx>=2 -stsci_rtd_theme==0.0.2 +sphinx_automodapi>=0.15.0 +bokeh==2.4.3 +celery==5.3.4 +cython>=3.0.0 +django==4.2.5 +docutils>=0.18.1 +jwst==1.12.3 +pygments==2.16.1 +pytest==7.4.2 +redis==5.0.0 +selenium==4.13.0 +sphinx==7.2.6 +stsci_rtd_theme==1.0.0 tomli==2.0.1 git+https://github.com/spacetelescope/jwst_reffiles diff --git a/setup.py b/setup.py index ff289a9fd..2101b2a04 100644 --- a/setup.py +++ b/setup.py @@ -1,64 +1,6 @@ import numpy as np from setuptools import setup -from setuptools import find_packages - -VERSION = '1.1.1' - -AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, ' -AUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Shannon Osborne, Maria Pena-Guerrero, Johannes Sahlmann, ' -AUTHORS += 'Ben Sunnquist, Brian York' - -DESCRIPTION = 'The James Webb Space Telescope Quicklook Project' - -REQUIRES = [ - 'asdf>=2.3.3', - 'astropy>=3.2.1', - 'astroquery>=0.3.9', - 'bandit', - 'bokeh', - 'codecov', - 'crds', - 'cryptography', - 'django<=3.1.7', - 'flake8', - 'inflection', - 'ipython', - 'jinja2', - 'jsonschema', - 'jwedb>=0.0.3', - 'jwst', - 'jwst_reffiles', - 'matplotlib', - 'nodejs', - 'numpy', - 'numpydoc', - 'pandas', - 'psycopg2', - 'pysiaf', - 'pytest', - 'pytest-cov', - 'scipy', - 'sphinx', - 'sphinx_rtd_theme', - 'sqlalchemy', - 'stdatamodels', - 'stsci_rtd_theme', - 'twine', - 'wtforms' -] setup( - name='jwql', - version=VERSION, - description=DESCRIPTION, - url='https://github.com/spacetelescope/jwql.git', - author=AUTHORS, - author_email='jwql@stsci.edu', - license='BSD', - keywords=['astronomy', 'python'], - classifiers=['Programming Language :: Python'], - packages=find_packages(), - install_requires=REQUIRES, - include_package_data=True, include_dirs=[np.get_include()], ) diff --git a/style_guide/README.md b/style_guide/README.md index cf05e8a79..3ea777048 100644 --- a/style_guide/README.md +++ b/style_guide/README.md @@ -2,7 +2,7 @@ ================== This document serves as a style guide for all `jwql` software development. Any requested contribution to the `jwql` code repository should be checked against this guide, and any violation of the guide should be fixed before the code is committed to -the `master` or `develop` branch. Please refer to the accompanying [`example.py`](https://github.com/spacetelescope/jwql/blob/master/style_guide/example.py) script for a example code that abides by this style guide. +the `master` or `develop` branch. 
Please refer to the accompanying [`example.py`](https://github.com/spacetelescope/jwql/blob/master/style_guide/example.py) script for an example of code that abides by this style guide. Prerequisite Reading -------------------- @@ -42,7 +42,7 @@ The following items should never be committed in the `jwql` source code or GitHu If `jwql` code needs to be aware of this information, it should be stored in a configuration file that is not part of the `jwql` repository. -Additionally, developers of this project should be mindful of application security risks, and should adhere to the [OWASP Top 10](https://www.owasp.org/images/7/72/OWASP_Top_10-2017_%28en%29.pdf.pdf) as best possible. +Additionally, developers of this project should be mindful of application security risks, and should adhere to the [OWASP Top 10](https://www.owasp.org/images/7/72/OWASP_Top_10-2017_%28en%29.pdf.pdf) as well as possible. `jwql`-specific Code Standards @@ -50,7 +50,7 @@ Additionally, developers of this project should be mindful of application securi `jwql` code shall adhere to the `PEP8` conventions save for the following exceptions: - - Lines of code need not to be restricted to 79 characters. However, it is encouraged to break up obnoxiously long lines into several lines if it benefits the overall readability of the code + - Lines of code need not be restricted to 79 characters. However, it is encouraged to break up obnoxiously long lines into several lines if it benefits the overall readability of the code Additionally, the code shall adhere to the following special guidelines: @@ -63,8 +63,8 @@ Additionally, developers of this project should be mindful of application securi `jwql` code shall adhere to the `PEP257` and `numpydoc` conventions. The following are further recommendations: -- Each module should have at minimum a description, `Authors` and `Use` section. -- Each function/method should have at minimum a description, `Parameters` (if necessary), and `Returns` (if necessary) sections +- Each module should have at minimum a description, and `Authors` and `Use` sections. +- Each function/method should have at minimum a description, and `Parameters` (if necessary) and `Returns` (if necessary) sections. `jwql`-Specific Logging Standards @@ -77,7 +77,7 @@ Additionally, developers of this project should be mindful of application securi To the extent possible, `jwql` shall define frequently-used variable types/values consistently. A list of adopted standards is provided below: -- **JWST instrument names**: In all internal references and structures (e.g. dictionaries) instrument names shall be all lower-case strings, i.e. one of `fgs`, `miri`, `niriss`, `nircam`, `nirspec`. When variations are required for interfaces, e.g. `Nircam` for MAST, `NIRCam` or `NIRCAM` for SIAF, etc. these should be defined as dictionaries in [`jwql/utils/constants.py`](https://github.com/spacetelescope/jwql/blob/master/jwql/utils/constants.py) and imported from there. +- **JWST instrument names**: In all internal references and structures (e.g. dictionaries) instrument names shall be all lower-case strings, e.g. one of `fgs`, `miri`, `niriss`, `nircam`, `nirspec`. When variations are required for interfaces, e.g. `Nircam` for MAST, `NIRCam` or `NIRCAM` for SIAF, etc. these should be defined as dictionaries in [`jwql/utils/constants.py`](https://github.com/spacetelescope/jwql/blob/master/jwql/utils/constants.py) and imported from there. 
- **Program/proposal identifiers**: JWST program IDs shall be stored and referred to internally as integers and parsed to strings only when needed. For example, the inputs `"001144"` and `"1144"` shall both be converted to an integer variable with value `1144`. diff --git a/style_guide/accessibility_guidelines.md b/style_guide/accessibility_guidelines.md index 060700f06..52322bba4 100644 --- a/style_guide/accessibility_guidelines.md +++ b/style_guide/accessibility_guidelines.md @@ -17,7 +17,7 @@ Every contribution to the web application must include the following features: - When plotting data, use both colors AND symbols to distinguish datasets - Avoid using images of text - Text - - Ensure sure headers are in order (i.e. `
<h1>` before `<h2>`, `<h2>` before `<h3>`, etc.) and do not skip (i.e. `<h3>` comes after `<h2>`, not `<h1>`)
+   - Ensure headers are in order (i.e. `<h1>` before `<h2>`, `<h2>` before `<h3>`, etc.) and do not skip (i.e. `<h3>` comes after `<h2>`, not `<h1>`)
- Don't write explicit navigation instructions involving placement or direction (e.g. "in the upper right corner") - Differentiate links from normal text (different color and/or underline) - Describe the link purpose in link text (e.g. avoid using "here" as link text) diff --git a/style_guide/example.py b/style_guide/example.py index 1d2e80f08..47db3b3a0 100644 --- a/style_guide/example.py +++ b/style_guide/example.py @@ -3,9 +3,9 @@ The module docstring should have a one line description (as above) as well as a more detailed description in a paragraph below the one line -description (i.e. this). Module dosctring lines should be limited to -72 characters. Monospace font can be achived with ``two single -forward-apostrophes``. The description should provided a detailed +description (e.g. this). Module docstring lines should be limited to +72 characters. Monospace font can be achieved with ``two single +backticks``. The description should provide a detailed overview of the purpose of the module (what does it do) and how it achieves this purpose (how does it do it). @@ -17,7 +17,7 @@ Use --- - This module can be executed via the command line as such: + This module can be executed via the command line as follows: :: python example.py [path] [-f|--filter filter] @@ -28,7 +28,7 @@ Optional arguments: - ``-f|--filter`` - The filter to process. if not provided, the + ``-f|--filter`` - The filter to process. If not provided, the defult value is "F606W". Dependencies @@ -46,7 +46,7 @@ Here is where any references to external sources related to the code can be listed or described. For example: - Code adopted from IDL routine written by Hilbert et al., 2009. + Code adapted from IDL routine written by Hilbert et al., 2009. Notes -----
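To tie together the docstring conventions shown for `example.py` above, here is a minimal module docstring skeleton following the sections the style guide calls for (the module name, author, and arguments are placeholders):

```python
"""One-line description of what this placeholder module does.

A more detailed paragraph, wrapped at 72 characters, describing what
the module does and how it does it. Monospace text is written with
``double backticks``.

Authors
-------

    - Placeholder Author

Use
---

    This module can be executed via the command line as follows:
    ::

        python placeholder_module.py [path] [-f|--filter filter]
"""
```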