diff --git a/.github/workflows/test-on-push-to-main.yml b/.github/workflows/test-on-push-to-main.yml
new file mode 100644
index 00000000..81362735
--- /dev/null
+++ b/.github/workflows/test-on-push-to-main.yml
@@ -0,0 +1,76 @@
+name: 'test-on-push-to-main'
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+ branches:
+ - main
+
+jobs:
+ test-typescript-codebase:
+ runs-on: ubuntu-latest
+ steps:
+ # check-out repo and set-up NodeJS
+ - name: Check out repository
+ uses: actions/checkout@v3
+ - name: Set up NodeJS with Yarn
+ uses: actions/setup-node@v3
+ with:
+ node-version: '14'
+ cache: 'yarn'
+ cache-dependency-path: packages/ui/yarn.lock
+ - name: Install dependencies
+ run: yarn install
+ working-directory: packages/ui
+ - name: Build frontend
+ run: yarn build
+ working-directory: packages/ui
+
+ test-python-codebase:
+ runs-on: ubuntu-latest
+ steps:
+ # check-out repo and set-up python
+ - name: Check out repository
+ uses: actions/checkout@v3
+ - name: Set up Python 3.10.6
+ id: setup-python
+ uses: actions/setup-python@v3
+ with:
+ python-version: 3.10.6
+
+ # install & configure poetry
+ - name: Install Poetry
+ uses: snok/install-poetry@v1
+ with:
+ virtualenvs-create: true
+ virtualenvs-in-project: true
+ installer-parallel: true
+
+ # load cached venv if cache exists
+ - name: Load cached venv
+ id: cached-poetry-dependencies
+ uses: actions/cache@v3
+ with:
+ path: .venv
+ key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('poetry.lock') }}
+
+ # install dependencies if cache does not exist
+ - name: Install dependencies
+ if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
+ run: poetry install --no-interaction --no-root
+
+ # install your root project, if required
+ - name: Install library
+ run: poetry install --no-interaction
+
+ # run test suite
+ - name: Run mypy static type analysis
+ run: |
+ source .venv/bin/activate
+ bash scripts/run_type_analysis.sh
+ - name: Run pytest tests
+ run: |
+ source .venv/bin/activate
+ pytest -m "ci" --cov=packages tests
+ coverage report
diff --git a/.gitignore b/.gitignore
index a7eaec88..167aa7fc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,10 +3,12 @@
*.pyc
.pytest_cache
__pycache__
+.coverage
+test-tmp/
# config
config/config.json
-config/config.original.json
+config/config.tmp.json
config/*.lock
.env
packages/cli/alias/pyra-cli.bat
@@ -31,8 +33,9 @@ runtime-data/
pyra-core-process-state.json
logs/persistent-state.json
logs/activity/*.json
-logs/vbdsd/*.jpg
-logs/vbdsd-autoexposure/*.jpg
+logs/**/*.jpg
+!logs/helios/.gitkeep
+logs/helios/*
# pyra ui
packages/electron-ui/node_modules/
@@ -43,4 +46,6 @@ packages/ui/node_modules/
# development
.vscode/
.idea
-hidden
\ No newline at end of file
+hidden
+website/.docusaurus
+website/node_modules
\ No newline at end of file
diff --git a/README.md b/README.md
index 15be3c1c..9c04731d 100644
--- a/README.md
+++ b/README.md
@@ -1,58 +1,33 @@
-**Work in progress! Do not use it yet.**
+# PYRA
-
-
-# Pyra Version 4
-
-## Set up with
-
-Dependency management using https://python-poetry.org/.
-
-```bash
-# create a virtual environment (copy of the python interpreter)
-python3.10 -m venv .venv
-
-# activate virtual environment
-source .venv/bin/activate # unix
-.venv\Scripts\activate.bat # windows
-
-# when your venv is activated your command line has a (.venv) prefix
-# install dependencies using poetry
-poetry install
-```
+**For installation, see https://github.com/tum-esm/pyra-setup-tool**
-## Configuration Files
-
-Two types of config files:
+## Repository Management & CI
-1. **`setup.json`** contains all information about the static setup: Which parts does the enclosure consist of? This should be written once and only changes when the hardware changes.
-2. **`parameters.json`** contains all dynamic parameters that can be set when operating pyra. This should be manipulated either via the CLI (coming soon) or the graphical user interface (coming soon, similar to Pyra version <= 3).
+**Branches:** `development-...`, `integration-x.y.z`, `main`, `release`, `prerelease`
-For each file, there is a `*.default.json` file present in the repository. A full reference can be found here soon.
+**Hierarchy:** `development-...` contains changes in active development and will be merged into `integration-x.y.z`. `integration-x.y.z` is used during active integration on the stations and will be merged into `main`. `main` contains the latest running version that passed the integration and will be merged into `release` once enough changes have accumulated. Any branch can be released into `prerelease` to run the CI-Pipeline on demand. `prerelease` will not be merged into anything else and is just used for development purposes.
-
+**Continuous Integration:** The CI-Pipeline runs every time a commit/a series of commits is added to the `release` branch. The CI compiles and bundles the frontend code into an installable windows-application. Then it creates a new release draft and attaches the `.msi` file to the draft. We can then manually add the release description and submit the release.
-## CLI
+**Testing (not in an active CI):** We could add automated tests to the main- and integration branches. However, most things we could test make use of OPUS, Camtracker, Helios, or the enclosure, hence we can only do a subset of our tests in an isolated CI environment without the system present.
-_documentation coming soon_
+**Issues:** Things we work on are managed via issues - which are bundled into milestones (each milestone represents a release). The issues should be closed once they are on the `main` branch via commit messages ("closes #87", "fixes #70", etc.; see [this list of keywords](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword)). Issues that have been finished but are not on the `main` branch yet can be labeled using the white label "implemented". This way, we can oversee incomplete issues, but don't forget to merge them.
-Make `pyra-cli` command available:
+
-```bash
-alias pyra-cli=".../.venv/bin/python .../packages/cli/main.py"
-```
+## Elements
-TODO: Find a way to set up autocompletion on the `pyra-cli` command.
+### FileLocks
-
+Since we have parallel processes interacting with state, config, and logs, we need to control the access to these resources to avoid race conditions. We use the python module [filelock](https://pypi.org/project/filelock/) for this. Before working with one of these resources, a process has to acquire a file lock for the respective `.state.lock`/`.config.lock`/`.logs.lock` file. When it cannot acquire a lock for 10 seconds, it throws a `TimeoutError`.
-## Graphical User Interface
+When running into a deadlock with timeout errors (this has never happened to us yet), the CLI command `pyra-cli remove-filelocks` removes all present lock files.
-_documentation coming soon_
+### Version numbers
-Less Secure Apps have been deactivated.
-https://support.google.com/accounts/answer/6010255?hl=de&visit_id=637914296292859831-802637670&p=less-secure-apps&rd=1
+Versions up to `4.0.4` are alpha and beta versions that should not be used regularly. PYRA can be generally used starting from version `4.0.5`.
-Solution: Use "App passwords", requires 2FA
+Inside the codebase, the version number is included 3 times: `pyproject.toml`, `packages/ui/package.json`, `packages/ui/src-tauri/tauri.conf.json`. The script `scripts/sync_version_numbers.py` takes the version number from the `.toml` file and pastes it into the other locations. This script can be run in a [git-hook](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks).
diff --git a/config/config.default.json b/config/config.default.json
index 22e5196d..8ccf8c5e 100644
--- a/config/config.default.json
+++ b/config/config.default.json
@@ -1,5 +1,6 @@
{
"general": {
+ "version": "4.0.5",
"seconds_per_core_interval": 30,
"test_mode": false,
"station_id": "...",
@@ -34,7 +35,7 @@
"measurement_triggers": {
"consider_time": true,
"consider_sun_elevation": true,
- "consider_vbdsd": false,
+ "consider_helios": false,
"start_time": {
"hour": 7,
"minute": 0,
@@ -48,5 +49,6 @@
"min_sun_elevation": 0
},
"tum_plc": null,
- "vbdsd": null
+ "helios": null,
+ "upload": null
}
diff --git a/config/vbdsd.config.default.json b/config/helios.config.default.json
similarity index 78%
rename from config/vbdsd.config.default.json
rename to config/helios.config.default.json
index 15c13fd3..ab085e81 100644
--- a/config/vbdsd.config.default.json
+++ b/config/helios.config.default.json
@@ -3,5 +3,6 @@
"evaluation_size": 15,
"seconds_per_interval": 6,
"measurement_threshold": 0.6,
+ "edge_detection_threshold": 0.02,
"save_images": false
}
diff --git a/config/upload.config.default.json b/config/upload.config.default.json
new file mode 100644
index 00000000..6ed79785
--- /dev/null
+++ b/config/upload.config.default.json
@@ -0,0 +1,12 @@
+{
+ "host": "1.2.3.4",
+ "user": "...",
+ "password": "...",
+ "upload_ifgs": false,
+ "src_directory_ifgs": "...",
+ "dst_directory_ifgs": "...",
+ "remove_src_ifgs_after_upload": false,
+ "upload_helios": false,
+ "dst_directory_helios": "...",
+ "remove_src_helios_after_upload": true
+}
diff --git a/logs/vbdsd-autoexposure/.gitkeep b/logs/helios-autoexposure/.gitkeep
similarity index 100%
rename from logs/vbdsd-autoexposure/.gitkeep
rename to logs/helios-autoexposure/.gitkeep
diff --git a/logs/vbdsd/.gitkeep b/logs/helios/.gitkeep
similarity index 100%
rename from logs/vbdsd/.gitkeep
rename to logs/helios/.gitkeep
diff --git a/packages/cli/alias/pyra-cli.example.bat b/packages/cli/alias/pyra-cli.example.bat
deleted file mode 100644
index c53ff007..00000000
--- a/packages/cli/alias/pyra-cli.example.bat
+++ /dev/null
@@ -1,5 +0,0 @@
-@echo off
-echo.
-python C:\Users\...\Documents\pyra-4\packages\cli\main.py %
-
-
diff --git a/packages/cli/commands/__init__.py b/packages/cli/commands/__init__.py
new file mode 100644
index 00000000..cd815ff6
--- /dev/null
+++ b/packages/cli/commands/__init__.py
@@ -0,0 +1,6 @@
+from .config import config_command_group
+from .core import core_command_group
+from .logs import logs_command_group
+from .plc import plc_command_group
+from .remove_filelocks import remove_filelocks
+from .state import state_command_group
diff --git a/packages/cli/commands/config.py b/packages/cli/commands/config.py
index 4b7c58f7..febb9ad0 100644
--- a/packages/cli/commands/config.py
+++ b/packages/cli/commands/config.py
@@ -3,6 +3,7 @@
import click
import os
import sys
+from packages.core import types
from packages.core.utils import with_filelock, update_dict_recursively
dir = os.path.dirname
@@ -13,16 +14,19 @@
sys.path.append(PROJECT_DIR)
-from packages.core.utils import ConfigValidation
-error_handler = lambda text: click.echo(click.style(text, fg="red"))
-success_handler = lambda text: click.echo(click.style(text, fg="green"))
-ConfigValidation.logging_handler = error_handler
+
+def print_green(text: str) -> None:
+ click.echo(click.style(text, fg="green"))
+
+
+def print_red(text: str) -> None:
+ click.echo(click.style(text, fg="red"))
@click.command(help="Read the current config.json file.")
@with_filelock(CONFIG_LOCK_PATH)
-def _get_config():
+def _get_config() -> None:
if not os.path.isfile(CONFIG_FILE_PATH):
shutil.copyfile(DEFAULT_CONFIG_FILE_PATH, CONFIG_FILE_PATH)
with open(CONFIG_FILE_PATH, "r") as f:
@@ -31,7 +35,7 @@ def _get_config():
except:
raise AssertionError("file not in a valid json format")
- ConfigValidation.check_structure(content)
+ types.validate_config_dict(content, partial=False, skip_filepaths=True)
click.echo(json.dumps(content))
@@ -41,35 +45,62 @@ def _get_config():
)
@click.argument("content", default="{}")
@with_filelock(CONFIG_LOCK_PATH)
-def _update_config(content: str):
- # The validation itself might print stuff using the error_handler
- if not ConfigValidation.check_partial_config_string(content):
+def _update_config(content: str) -> None:
+ # try to load the dict
+ try:
+ new_partial_json = json.loads(content)
+ except:
+ print_red("content argument is not a valid JSON string")
return
- new_partial_json = json.loads(content)
- with open(CONFIG_FILE_PATH, "r") as f:
- current_json: dict = json.load(f)
+ # validate the dict's integrity
+ try:
+ types.validate_config_dict(new_partial_json, partial=True)
+ except Exception as e:
+ print_red(str(e))
+ return
+ # load the current json file
+ try:
+ with open(CONFIG_FILE_PATH, "r") as f:
+ current_json = json.load(f)
+ except:
+ print_red("Could not load the current config.json file")
+ return
+
+ # merge current config and new partial config
merged_json = update_dict_recursively(current_json, new_partial_json)
with open(CONFIG_FILE_PATH, "w") as f:
json.dump(merged_json, f, indent=4)
- success_handler("Updated config file")
+ print_green("Updated config file")
@click.command(
help=f"Validate the current config.json file.\n\nThe required schema can be found in the documentation."
)
@with_filelock(CONFIG_LOCK_PATH)
-def _validate_current_config():
- # The validation itself might print stuff using the error_handler
- file_is_valid, _ = ConfigValidation.check_current_config_file()
- if file_is_valid:
- success_handler(f"Current config file is valid")
+def _validate_current_config() -> None:
+ # load the current json file
+ try:
+ with open(CONFIG_FILE_PATH, "r") as f:
+ current_json = json.load(f)
+ except:
+ print_red("Could not load the current config.json file")
+ return
+
+ # validate its integrity
+ try:
+ types.validate_config_dict(current_json, partial=False)
+ except Exception as e:
+ print_red(str(e))
+ return
+
+ print_green(f"Current config file is valid")
@click.group()
-def config_command_group():
+def config_command_group() -> None:
pass
diff --git a/packages/cli/commands/core.py b/packages/cli/commands/core.py
index 765607bb..0927eea9 100644
--- a/packages/cli/commands/core.py
+++ b/packages/cli/commands/core.py
@@ -1,12 +1,9 @@
import subprocess
-import time
+from typing import Optional
import click
import os
import psutil
-from packages.core.modules.enclosure_control import EnclosureControl
-from packages.core.modules.opus_measurement import OpusMeasurement
-from packages.core.modules.sun_tracking import SunTracking
-from packages.core.utils import ConfigInterface, Logger
+from packages.core import utils, interfaces, modules
dir = os.path.dirname
PROJECT_DIR = dir(dir(dir(dir(os.path.abspath(__file__)))))
@@ -16,11 +13,16 @@
CORE_SCRIPT_PATH = os.path.join(PROJECT_DIR, "run-pyra-core.py")
SERVER_SCRIPT_PATH = os.path.join(PROJECT_DIR, "packages", "server", "main.py")
-error_handler = lambda text: click.echo(click.style(text, fg="red"))
-success_handler = lambda text: click.echo(click.style(text, fg="green"))
+def print_green(text: str) -> None:
+ click.echo(click.style(text, fg="green"))
-def process_is_running():
+
+def print_red(text: str) -> None:
+ click.echo(click.style(text, fg="red"))
+
+
+def process_is_running() -> Optional[int]:
for p in psutil.process_iter():
try:
arguments = p.cmdline()
@@ -31,8 +33,8 @@ def process_is_running():
return None
-def terminate_processes():
- termination_pids = []
+def terminate_processes() -> list[int]:
+ termination_pids: list[int] = []
for p in psutil.process_iter():
try:
arguments = p.cmdline()
@@ -48,78 +50,80 @@ def terminate_processes():
@click.command(
help="Start pyra-core as a background process. " + "Prevents spawning multiple processes"
)
-def _start_pyra_core():
+def _start_pyra_core() -> None:
existing_pid = process_is_running()
if existing_pid is not None:
- error_handler(f"Background process already exists with PID {existing_pid}")
+ print_red(f"Background process already exists with PID {existing_pid}")
else:
p = subprocess.Popen(
[INTERPRETER_PATH, CORE_SCRIPT_PATH],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
- Logger.log_activity_event("start-core")
- success_handler(f"Started background process with PID {p.pid}")
+ utils.Logger.log_activity_event("start-core")
+ print_green(f"Started background process with PID {p.pid}")
@click.command(help="Stop the pyra-core background process")
-def _stop_pyra_core():
+def _stop_pyra_core() -> None:
termination_pids = terminate_processes()
if len(termination_pids) == 0:
- error_handler("No active process to be terminated")
+ print_red("No active process to be terminated")
else:
- success_handler(
+ print_green(
f"Terminated {len(termination_pids)} pyra-core background "
+ f"processe(s) with PID(s) {termination_pids}"
)
- Logger.log_activity_event("stop-core")
+ utils.Logger.log_activity_event("stop-core")
- config = ConfigInterface.read()
- if config["general"]["test_mode"] or (config["tum_plc"] is None):
+ config = interfaces.ConfigInterface.read()
+ if config["general"]["test_mode"]:
+ print_green("Skipping TUM_PLC, CamTracker, and OPUS in test mode")
return
- config = ConfigInterface().read()
+ config = interfaces.ConfigInterface().read()
- try:
- enclosure = EnclosureControl(config)
- enclosure.force_cover_close()
- enclosure.plc_interface.disconnect()
- success_handler("Successfully closed cover")
- except Exception as e:
- error_handler(f"Failed to close cover: {e}")
+ if config["tum_plc"] is not None:
+ try:
+ enclosure = modules.enclosure_control.EnclosureControl(config)
+ enclosure.force_cover_close()
+ enclosure.plc_interface.disconnect()
+ print_green("Successfully closed cover")
+ except Exception as e:
+ print_red(f"Failed to close cover: {e}")
try:
- tracking = SunTracking(config)
+ tracking = modules.sun_tracking.SunTracking(config)
if tracking.ct_application_running():
tracking.stop_sun_tracking_automation()
- success_handler("Successfully closed CamTracker")
+ print_green("Successfully closed CamTracker")
except Exception as e:
- error_handler(f"Failed to close CamTracker: {e}")
+ print_red(f"Failed to close CamTracker: {e}")
try:
processes = [p.name() for p in psutil.process_iter()]
- for e in ["opus.exe", "OpusCore.exe"]:
- if e in processes:
- exit_code = os.system(f"taskkill /f /im {e}")
+ for executable in ["opus.exe", "OpusCore.exe"]:
+ if executable in processes:
+ exit_code = os.system(f"taskkill /f /im {executable}")
assert (
exit_code == 0
- ), f'taskkill of "{e}" ended with an exit_code of {exit_code}'
- success_handler("Successfully closed OPUS")
+ ), f'taskkill of "{executable}" ended with an exit_code of {exit_code}'
+ print_green("Successfully closed OPUS")
except Exception as e:
- error_handler(f"Failed to close OPUS: {e}")
+ print_red(f"Failed to close OPUS: {e}")
@click.command(help="Checks whether the pyra-core background process is running")
-def _pyra_core_is_running():
+def _pyra_core_is_running() -> None:
existing_pid = process_is_running()
if existing_pid is not None:
- success_handler(f"pyra-core is running with PID {existing_pid}")
+ print_green(f"pyra-core is running with PID {existing_pid}")
else:
- error_handler("pyra-core is not running")
+ print_red("pyra-core is not running")
@click.group()
-def core_command_group():
+def core_command_group() -> None:
pass
diff --git a/packages/cli/commands/logs.py b/packages/cli/commands/logs.py
index c20dcff6..6ecc53ca 100644
--- a/packages/cli/commands/logs.py
+++ b/packages/cli/commands/logs.py
@@ -8,29 +8,34 @@
DEBUG_LOG_FILE = os.path.join(PROJECT_DIR, "logs", "debug.log")
LOG_FILES_LOCK = os.path.join(PROJECT_DIR, "logs", ".logs.lock")
-error_handler = lambda text: click.echo(click.style(text, fg="red"))
-success_handler = lambda text: click.echo(click.style(text, fg="green"))
+
+def print_green(text: str) -> None:
+ click.echo(click.style(text, fg="green"))
+
+
+def print_red(text: str) -> None:
+ click.echo(click.style(text, fg="red"))
@click.command(help="Read the current info.log or debug.log file.")
@click.option("--level", default="INFO", help="Log level INFO or DEBUG")
@with_filelock(LOG_FILES_LOCK)
-def _read_logs(level: str):
+def _read_logs(level: str) -> None:
if level in ["INFO", "DEBUG"]:
with open(INFO_LOG_FILE if level == "INFO" else DEBUG_LOG_FILE, "r") as f:
click.echo("".join(f.readlines()))
else:
- error_handler("Level has to be either INFO or DEBUG.")
+ print_red("Level has to be either INFO or DEBUG.")
@click.command(help="Archive the current log files.")
-def _archive_logs():
+def _archive_logs() -> None:
Logger.archive()
- success_handler("done!")
+ print_green("done!")
@click.group()
-def logs_command_group():
+def logs_command_group() -> None:
pass
diff --git a/packages/cli/commands/plc.py b/packages/cli/commands/plc.py
index 72b2c9dc..db7ca9ab 100644
--- a/packages/cli/commands/plc.py
+++ b/packages/cli/commands/plc.py
@@ -1,11 +1,9 @@
import json
import time
-from typing import Callable
+from typing import Callable, Optional
import click
import os
-from packages.core.modules.enclosure_control import CoverError
-from packages.core.utils import StateInterface, ConfigInterface, PLCInterface, PLCError
-from packages.core.utils import with_filelock
+from packages.core import types, utils, interfaces, modules
dir = os.path.dirname
PROJECT_DIR = dir(dir(dir(dir(os.path.abspath(__file__)))))
@@ -13,37 +11,45 @@
CONFIG_FILE_PATH = os.path.join(PROJECT_DIR, "config", "config.json")
CONFIG_LOCK_PATH = os.path.join(PROJECT_DIR, "config", ".config.lock")
-error_handler = lambda text: click.echo(click.style(text, fg="red"))
-success_handler = lambda text: click.echo(click.style(text, fg="green"))
+def print_green(text: str) -> None:
+ click.echo(click.style(text, fg="green"))
-def get_plc_interface():
- config = ConfigInterface.read()
+
+def print_red(text: str) -> None:
+ click.echo(click.style(text, fg="red"))
+
+
+def get_plc_interface() -> Optional[interfaces.PLCInterface]:
+ config = interfaces.ConfigInterface.read()
plc_interface = None
try:
assert config["tum_plc"] is not None, "PLC not configured"
assert config["tum_plc"]["controlled_by_user"], "PLC is controlled by automation"
- plc_interface = PLCInterface(config)
+ plc_interface = interfaces.PLCInterface(
+ config["tum_plc"]["version"], config["tum_plc"]["ip"]
+ )
plc_interface.connect()
- except (PLCError, AssertionError) as e:
- error_handler(f"{e}")
+ except Exception as e:
+ print_red(f"{e}")
+ return None
return plc_interface
@click.command(help="Read current state from plc.")
@click.option("--no-indent", is_flag=True, help="Do not print the JSON in an indented manner")
-def _read(no_indent):
+def _read(no_indent: bool) -> None:
plc_interface = get_plc_interface()
if plc_interface is not None:
plc_readings = plc_interface.read()
- success_handler(json.dumps(plc_readings.to_dict(), indent=(None if no_indent else 2)))
+ print_green(json.dumps(plc_readings, indent=(None if no_indent else 2)))
plc_interface.disconnect()
@click.command(help="Run plc function 'reset()'")
-def _reset():
+def _reset() -> None:
plc_interface = get_plc_interface()
if plc_interface is not None:
plc_interface.reset()
@@ -54,16 +60,18 @@ def _reset():
time.sleep(2)
running_time += 2
if not plc_interface.reset_is_needed():
- StateInterface.update(
+ interfaces.StateInterface.update(
{"enclosure_plc_readings": {"state": {"reset_needed": False}}}
)
break
assert running_time <= 20, "plc took to long to set reset_needed to false"
- success_handler("Ok")
+ print_green("Ok")
plc_interface.disconnect()
-def wait_until_cover_is_at_angle(plc_interface: PLCInterface, new_cover_angle, timeout=15):
+def wait_until_cover_is_at_angle(
+ plc_interface: interfaces.PLCInterface, new_cover_angle: int, timeout: float = 15
+) -> None:
# waiting until cover is at this angle
running_time = 0
while True:
@@ -71,7 +79,7 @@ def wait_until_cover_is_at_angle(plc_interface: PLCInterface, new_cover_angle, t
running_time += 2
current_cover_angle = plc_interface.get_cover_angle()
if abs(new_cover_angle - current_cover_angle) <= 3:
- StateInterface.update(
+ interfaces.StateInterface.update(
{
"enclosure_plc_readings": {
"actors": {"current_angle": new_cover_angle},
@@ -82,14 +90,14 @@ def wait_until_cover_is_at_angle(plc_interface: PLCInterface, new_cover_angle, t
break
if running_time > timeout:
- raise CoverError(
+ raise modules.enclosure_control.EnclosureControl.CoverError(
f"Cover took too long to move, latest cover angle: {current_cover_angle}"
)
@click.command(help="Run plc function 'move_cover()'")
@click.argument("angle")
-def _set_cover_angle(angle):
+def _set_cover_angle(angle: str) -> None:
plc_interface = get_plc_interface()
if plc_interface is not None:
new_cover_angle = int("".join([c for c in str(angle) if c.isnumeric() or c == "."]))
@@ -102,21 +110,25 @@ def _set_cover_angle(angle):
plc_interface.set_manual_control(False)
wait_until_cover_is_at_angle(plc_interface, new_cover_angle)
- success_handler("Ok")
+ print_green("Ok")
plc_interface.disconnect()
-@with_filelock(CONFIG_LOCK_PATH)
-def enable_user_control_in_config():
+@utils.with_filelock(CONFIG_LOCK_PATH)
+def enable_user_control_in_config() -> None:
with open(CONFIG_FILE_PATH, "r") as f:
- config: dict = json.load(f)
- config["tum_plc"]["controlled_by_user"] = True
- with open(CONFIG_FILE_PATH, "w") as f:
- json.dump(config, f, indent=4)
+ config = json.load(f)
+ types.validate_config_dict(config)
+
+ verified_config: types.ConfigDict = config
+ if verified_config["tum_plc"] is not None:
+ verified_config["tum_plc"]["controlled_by_user"] = True
+ with open(CONFIG_FILE_PATH, "w") as f:
+ json.dump(verified_config, f, indent=4)
@click.command(help="Run plc function 'force_cover_close()'")
-def _close_cover():
+def _close_cover() -> None:
enable_user_control_in_config()
plc_interface = get_plc_interface()
@@ -127,65 +139,66 @@ def _close_cover():
plc_interface.set_manual_control(False)
wait_until_cover_is_at_angle(plc_interface, 0)
- success_handler("Ok")
+ print_green("Ok")
plc_interface.disconnect()
def set_boolean_plc_state(
- state, get_setter_function: Callable[[PLCInterface], Callable[[bool], None]]
-):
+ state: str,
+ get_setter_function: Callable[[interfaces.PLCInterface], Callable[[bool], None]],
+) -> None:
plc_interface = get_plc_interface()
if plc_interface is not None:
assert state in ["true", "false"], 'state has to be either "true" or "false"'
get_setter_function(plc_interface)(state == "true")
- success_handler("Ok")
+ print_green("Ok")
plc_interface.disconnect()
@click.command(help="Run plc function 'set_sync_to_tracker()'")
@click.argument("state")
-def _set_sync_to_tracker(state):
+def _set_sync_to_tracker(state: str) -> None:
set_boolean_plc_state(state, lambda p: p.set_sync_to_tracker)
@click.command(help="Run plc function 'set_auto_temperature()'")
@click.argument("state")
-def _set_auto_temperature(state):
+def _set_auto_temperature(state: str) -> None:
set_boolean_plc_state(state, lambda p: p.set_auto_temperature)
@click.command(help="Run plc function 'set_power_heater()'")
@click.argument("state")
-def _set_heater_power(state):
+def _set_heater_power(state: str) -> None:
set_boolean_plc_state(state, lambda p: p.set_power_heater)
@click.command(help="Run plc function 'set_power_heater()'")
@click.argument("state")
-def _set_camera_power(state):
+def _set_camera_power(state: str) -> None:
set_boolean_plc_state(state, lambda p: p.set_power_camera)
@click.command(help="Run plc function 'set_power_router()'")
@click.argument("state")
-def _set_router_power(state):
+def _set_router_power(state: str) -> None:
set_boolean_plc_state(state, lambda p: p.set_power_router)
@click.command(help="Run plc function 'set_power_spectrometer()'")
@click.argument("state")
-def _set_spectrometer_power(state):
+def _set_spectrometer_power(state: str) -> None:
set_boolean_plc_state(state, lambda p: p.set_power_spectrometer)
@click.command(help="Run plc function 'set_power_computer()'")
@click.argument("state")
-def _set_computer_power(state):
+def _set_computer_power(state: str) -> None:
set_boolean_plc_state(state, lambda p: p.set_power_computer)
@click.group()
-def plc_command_group():
+def plc_command_group() -> None:
pass
diff --git a/packages/cli/commands/remove_filelocks.py b/packages/cli/commands/remove_filelocks.py
index 4a7b642f..56fb5849 100644
--- a/packages/cli/commands/remove_filelocks.py
+++ b/packages/cli/commands/remove_filelocks.py
@@ -5,25 +5,26 @@
PROJECT_DIR = dir(dir(dir(dir(os.path.abspath(__file__)))))
-error_handler = lambda text: click.echo(click.style(text, fg="red"))
-success_handler = lambda text: click.echo(click.style(text, fg="green"))
+def print_green(text: str) -> None:
+ click.echo(click.style(text, fg="green"))
+
+
+def print_red(text: str) -> None:
+ click.echo(click.style(text, fg="red"))
@click.command(
help="Remove all filelocks. Helpful when any of the programs crashed "
+ "during writing to a file. Should not be necessary normally."
)
-def remove_filelocks():
+def remove_filelocks() -> None:
lock_files = [
os.path.join(PROJECT_DIR, "config", ".config.lock"),
+ os.path.join(PROJECT_DIR, "config", ".state.lock"),
os.path.join(PROJECT_DIR, "logs", ".logs.lock"),
- os.path.join(PROJECT_DIR, "state", ".state.lock"),
]
- if input("Are you sure? (y) ").startswith("y"):
- for f in lock_files:
- if os.path.isfile(f):
- os.remove(f)
- success_handler(f"Removing {f}")
- success_handler("Done!")
- else:
- error_handler("Aborting")
+ for f in lock_files:
+ if os.path.isfile(f):
+ os.remove(f)
+ print_green(f"Removing {f}")
+ print_green("Done!")
diff --git a/packages/cli/commands/state.py b/packages/cli/commands/state.py
index 6773bdb1..93faa34c 100644
--- a/packages/cli/commands/state.py
+++ b/packages/cli/commands/state.py
@@ -1,23 +1,20 @@
import json
import click
import os
-from packages.core.utils import StateInterface, with_filelock
+from packages.core import utils, interfaces
dir = os.path.dirname
PROJECT_DIR = dir(dir(dir(dir(os.path.abspath(__file__)))))
STATE_FILE_PATH = os.path.join(PROJECT_DIR, "runtime-data", "state.json")
STATE_LOCK_PATH = os.path.join(PROJECT_DIR, "config", ".state.lock")
-error_handler = lambda text: click.echo(click.style(text, fg="red"))
-success_handler = lambda text: click.echo(click.style(text, fg="green"))
-
@click.command(help="Read the current state.json file.")
@click.option("--no-indent", is_flag=True, help="Do not print the JSON in an indented manner")
-@with_filelock(STATE_LOCK_PATH)
-def _get_state(no_indent):
+@utils.with_filelock(STATE_LOCK_PATH)
+def _get_state(no_indent: bool) -> None:
if not os.path.isfile(STATE_FILE_PATH):
- StateInterface.initialize()
+ interfaces.StateInterface.initialize()
with open(STATE_FILE_PATH, "r") as f:
try:
@@ -33,7 +30,7 @@ def _get_state(no_indent):
@click.group()
-def state_command_group():
+def state_command_group() -> None:
pass
diff --git a/packages/cli/main.py b/packages/cli/main.py
index b7d1f0df..76544ccd 100644
--- a/packages/cli/main.py
+++ b/packages/cli/main.py
@@ -6,25 +6,27 @@
PROJECT_DIR = dir(dir(dir(os.path.abspath(__file__))))
sys.path.append(PROJECT_DIR)
-from packages.cli.commands.config import config_command_group
-from packages.cli.commands.state import state_command_group
-from packages.cli.commands.logs import logs_command_group
-from packages.cli.commands.core import core_command_group
-from packages.cli.commands.plc import plc_command_group
-from packages.cli.commands.remove_filelocks import remove_filelocks
+from packages.cli.commands import (
+ config_command_group,
+ core_command_group,
+ logs_command_group,
+ plc_command_group,
+ remove_filelocks,
+ state_command_group,
+)
@click.group()
-def cli():
+def cli() -> None:
pass
cli.add_command(config_command_group, name="config")
-cli.add_command(state_command_group, name="state")
-cli.add_command(logs_command_group, name="logs")
cli.add_command(core_command_group, name="core")
+cli.add_command(logs_command_group, name="logs")
cli.add_command(plc_command_group, name="plc")
cli.add_command(remove_filelocks, name="remove-filelocks")
+cli.add_command(state_command_group, name="state")
if __name__ == "__main__":
diff --git a/packages/core/README.md b/packages/core/README.md
index eb931bb9..6edaf6ab 100644
--- a/packages/core/README.md
+++ b/packages/core/README.md
@@ -1,8 +1,39 @@
# `pyra-core`
-`pyra-core` is the program that is constantly running on the enclosure and operates it.
+## Codebase structure
+
+### Responsibilities
+
+`types` contains all types used in the codebase. The whole codebase contains static type hints. A static type analysis can be done using `mypy` (see `scripts/`).
+
+`utils` contains all supporting functionality used in one or more places.
+
+`interfaces` includes the "low-level" code to interact with the PLC, the operating system, and the config- and state-files.
+
+`modules` contains the different steps that PYRA Core runs sequentially on the main thread.
+
+`threads` contains the logic that PYRA Core runs in parallel to the main thread.
+
+### Import hierarchy
+
+- `types` doesn't import any other code from PYRA Core
+- `utils` can import `types`
+- `interfaces` can import `types` and `utils`
+- `modules` and `threads` can import `interfaces`, `types`, and `utils`
+- `main.py` can import all of the above
+
+```mermaid
+ graph LR;
+ A["types"] --> B;
+ B["utils"] --> C;
+ C["interfaces"] --> D;
+ C --> E;
+ D["modules"] --> F;
+ E["threads"] --> F["main.py"];
+```
+
+_\* the graph is transitive_
-
## Logging
@@ -10,9 +41,9 @@
All scripts that output messages at runtime should use the `Logger` class:
```python
-from packages.core.utils.logger import Logger
+from packages.core import utils
-logger = Logger()
+logger = utils.Logger()
logger.debug("...")
logger.info("...")
@@ -22,10 +53,10 @@ logger.error("...")
# By default, it will log from a "pyra.core" origin
-logger = Logger()
+logger = utils.Logger()
-# Here, it will log from a "pyra.core.camtracker" origin
-logger = Logger(origin="pyra.core.camtracker")
+# Here, it will log from a "camtracker" origin
+logger = utils.Logger(origin="camtracker")
```
Messages from all log levels can be found in `logs/debug.log`, messages from levels INFO/WARNING/CRITICAL/ERROR can be found in `logs/info.log`.
diff --git a/packages/core/__init__.py b/packages/core/__init__.py
index 33e52140..f45e623e 100644
--- a/packages/core/__init__.py
+++ b/packages/core/__init__.py
@@ -1,3 +1,6 @@
-from .utils.interfaces import config_validation
+from . import types
+from . import utils
+from . import interfaces
from . import modules
+from . import threads
from . import main
diff --git a/packages/core/interfaces/__init__.py b/packages/core/interfaces/__init__.py
new file mode 100644
index 00000000..b8210a9c
--- /dev/null
+++ b/packages/core/interfaces/__init__.py
@@ -0,0 +1,4 @@
+from .state_interface import StateInterface
+from .config_interface import ConfigInterface
+from .plc_interface import PLCInterface
+from .os_interface import OSInterface
diff --git a/packages/core/interfaces/config_interface.py b/packages/core/interfaces/config_interface.py
new file mode 100644
index 00000000..a903b69d
--- /dev/null
+++ b/packages/core/interfaces/config_interface.py
@@ -0,0 +1,28 @@
+import json
+import os
+from typing import Any
+from packages.core import types, utils
+
+dir = os.path.dirname
+PROJECT_DIR = dir(dir(dir(dir(os.path.abspath(__file__)))))
+
+CONFIG_FILE_PATH = os.path.join(PROJECT_DIR, "config", "config.json")
+CONFIG_LOCK_PATH = os.path.join(PROJECT_DIR, "config", ".config.lock")
+
+
+class ConfigInterface:
+ @staticmethod
+ @utils.with_filelock(CONFIG_LOCK_PATH, timeout=10)
+ def read() -> types.ConfigDict:
+ """
+ Read the contents of the current config.json file.
+        The function will validate its integrity and raise
+ an Exception if the file is not valid.
+ """
+ with open(CONFIG_FILE_PATH, "r") as f:
+ new_object: Any = json.load(f)
+ types.validate_config_dict(new_object)
+ config: types.ConfigDict = new_object
+
+ utils.Astronomy.CONFIG = config
+ return config
diff --git a/packages/core/interfaces/os_interface.py b/packages/core/interfaces/os_interface.py
new file mode 100644
index 00000000..95466d2c
--- /dev/null
+++ b/packages/core/interfaces/os_interface.py
@@ -0,0 +1,121 @@
+from typing import Literal
+import psutil
+import datetime
+
+
+class OSInterface:
+ @staticmethod
+ class StorageError(Exception):
+ """Raised when storage is more than 90% full"""
+
+ @staticmethod
+ class LowEnergyError(Exception):
+ """Raised when battery is less than 20% full"""
+
+ @staticmethod
+ def get_cpu_usage() -> list[float]:
+ """returns cpu_percent for all cores -> list [cpu1%, cpu2%,...]"""
+ return psutil.cpu_percent(interval=1, percpu=True) # type: ignore
+
+ @staticmethod
+ def get_memory_usage() -> float:
+ """returns -> tuple (total, available, percent, used, free, active, inactive,
+ buffers, cached, shared, slab)
+ """
+ v_memory = psutil.virtual_memory()
+ return v_memory.percent
+
+ @staticmethod
+ def get_disk_space() -> float:
+ """Returns disk space used in % as float.
+ -> tuple (total, used, free, percent)"""
+ disk = psutil.disk_usage("/")
+ return disk.percent
+
+ @staticmethod
+ def validate_disk_space() -> None:
+ """Raises an error if the diskspace is less than 10%"""
+ if OSInterface.get_disk_space() > 90:
+ raise OSInterface.StorageError(
+ "Disk space is less than 10%. This is bad for the OS stability."
+ )
+
+ # TODO: function is not working as expected. Needs revision.
+ @staticmethod
+ def get_connection_status(
+ ip: str,
+ ) -> str:
+ """
+        Takes ip address as input str, e.g. 10.10.0.4
+ Checks the ip connection for that address.
+ """
+
+ connections = psutil.net_connections(kind="inet4")
+
+ for connection in connections:
+ if connection.laddr:
+ if connection.laddr.ip == ip:
+ return connection.status
+ if connection.raddr:
+ if connection.raddr.ip == ip:
+ return connection.status
+
+ return "NO_INFO"
+
+ @staticmethod
+ def get_system_battery() -> int:
+ """
+ Returns system battery in percent as an integer (1-100).
+ Returns 100 if device has no battery.
+ """
+ battery_state = psutil.sensors_battery()
+ if battery_state is not None:
+ return battery_state.percent
+ return 100
+
+ @staticmethod
+ def validate_system_battery() -> None:
+ """Raises LowEnergyError if system battery runs lower than 20%."""
+ battery_state = psutil.sensors_battery()
+ if battery_state is not None:
+ if battery_state.percent < 20:
+ raise OSInterface.LowEnergyError(
+ "The battery of the system is below 20%. Please check the power supply."
+ )
+
+ @staticmethod
+ def get_last_boot_time() -> str:
+ """Returns last OS boot time."""
+ return datetime.datetime.fromtimestamp(psutil.boot_time()).strftime(
+ "%Y-%m-%d %H:%M:%S"
+ )
+
+ @staticmethod
+ def get_process_status(
+ process_name: str,
+ ) -> Literal[
+ "running",
+ "sleeping",
+ "disk-sleep",
+ "stopped",
+ "tracing-stop",
+ "zombie",
+ "dead",
+ "wake-kill",
+ "waking",
+ "idle",
+ "locked",
+ "waiting",
+ "suspended",
+ "parked",
+ "not-found",
+ ]:
+ """
+ Takes a process name "*.exe" and returns its OS process
+ status (see return types).
+ """
+ for p in psutil.process_iter():
+ if p.name() == process_name:
+ return p.status()
+
+ return "not-found"
diff --git a/packages/core/interfaces/plc_interface.py b/packages/core/interfaces/plc_interface.py
new file mode 100644
index 00000000..d72595a7
--- /dev/null
+++ b/packages/core/interfaces/plc_interface.py
@@ -0,0 +1,429 @@
+from datetime import datetime
+from typing import Literal, Optional
+import snap7
+import time
+import os
+from snap7.exceptions import Snap7Exception
+from packages.core import types, utils, interfaces
+
+logger = utils.Logger(origin="plc-interface")
+dir = os.path.dirname
+PROJECT_DIR = dir(dir(dir(dir(os.path.abspath(__file__)))))
+
+# TODO: use tuples (3 ints vs 4 ints)
+PLC_SPECIFICATION_VERSIONS: dict[Literal[1, 2], types.PlcSpecificationDict] = {
+ 1: {
+ "actors": {
+ "current_angle": [25, 6, 2],
+ "fan_speed": [8, 18, 2],
+ "move_cover": [25, 8, 2],
+ "nominal_angle": [25, 8, 2],
+ },
+ "control": {
+ "auto_temp_mode": [8, 24, 1, 2],
+ "manual_control": [8, 24, 1, 5],
+ "manual_temp_mode": [8, 24, 1, 3],
+ "reset": [3, 4, 1, 5],
+ "sync_to_tracker": [8, 16, 1, 0],
+ },
+ "sensors": {"humidity": [8, 22, 2], "temperature": [8, 20, 2]},
+ "state": {
+ "cover_closed": [25, 2, 1, 2],
+ "motor_failed": [8, 12, 1, 3],
+ "rain": [8, 6, 1, 0],
+ "reset_needed": [3, 2, 1, 2],
+ "ups_alert": [8, 0, 1, 1],
+ },
+ "power": {
+ "camera": [8, 16, 1, 2],
+ "computer": [8, 16, 1, 6],
+ "heater": [8, 16, 1, 5],
+ "router": [8, 16, 1, 3],
+ "spectrometer": [8, 16, 1, 1],
+ },
+ "connections": {
+ "camera": [8, 14, 1, 6],
+ "computer": [8, 14, 1, 3],
+ "heater": [8, 14, 1, 1],
+ "router": [8, 14, 1, 2],
+ "spectrometer": [8, 14, 1, 0],
+ },
+ },
+ 2: {
+ "actors": {
+ "current_angle": [6, 6, 2],
+ "fan_speed": [8, 4, 2],
+ "move_cover": [6, 8, 2],
+ "nominal_angle": [6, 8, 2],
+ },
+ "control": {
+ "auto_temp_mode": [8, 24, 1, 5],
+ "manual_control": [8, 12, 1, 7],
+ "manual_temp_mode": [8, 24, 1, 4],
+ "reset": [3, 4, 1, 5],
+ "sync_to_tracker": [8, 8, 1, 1],
+ },
+ "sensors": {"humidity": [8, 22, 2], "temperature": [8, 16, 2]},
+ "state": {
+ "cover_closed": [6, 16, 1, 1],
+ "motor_failed": None,
+ "rain": [3, 0, 1, 0],
+ "reset_needed": [3, 2, 1, 2],
+ "ups_alert": [8, 13, 1, 6],
+ },
+ "power": {
+ "camera": [8, 8, 1, 4], # K5 Relay
+ "computer": None,
+ "heater": [8, 12, 1, 7], # K3 Relay
+ "router": None, # not allowed
+ "spectrometer": [8, 8, 1, 2], # K4 Relay
+ },
+ "connections": {
+ "camera": None,
+ "computer": [8, 13, 1, 2],
+ "heater": [8, 6, 1, 1],
+ "router": [8, 12, 1, 4],
+ "spectrometer": None,
+ },
+ },
+}
+
+
+class PLCInterface:
+ """https://buildmedia.readthedocs.org/media/pdf/python-snap7/latest/python-snap7.pdf"""
+
+ @staticmethod
+ class PLCError(Exception):
+ """
+ Raised when updating a boolean value on the
+ plc did not change its internal value.
+
+ Can originate from:
+ * set_power_camera/_computer/_heater/_router/_spectrometer
+ * set_sync_to_tracker/_manual_control
+ * set_auto_temperature/_manual_temperature
+ """
+
+ def __init__(self, plc_version: Literal[1, 2], plc_ip: str) -> None:
+ self.plc_version = plc_version
+ self.plc_ip = plc_ip
+ self.specification = PLC_SPECIFICATION_VERSIONS[plc_version]
+
+ # CONNECTION/CLASS MANAGEMENT
+
+ def update_config(self, new_plc_version: Literal[1, 2], new_plc_ip: str) -> None:
+ """
+        Update the internally used config (executed at the
+        beginning of enclosure-control's run-function).
+
+ Reconnecting to PLC, when IP has changed.
+ """
+ if (self.plc_version != new_plc_version) or (self.plc_ip != new_plc_ip):
+ logger.debug("PLC ip has changed, reconnecting now")
+ self.disconnect()
+ self.plc_version = new_plc_version
+ self.plc_ip = new_plc_ip
+ self.specification = PLC_SPECIFICATION_VERSIONS[self.plc_version]
+ self.connect()
+
+ def connect(self) -> None:
+ """
+ Connects to the PLC Snap7. Times out after 30 seconds.
+ """
+ self.plc = snap7.client.Client()
+ start_time = time.time()
+
+ while True:
+ if (time.time() - start_time) > 30:
+ raise Snap7Exception("Connect to PLC timed out.")
+
+ try:
+ self.plc.connect(self.plc_ip, 0, 1)
+ time.sleep(0.2)
+
+ if self.plc.get_connected():
+ logger.debug("Connected to PLC.")
+ return
+
+ self.plc.destroy()
+ self.plc = snap7.client.Client()
+
+ except Snap7Exception:
+ self.plc.destroy()
+ self.plc = snap7.client.Client()
+
+ def disconnect(self) -> None:
+ """
+ Disconnects from the PLC Snap7
+ """
+ try:
+ self.plc.disconnect()
+ self.plc.destroy()
+ logger.debug("Gracefully disconnected from PLC.")
+ except Snap7Exception:
+ self.plc.destroy()
+ logger.debug("Disconnected ungracefully from PLC.")
+
+ def is_responsive(self) -> bool:
+ """Pings the PLC"""
+ return os.system("ping -n 1 " + self.plc_ip) == 0
+
+ # DIRECT READ FUNCTIONS
+
+ def rain_is_detected(self) -> bool:
+ return self.__read_bool(self.specification["state"]["rain"])
+
+ def cover_is_closed(self) -> bool:
+ """
+ Reads the single value "state.cover_closed"
+ """
+ return self.__read_bool(self.specification["state"]["cover_closed"])
+
+ def reset_is_needed(self) -> bool:
+ """
+ Reads the single value "state.reset_needed"
+ """
+ return self.__read_bool(self.specification["state"]["reset_needed"])
+
+ def get_cover_angle(self) -> int:
+ """
+ Reads the single value "actors.current_angle"
+ """
+ return self.__read_int(self.specification["actors"]["current_angle"])
+
+ # BULK READ
+
+ def read(self) -> types.PlcStateDict:
+ """
+ Read the whole state of the PLC
+ """
+
+ # TODO: self.plc.read_multi_vars()
+
+ plc_db_content: dict[int, int] = {}
+ plc_db_size = {1: {3: 6, 8: 26, 25: 10}, 2: {3: 5, 6: 17, 8: 25}}[self.plc_version]
+
+ for db_index, db_size in plc_db_size.items():
+ plc_db_content[db_index] = self.plc.db_read(db_index, 0, db_size)
+ self.__sleep_while_cpu_is_busy()
+
+ logger.debug(f"new plc bulk read: {plc_db_content}")
+
+ def _get_int(spec: Optional[list[int]]) -> Optional[int]:
+ if spec is None:
+ return None
+ return snap7.util.get_int(plc_db_content[spec[0]], spec[1]) # type: ignore
+
+ def _get_bool(spec: Optional[list[int]]) -> Optional[bool]:
+ if spec is None:
+ return None
+ return snap7.util.get_bool(plc_db_content[spec[0]], spec[1], spec[3]) # type: ignore
+
+ s = self.specification
+
+ return {
+ "last_read_time": datetime.now().strftime("%H:%M:%S"),
+ "actors": {
+ "fan_speed": _get_int(s["actors"]["fan_speed"]),
+ "current_angle": _get_int(s["actors"]["current_angle"]),
+ },
+ "control": {
+ "auto_temp_mode": _get_bool(s["control"]["auto_temp_mode"]),
+ "manual_control": _get_bool(s["control"]["manual_control"]),
+ "manual_temp_mode": _get_bool(s["control"]["manual_temp_mode"]),
+ "sync_to_tracker": _get_bool(s["control"]["sync_to_tracker"]),
+ },
+ "sensors": {
+ "humidity": _get_int(s["sensors"]["humidity"]),
+ "temperature": _get_int(s["sensors"]["temperature"]),
+ },
+ "state": {
+ "cover_closed": _get_bool(s["state"]["cover_closed"]),
+ "motor_failed": _get_bool(s["state"]["motor_failed"]),
+ "rain": _get_bool(s["state"]["rain"]),
+ "reset_needed": _get_bool(s["state"]["reset_needed"]),
+ "ups_alert": _get_bool(s["state"]["ups_alert"]),
+ },
+ "power": {
+ "camera": _get_bool(s["power"]["camera"]),
+ "computer": _get_bool(s["power"]["computer"]),
+ "heater": _get_bool(s["power"]["heater"]),
+ "router": _get_bool(s["power"]["router"]),
+ "spectrometer": _get_bool(s["power"]["spectrometer"]),
+ },
+ "connections": {
+ "camera": _get_bool(s["connections"]["camera"]),
+ "computer": _get_bool(s["connections"]["computer"]),
+ "heater": _get_bool(s["connections"]["heater"]),
+ "router": _get_bool(s["connections"]["router"]),
+ "spectrometer": _get_bool(s["connections"]["spectrometer"]),
+ },
+ }
+
+ # LOW LEVEL READ FUNCTIONS
+
+ def __sleep_while_cpu_is_busy(self) -> None:
+ """
+        Initially sleeps 0.5 seconds. If the PLC CPU then reports
+        the "S7CpuStatusRun" state, sleeps for another 2 seconds
+        before returning.
+ """
+ time.sleep(0.5)
+ if str(self.plc.get_cpu_state()) == "S7CpuStatusRun":
+ time.sleep(2)
+
+ def __read_int(self, action: list[int]) -> int:
+ """
+ Reads an INT value in the PLC database.
+
+ action is tuple: db_number, start, size
+ """
+ assert len(action) == 3
+
+ msg: bytearray = self.plc.db_read(*action)
+ value: int = snap7.util.get_int(msg, 0)
+
+ self.__sleep_while_cpu_is_busy()
+
+ return value
+
+ def __write_int(self, action: list[int], value: int) -> None:
+ """Changes an INT value in the PLC database."""
+ assert len(action) == 3
+ db_number, start, size = action
+
+ msg = bytearray(size)
+ snap7.util.set_int(msg, 0, value)
+ self.plc.db_write(db_number, start, msg)
+
+ self.__sleep_while_cpu_is_busy()
+
+ def __read_bool(self, action: list[int]) -> bool:
+ """Reads a BOOL value in the PLC database."""
+ assert len(action) == 4
+ db_number, start, size, bool_index = action
+
+ msg: bytearray = self.plc.db_read(db_number, start, size)
+ value: bool = snap7.util.get_bool(msg, 0, bool_index)
+
+ self.__sleep_while_cpu_is_busy()
+
+ return value
+
+ def __write_bool(self, action: list[int], value: bool) -> None:
+ """Changes a BOOL value in the PLC database."""
+ assert len(action) == 4
+ db_number, start, size, bool_index = action
+
+ msg = self.plc.db_read(db_number, start, size)
+ snap7.util.set_bool(msg, 0, bool_index, value)
+ self.plc.db_write(db_number, start, msg)
+
+ self.__sleep_while_cpu_is_busy()
+
+ # PLC.POWER SETTERS
+
+ def __update_bool(
+ self, new_state: bool, spec: list[int], partial_plc_state: types.PlcStateDictPartial
+ ) -> None:
+ """
+ 1. low-level direct-write new_state to PLC according to spec
+ 2. low-level direct-read of plc's value according to spec
+ 3. raise PLCInterface.PLCError if value is different
+ 4. write update to StateInterface if update was successful
+ """
+ self.__write_bool(spec, new_state)
+ if self.__read_bool(spec) != new_state:
+ raise PLCInterface.PLCError("PLC state did not change")
+
+ # TODO: check whether this results in a circular import
+ interfaces.StateInterface.update({"enclosure_plc_readings": partial_plc_state})
+
+ def set_power_camera(self, new_state: bool) -> None:
+ """Raises PLCInterface.PLCError, if value hasn't been changed"""
+ self.__update_bool(
+ new_state,
+ self.specification["power"]["camera"],
+ {"power": {"camera": new_state}},
+ )
+
+ def set_power_computer(self, new_state: bool) -> None:
+ """Raises PLCInterface.PLCError, if value hasn't been changed"""
+ assert self.specification["power"]["computer"] is not None
+ self.__update_bool(
+ new_state,
+ self.specification["power"]["computer"],
+ {"power": {"computer": new_state}},
+ )
+
+ def set_power_heater(self, new_state: bool) -> None:
+ """Raises PLCInterface.PLCError, if value hasn't been changed"""
+ self.__update_bool(
+ new_state,
+ self.specification["power"]["heater"],
+ {"power": {"heater": new_state}},
+ )
+
+ def set_power_router(self, new_state: bool) -> None:
+ """Raises PLCInterface.PLCError, if value hasn't been changed"""
+ assert self.specification["power"]["router"] is not None
+ self.__update_bool(
+ new_state,
+ self.specification["power"]["router"],
+ {"power": {"router": new_state}},
+ )
+
+ def set_power_spectrometer(self, new_state: bool) -> None:
+ """Raises PLCInterface.PLCError, if value hasn't been changed"""
+ self.__update_bool(
+ new_state,
+ self.specification["power"]["spectrometer"],
+ {"power": {"spectrometer": new_state}},
+ )
+
+ # PLC.CONTROL SETTERS
+
+ def set_sync_to_tracker(self, new_state: bool) -> None:
+ """Raises PLCInterface.PLCError, if value hasn't been changed"""
+ self.__update_bool(
+ new_state,
+ self.specification["control"]["sync_to_tracker"],
+ {"control": {"sync_to_tracker": new_state}},
+ )
+
+ def set_manual_control(self, new_state: bool) -> None:
+ """Raises PLCInterface.PLCError, if value hasn't been changed"""
+ self.__update_bool(
+ new_state,
+ self.specification["control"]["manual_control"],
+ {"control": {"manual_control": new_state}},
+ )
+
+ def set_auto_temperature(self, new_state: bool) -> None:
+ """Raises PLCInterface.PLCError, if value hasn't been changed"""
+ self.__update_bool(
+ new_state,
+ self.specification["control"]["auto_temp_mode"],
+ {"control": {"auto_temp_mode": new_state}},
+ )
+
+ def set_manual_temperature(self, new_state: bool) -> None:
+ """Raises PLCInterface.PLCError, if value hasn't been changed"""
+ self.__update_bool(
+ new_state,
+ self.specification["control"]["manual_temp_mode"],
+ {"control": {"manual_temp_mode": new_state}},
+ )
+
+ def reset(self) -> None:
+ """Does not check, whether the value has been changed"""
+ if self.plc_version == 1:
+ self.__write_bool(self.specification["control"]["reset"], False)
+ else:
+ self.__write_bool(self.specification["control"]["reset"], True)
+
+ # PLC.ACTORS SETTERS
+
+ def set_cover_angle(self, value: int) -> None:
+ """Does not check, whether the value has been changed"""
+ self.__write_int(self.specification["actors"]["move_cover"], value)
diff --git a/packages/core/interfaces/state_interface.py b/packages/core/interfaces/state_interface.py
new file mode 100644
index 00000000..69451e5d
--- /dev/null
+++ b/packages/core/interfaces/state_interface.py
@@ -0,0 +1,161 @@
+import json
+import os
+import shutil
+from packages.core import types, utils
+
+dir = os.path.dirname
+PROJECT_DIR = dir(dir(dir(dir(os.path.abspath(__file__)))))
+
+CONFIG_FILE_PATH = os.path.join(PROJECT_DIR, "config", "config.json")
+CONFIG_LOCK_PATH = os.path.join(PROJECT_DIR, "config", ".config.lock")
+STATE_LOCK_PATH = os.path.join(PROJECT_DIR, "config", ".state.lock")
+
+RUNTIME_DATA_PATH = os.path.join(PROJECT_DIR, "runtime-data")
+STATE_FILE_PATH = os.path.join(PROJECT_DIR, "runtime-data", "state.json")
+
+PERSISTENT_STATE_FILE_PATH = os.path.join(PROJECT_DIR, "logs", "persistent-state.json")
+
+
+EMPTY_STATE_OBJECT: types.StateDict = {
+ "helios_indicates_good_conditions": None,
+ "measurements_should_be_running": False,
+ "enclosure_plc_readings": {
+ "last_read_time": None,
+ "actors": {
+ "fan_speed": None,
+ "current_angle": None,
+ },
+ "control": {
+ "auto_temp_mode": None,
+ "manual_control": None,
+ "manual_temp_mode": None,
+ "sync_to_tracker": None,
+ },
+ "sensors": {
+ "humidity": None,
+ "temperature": None,
+ },
+ "state": {
+ "cover_closed": None,
+ "motor_failed": None,
+ "rain": None,
+ "reset_needed": None,
+ "ups_alert": None,
+ },
+ "power": {
+ "camera": None,
+ "computer": None,
+ "heater": None,
+ "router": None,
+ "spectrometer": None,
+ },
+ "connections": {
+ "camera": None,
+ "computer": None,
+ "heater": None,
+ "router": None,
+ "spectrometer": None,
+ },
+ },
+ "os_state": {
+ "cpu_usage": None,
+ "memory_usage": None,
+ "last_boot_time": None,
+ "filled_disk_space_fraction": None,
+ },
+}
+
+EMPTY_PERSISTENT_STATE_OBJECT: types.PersistentStateDict = {
+ "active_opus_macro_id": None,
+ "current_exceptions": [],
+}
+
+
+class StateInterface:
+ @staticmethod
+ @utils.with_filelock(STATE_LOCK_PATH, timeout=10)
+ def initialize() -> None:
+ """
+ This will create two files:
+
+ 1. runtime-data/state.json: {
+ "helios_indicates_good_conditions": ...,
+ "measurements_should_be_running": ...,
+ "enclosure_plc_readings": {...},
+ "os_state": {...}
+ }
+
+ 2. logs/persistent-state.json: {
+ "active_opus_macro_id": ...,
+ "current_exceptions": []
+ }
+
+ The state.json file will be cleared with every restart
+ of PYRA Core. The persistent-state.json will only be
+ created, when it does not exist yet.
+ """
+
+ # clear runtime-data directory
+ if os.path.exists(RUNTIME_DATA_PATH):
+ shutil.rmtree(RUNTIME_DATA_PATH)
+ os.mkdir(RUNTIME_DATA_PATH)
+
+ # create the state file
+ with open(STATE_FILE_PATH, "w") as f:
+ json.dump(EMPTY_STATE_OBJECT, f, indent=4)
+
+ # possibly create the persistent state file
+ if not os.path.isfile(PERSISTENT_STATE_FILE_PATH):
+ with open(PERSISTENT_STATE_FILE_PATH, "w") as f:
+ json.dump(EMPTY_PERSISTENT_STATE_OBJECT, f, indent=4)
+
+ @staticmethod
+ @utils.with_filelock(STATE_LOCK_PATH, timeout=10)
+ def read() -> types.StateDict:
+ """Read the state file and return its content"""
+ with open(STATE_FILE_PATH, "r") as f:
+ new_object: types.StateDict = json.load(f)
+ types.validate_state_dict(new_object)
+ return new_object
+
+ @staticmethod
+ @utils.with_filelock(STATE_LOCK_PATH, timeout=10)
+ def read_persistent() -> types.PersistentStateDict:
+ """Read the persistent state file and return its content"""
+ with open(PERSISTENT_STATE_FILE_PATH, "r") as f:
+ new_object: types.PersistentStateDict = json.load(f)
+ types.validate_persistent_state_dict(new_object)
+ return new_object
+
+ @staticmethod
+ @utils.with_filelock(STATE_LOCK_PATH, timeout=10)
+ def update(update: types.StateDictPartial) -> None:
+ """
+        Update the state file in place (returns nothing).
+ The update object should only include the properties to be
+ changed in contrast to containing the whole file.
+ """
+
+ with open(STATE_FILE_PATH, "r") as f:
+ current_state = json.load(f)
+
+ new_state = utils.update_dict_recursively(current_state, update)
+ with open(STATE_FILE_PATH, "w") as f:
+ json.dump(new_state, f, indent=4)
+
+ @staticmethod
+ @utils.with_filelock(STATE_LOCK_PATH, timeout=10)
+ def update_persistent(update: types.PersistentStateDictPartial) -> None:
+ """
+        Update the persistent state file in place (returns nothing).
+ The update object should only include the properties to be
+ changed in contrast to containing the whole file.
+ """
+
+ with open(PERSISTENT_STATE_FILE_PATH, "r") as f:
+ current_state = json.load(f)
+ types.validate_persistent_state_dict(current_state)
+
+ new_state = utils.update_dict_recursively(current_state, update)
+ with open(PERSISTENT_STATE_FILE_PATH, "w") as f:
+ json.dump(new_state, f, indent=4)
diff --git a/packages/core/main.py b/packages/core/main.py
index f567384b..7e7cb19d 100644
--- a/packages/core/main.py
+++ b/packages/core/main.py
@@ -1,100 +1,132 @@
import os
import time
-from packages.core import modules
-from packages.core.utils import (
- ConfigInterface,
- StateInterface,
- Logger,
- ExceptionEmailClient,
-)
+from typing import Any, Optional
+from packages.core import types, utils, interfaces, modules, threads
-logger = Logger(origin="main")
+logger = utils.Logger(origin="main")
-def run():
- StateInterface.initialize()
+def update_exception_state(
+ config: types.ConfigDict, current_exceptions: list[str], new_exception: Optional[Exception]
+) -> list[str]:
+ """
+ Take a list of current_exceptions (all exceptions that are
+ present from the last mainloop iteration, possibly empty) and
+ a new_exception (the one that happened in this loop, possibly
+ None).
+
+    If the new_exception is None, all exceptions have been
+    resolved: send a "resolved" email in case the current_exceptions
+ was not empty yet.
+
+ If the new_exception is not None, if it is not already in the
+ list of current_exceptions: append it to that list and send a
+ "new error occured" email.
+ """
+ try:
+ updated_current_exceptions = [*current_exceptions]
+ if new_exception is not None:
+ if type(new_exception).__name__ not in current_exceptions:
+ updated_current_exceptions.append(type(new_exception).__name__)
+ utils.ExceptionEmailClient.handle_occured_exception(config, new_exception)
+ if len(current_exceptions) == 0:
+ utils.Logger.log_activity_event("error-occured")
+ else:
+ if len(current_exceptions) > 0:
+ updated_current_exceptions = []
+ utils.ExceptionEmailClient.handle_resolved_exception(config)
+ logger.info(f"All exceptions have been resolved.")
+ utils.Logger.log_activity_event("errors-resolved")
+
+    # NOTE(review): persists current_exceptions, not updated_current_exceptions — confirm intended
+ interfaces.StateInterface.update_persistent({"current_exceptions": current_exceptions})
+ return updated_current_exceptions
+
+ except Exception as e:
+ logger.exception(e)
+ return current_exceptions
+
+
+def run() -> None:
+ """
+ The mainloop of PYRA Core. This function will loop infinitely.
+    It loads the config file, validates it, runs every module one by
+ one, and possibly restarts the upload- and helios-thread.
+ """
+ interfaces.StateInterface.initialize()
logger.info(f"Starting mainloop inside process with PID {os.getpid()}")
+ # Loop until a valid config has been found. Without
+    # a valid config, the mainloop cannot initialize
while True:
try:
- _CONFIG = ConfigInterface.read()
+ config = interfaces.ConfigInterface.read()
break
- except AssertionError as e:
- logger.error(f"{e}")
- logger.error(f"Invalid config, waiting 10 seconds")
+ except Exception as e:
+ logger.exception(e)
+ logger.error("Invalid config, waiting 10 seconds")
time.sleep(10)
- _modules = [
- modules.measurement_conditions.MeasurementConditions(_CONFIG),
- modules.enclosure_control.EnclosureControl(_CONFIG),
- modules.sun_tracking.SunTracking(_CONFIG),
- modules.opus_measurement.OpusMeasurement(_CONFIG),
- modules.system_checks.SystemChecks(_CONFIG),
+ # these modules will be executed one by one in each
+ # mainloop iteration
+ mainloop_modules: Any = [
+ modules.measurement_conditions.MeasurementConditions(config),
+ modules.enclosure_control.EnclosureControl(config),
+ modules.sun_tracking.SunTracking(config),
+ modules.opus_measurement.OpusMeasurement(config),
+ modules.system_checks.SystemChecks(config),
]
- vbdsd_thread = modules.vbdsd.VBDSD_Thread()
- current_exceptions = StateInterface.read(persistent=True)["current_exceptions"]
+ # these thread classes always exist and start their
+ # dedicated mainloop in a parallel thread if the
+    # respective service is configured. The threads themselves
+ # load the config periodically and stop themselves
+ helios_thread_instance = threads.HeliosThread(config)
+ upload_thread_instance = threads.UploadThread(config)
+
+ current_exceptions = interfaces.StateInterface.read_persistent()["current_exceptions"]
while True:
start_time = time.time()
logger.info("Starting iteration")
+ # load config at the beginning of each mainloop iteration
try:
- _CONFIG = ConfigInterface.read()
- except AssertionError as e:
- logger.error(f"Invalid config, waiting 10 seconds")
+ config = interfaces.ConfigInterface.read()
+ except Exception as e:
+ logger.error("Invalid config, waiting 10 seconds")
time.sleep(10)
continue
- if not _CONFIG["general"]["test_mode"]:
- # Start or stop VBDSD in a thread
- vbdsd_should_be_running = (
- _CONFIG["vbdsd"] is not None
- and _CONFIG["measurement_triggers"]["consider_vbdsd"]
- )
- if vbdsd_should_be_running and not vbdsd_thread.is_running():
- vbdsd_thread.start()
- if not vbdsd_should_be_running and vbdsd_thread.is_running():
- vbdsd_thread.stop()
- else:
+ # check whether the two threads are (not) running
+ # possibly (re)start each thread
+ helios_thread_instance.update_thread_state(config)
+ upload_thread_instance.update_thread_state(config)
+
+ if config["general"]["test_mode"]:
logger.info("pyra-core in test mode")
- logger.debug("Skipping VBDSD_Thread in test mode")
+ logger.debug("Skipping HeliosThread and UploadThread in test mode")
+ # loop over every module, when one of the modules
+ # encounters an exception, this inner loop stops
+ # and the exception will be processed (logs, emails)
new_exception = None
try:
- for module in _modules:
- module.run(_CONFIG)
+ for m in mainloop_modules:
+ m.run(config)
except Exception as e:
new_exception = e
logger.exception(new_exception)
- try:
- new_current_exceptions = [*current_exceptions]
-
- if new_exception is not None:
- if type(new_exception).__name__ not in current_exceptions:
- new_current_exceptions.append(type(new_exception).__name__)
- ExceptionEmailClient.handle_occured_exception(_CONFIG, new_exception)
- if len(current_exceptions) == 0:
- Logger.log_activity_event("error-occured")
- else:
- if len(current_exceptions) > 0:
- new_current_exceptions = []
- ExceptionEmailClient.handle_resolved_exception(_CONFIG)
- logger.info(f"All exceptions have been resolved.")
- Logger.log_activity_event("errors-resolved")
-
- # if no errors until now
- current_exceptions = [*new_current_exceptions]
- StateInterface.update({"current_exceptions": current_exceptions}, persistent=True)
- except Exception as e:
- logger.exception(e)
-
- logger.info("Ending iteration")
+ # update the list of currently present exceptions
+ # send error emails on new exceptions, send resolved
+ # emails when no errors are present anymore
+ current_exceptions = update_exception_state(config, current_exceptions, new_exception)
# wait rest of loop time
+ logger.info("Ending iteration")
elapsed_time = time.time() - start_time
- time_to_wait = _CONFIG["general"]["seconds_per_core_interval"] - elapsed_time
+ time_to_wait = config["general"]["seconds_per_core_interval"] - elapsed_time
if time_to_wait > 0:
logger.debug(f"Waiting {round(time_to_wait, 2)} second(s)")
time.sleep(time_to_wait)
diff --git a/packages/core/modules/__init__.py b/packages/core/modules/__init__.py
index 0a662298..ef90122c 100644
--- a/packages/core/modules/__init__.py
+++ b/packages/core/modules/__init__.py
@@ -4,5 +4,4 @@
opus_measurement,
sun_tracking,
system_checks,
- vbdsd,
)
diff --git a/packages/core/modules/enclosure_control.py b/packages/core/modules/enclosure_control.py
index 0fa9e957..a9ddea7d 100644
--- a/packages/core/modules/enclosure_control.py
+++ b/packages/core/modules/enclosure_control.py
@@ -1,27 +1,39 @@
import time
from snap7.exceptions import Snap7Exception
-from packages.core.utils import StateInterface, Logger, Astronomy, PLCInterface, PLCError
+from packages.core import types, utils, interfaces
-logger = Logger(origin="enclosure-control")
-
-
-class CoverError(Exception):
- pass
-
-
-class MotorFailedError(Exception):
- pass
+logger = utils.Logger(origin="enclosure-control")
class EnclosureControl:
"""
- https://buildmedia.readthedocs.org/media/pdf/python-snap7/latest/python-snap7.pdf
+ EnclosureControl allows to interact with TUM made weather protected enclosures that allow a
+ 24/7 deployment of the FTIR spectrometer EM27/Sun in the field. The class takes the latest
+ decision from measurement_conditions.py (StateInterface: measurements_should_be_running) and
+ communicates with the enclosure's built in Siemens S7 PLC to read and write parameters to its
+ database (PLCInterface). Additionally, it powers down the spectrometer during dusk to extend
+ the overall spectrometer lifetime. During dawn, it powers up the spectrometer to prepare and
+ warm it up for the next measurement day. At initialization, the PLC is set to control the
+ ambient enclosure temperature in automatic mode.
+ During flank changes of measurements_should_be_running it either tells the enclosure to
+ open up the cover to allow direct sunlight to hit the CamTracker mirrors or close the cover
+ to protect the instrument. Instrument protection from bad weather conditions is always
+ prioritised over a slight maximization of measurement uptime.
"""
- def __init__(self, initial_config: dict):
+ @staticmethod
+ class CoverError(Exception):
+ pass
+
+ @staticmethod
+ class MotorFailedError(Exception):
+ pass
+
+ def __init__(self, initial_config: types.ConfigDict):
self.config = initial_config
self.initialized = False
self.last_plc_connection_time = time.time()
+ self.motor_reset_needed_in_last_iteration = False
if self.config["general"]["test_mode"]:
return
@@ -29,8 +41,13 @@ def __init__(self, initial_config: dict):
logger.debug("Skipping EnclosureControl without a TUM PLC")
return
- def _initialize(self):
- self.plc_interface = PLCInterface(self.config)
+ def __initialize(self) -> None:
+ """Initializes the default PLC settings at startup or activation in config."""
+ assert self.config["tum_plc"] is not None
+
+ self.plc_interface = interfaces.PLCInterface(
+ self.config["tum_plc"]["version"], self.config["tum_plc"]["ip"]
+ )
self.plc_interface.connect()
self.plc_interface.set_auto_temperature(True)
self.plc_state = self.plc_interface.read()
@@ -38,81 +55,118 @@ def _initialize(self):
self.last_cycle_automation_status = 0
self.initialized = True
- def run(self, new_config: dict) -> None:
+ def run(self, new_config: types.ConfigDict) -> None:
+ """Called in every cycle of the main loop.
+ Updates enclosure state based on the current automation status.
+ """
self.config = new_config
+ # Skips the rest of run if module not activated in config
if self.config["tum_plc"] is None:
logger.debug("Skipping EnclosureControl without a TUM PLC")
return
+ # Allows to run Pyra-4 without measurement system hardware present
if self.config["general"]["test_mode"]:
logger.debug("Skipping EnclosureControl in test mode")
return
logger.info("Running EnclosureControl")
+ # Check for current measurement status
+ self.measurements_should_be_running = interfaces.StateInterface.read()[
+ "measurements_should_be_running"
+ ]
+
+ # Updates the current loop to the latest config.
+ # Performs a connect to the PLC for the duration of this loop.
+ # Initializes if first call of the module.
try:
if not self.initialized:
- self._initialize()
+ self.__initialize()
else:
- self.plc_interface.update_config(self.config)
+ self.plc_interface.update_config(
+ self.config["tum_plc"]["version"], self.config["tum_plc"]["ip"]
+ )
self.plc_interface.connect()
# TODO: possibly end function if plc is not connected
- # get the latest PLC interface state and update with current config
+ # Reads and returns the latest PLC database states
try:
self.plc_state = self.plc_interface.read()
except Snap7Exception:
logger.warning("Could not read PLC state in this loop.")
- # read current state of actors and sensors in enclosure
+ # Push the latest readout of the PLC state to the StateInterface
logger.info("New continuous readings.")
- StateInterface.update({"enclosure_plc_readings": self.plc_state.to_dict()})
+ interfaces.StateInterface.update({"enclosure_plc_readings": self.plc_state})
+
+ # Check for critial error: Motor Failed Flag in PLC. In case of present
+ # motor failed flag the cover might not be closed in bad weather
+ # conditions. Potentially putting the measurement instrument at risk.
+ if self.plc_state["state"]["motor_failed"]:
+ if self.plc_state["state"]["reset_needed"] and (
+ not self.motor_reset_needed_in_last_iteration
+ ):
+ # when both motor and reset flag are set, then a reset is probably
+ # needed - skipping this module until the next iteration
+ self.motor_reset_needed_in_last_iteration = True
+ self.plc_interface.reset()
+ return
+ else:
+ # if the motor failed but no reset needed or if the reset has
+ # already been done in the last iteration, then this error will
+ # be raised (and an email sent out)
+ raise EnclosureControl.MotorFailedError(
+ "URGENT: stop all actions, check cover in person"
+ )
+ else:
+ self.motor_reset_needed_in_last_iteration = False
+ # Skip writing to the PLC as the user took over control from the automation
if self.config["tum_plc"]["controlled_by_user"]:
logger.debug(
"Skipping EnclosureControl because enclosure is controlled by user"
)
return
- # dawn/dusk detection: powerup/down spectrometer
+ # Dawn/dusk detection: powerup/down spectrometer at a defined sun angle
self.auto_set_power_spectrometer()
- if self.plc_state.state.motor_failed:
- raise MotorFailedError("URGENT: stop all actions, check cover in person")
-
- # check PLC ip connection (single ping)
+ # Check PLC ip connection (single ping).
if self.plc_interface.is_responsive():
logger.debug("Successful ping to PLC.")
else:
logger.warning("Could not ping PLC.")
- # check for automation state flank changes
- self.measurements_should_be_running = StateInterface.read()[
- "measurements_should_be_running"
- ]
+ # Syncs the cover to the current automation status present
self.sync_cover_to_measurement_status()
- # save the automation status for the next run
- self.last_cycle_automation_status = self.measurements_should_be_running
-
- # verify cover position for every loop. Close cover if supposed to be closed.
+ # Verify functions will handle desync caused by a user taking over control in previous loops
+ # Verify cover position for every loop. Close cover if supposed to be closed.
self.verify_cover_position()
- # verify that sync_to_cover status is still synced with measurement status
+ # Verify that sync_to_cover status is still synced with measurement status
self.verify_cover_sync()
- # disconnect from PLC
+ # Save the automation status for the next run. This allows for flank detection from previous completed loops.
+ self.last_cycle_automation_status = self.measurements_should_be_running
+
+ # Disconnect from PLC
self.plc_interface.disconnect()
self.last_plc_connection_time = time.time()
+ # Allows for PLC connection downtime of 10 minutes before an error is raised.
+ # Allows PLC connection to heal itself.
except Snap7Exception as e:
logger.exception(e)
now = time.time()
seconds_since_error_occured = now - self.last_plc_connection_time
if seconds_since_error_occured > 600:
- raise PLCError("Snap7Exception persisting for 10+ minutes")
+ raise interfaces.PLCInterface.PLCError(
+ "Snap7Exception persisting for 10+ minutes"
+ )
else:
logger.info(
f"Snap7Exception persisting for {round(seconds_since_error_occured/60, 2)}"
@@ -121,11 +175,17 @@ def run(self, new_config: dict) -> None:
# PLC.ACTORS SETTERS
- def move_cover(self, value) -> None:
+ def move_cover(self, value: int) -> None:
+ """Moves the cover attached on top of the enclosure. The cover is moved by an electrical
+ motor controlled by the PLC. The cover functions as weather protection for the measurement
+ instrument. In case of bad weather the PLC takes over control and closes the cover itself.
+ A movement of the cover during bad weather conditions shall not be allowed as instrument
+ safety is prioritized higher than maximization of overall measurement uptime.
+ """
logger.debug(f"Received request to move cover to position {value} degrees.")
# rain check before moving cover. PLC will deny cover requests during rain anyway
- if self.plc_state.state.rain:
+ if self.plc_state["state"]["rain"]:
logger.debug("Denied to move cover due to rain detected.")
else:
self.plc_interface.set_manual_control(True)
@@ -133,10 +193,16 @@ def move_cover(self, value) -> None:
self.plc_interface.set_manual_control(False)
def force_cover_close(self) -> None:
+ """Emergency option to call to ensure that the cover is closed immediately. Accounts for
+ possible blocking conditions caused by the PLC internals:
+ 1. Reset needed
+ 2. Sync to tracker still active
+ 3. Manual control not active
+ """
if not self.initialized:
- self._initialize()
+ self.__initialize()
- if self.plc_state.state.reset_needed:
+ if self.plc_state["state"]["reset_needed"]:
self.plc_interface.reset()
self.plc_interface.set_sync_to_tracker(False)
@@ -144,11 +210,11 @@ def force_cover_close(self) -> None:
self.plc_interface.set_cover_angle(0)
self.plc_interface.set_manual_control(False)
- def wait_for_cover_closing(self, throw_error=True) -> None:
- """Waits steps of 5s for the enclosure cover to close.
+ def wait_for_cover_closing(self, throw_error: bool = True) -> None:
+ """Validates the progress of a cover closing call. Continues when cover is closed.
+ Validation is done every 5s with a maximum waiting time of 60s.
- Raises the custom error CoverError if clover doesn't close in a given
- period of time.
+ Raises the custom error CoverError if cover doesn't close in time.
"""
start_time = time.time()
@@ -161,7 +227,7 @@ def wait_for_cover_closing(self, throw_error=True) -> None:
elapsed_time = time.time() - start_time
if elapsed_time > 60:
if throw_error:
- raise CoverError("Enclosure cover might be stuck.")
+ raise EnclosureControl.CoverError("Enclosure cover might be stuck.")
break
def auto_set_power_spectrometer(self) -> None:
@@ -170,48 +236,67 @@ def auto_set_power_spectrometer(self) -> None:
spectrometer in the morning when minimum angle is satisfied.
"""
- current_sun_elevation = Astronomy.get_current_sun_elevation()
+ current_sun_elevation = utils.Astronomy.get_current_sun_elevation()
min_power_elevation = (
self.config["general"]["min_sun_elevation"] - 1
- ) * Astronomy.units.deg
+ ) * utils.Astronomy.units.deg
if current_sun_elevation is not None:
sun_is_above_minimum = current_sun_elevation >= min_power_elevation
- if sun_is_above_minimum and (not self.plc_state.power.spectrometer):
+ if sun_is_above_minimum and (not self.plc_state["power"]["spectrometer"]):
self.plc_interface.set_power_spectrometer(True)
logger.info("Powering up the spectrometer.")
- if (not sun_is_above_minimum) and self.plc_state.power.spectrometer:
+ if (not sun_is_above_minimum) and self.plc_state["power"]["spectrometer"]:
self.plc_interface.set_power_spectrometer(False)
logger.info("Powering down the spectrometer.")
def sync_cover_to_measurement_status(self) -> None:
+ """Checks for flank changes in parameter measurements_should_be_running.
+ Positive flank: Set sync_cover flag in PLC to start matching the Camtracker mirror position.
+ Negative flank: Remove sync_cover flag in PLC and move cover to position 0.
+ """
if self.last_cycle_automation_status != self.measurements_should_be_running:
if self.measurements_should_be_running:
# flank change 0 -> 1: set cover mode: sync to tracker
- if self.plc_state.state.reset_needed:
+ if self.plc_state["state"]["reset_needed"]:
self.plc_interface.reset()
time.sleep(10)
- if not self.plc_state.state.rain:
+ if not self.plc_state["state"]["rain"]:
self.plc_interface.set_sync_to_tracker(True)
logger.info("Syncing Cover to Tracker.")
else:
# flank change 1 -> 0: remove cover mode: sync to tracker, close cover
+ if self.plc_state["state"]["reset_needed"]:
+ self.plc_interface.reset()
+ time.sleep(10)
self.plc_interface.set_sync_to_tracker(False)
self.move_cover(0)
logger.info("Closing Cover.")
self.wait_for_cover_closing(throw_error=False)
def verify_cover_position(self) -> None:
- if (not self.measurements_should_be_running) & (not self.plc_state.state.rain):
- if not self.plc_state.state.cover_closed:
+ """Verifies that the cover is closed when measurements are currently not set to be running.
+ Closes the cover in case of a mismatch.
+
+ This function allows to detect desync caused by previous user controlled decisions. It
+ also functions as a failsafe to ensure weather protection of the instrument."""
+ if (not self.measurements_should_be_running) & (not self.plc_state["state"]["rain"]):
+ if not self.plc_state["state"]["cover_closed"]:
logger.info("Cover is still open. Trying to close again.")
self.force_cover_close()
self.wait_for_cover_closing()
def verify_cover_sync(self) -> None:
- if self.measurements_should_be_running & (not self.plc_state.control.sync_to_tracker):
+ """Syncs the current cover_sync flag in the PLC with the present measurement status.
+
+ This function allows to detect desync caused by previous user controlled decisions."""
+ if self.measurements_should_be_running and (
+ not self.plc_state["control"]["sync_to_tracker"]
+ ):
logger.debug("Set sync to tracker to True to match measurement status.")
self.plc_interface.set_sync_to_tracker(True)
- if (not self.measurements_should_be_running) & self.plc_state.control.sync_to_tracker:
+ if (not self.measurements_should_be_running) and self.plc_state["control"][
+ "sync_to_tracker"
+ ]:
logger.debug("Set sync to tracker to False to match measurement status.")
self.plc_interface.set_sync_to_tracker(False)
diff --git a/packages/core/modules/measurement_conditions.py b/packages/core/modules/measurement_conditions.py
index 377f0755..71a68150 100644
--- a/packages/core/modules/measurement_conditions.py
+++ b/packages/core/modules/measurement_conditions.py
@@ -1,24 +1,49 @@
import datetime
-from packages.core.utils import Astronomy, StateInterface, Logger
+from packages.core import types, utils, interfaces
-logger = Logger(origin="measurement-conditions")
+logger = utils.Logger(origin="measurement-conditions")
-def get_times_from_tuples(triggers: any):
-
+def is_time_trigger_active(
+ config: types.ConfigDict,
+) -> bool:
+ """Returns true if time triggers in the config specify
+ that it should be measured right now"""
now = datetime.datetime.now()
current_time = datetime.time(now.hour, now.minute, now.second)
- start_time = datetime.time(**triggers["start_time"])
- end_time = datetime.time(**triggers["stop_time"])
- return current_time, start_time, end_time
+ start_time = datetime.time(**config["measurement_triggers"]["start_time"])
+ end_time = datetime.time(**config["measurement_triggers"]["stop_time"])
+ return (current_time > start_time) and (current_time < end_time)
class MeasurementConditions:
- def __init__(self, initial_config: dict):
+ """MeasurementConditions allows operation in three different modes: Manual, Automatic,
+ and CLI. Whenever a decision is made the parameter measurements_should_be_running in
+ StateInterface is updated.
+
+ In Manual mode, the user has full control over whether measurements should be active. The user-
+ controlled state can be controlled by the Pyra UI.
+
+ In Automatic mode, three different triggers are considered: Sun Elevation, Time, and Helios
+ State. These triggers may also be active in any combination at the same time. Measurements are
+ only set to be running if all triggers agree, while measurements will be set to be not active
+ if at least one of the active triggers decides to stop measurements.
+
+ In CLI mode, triggers from external sources can be considered. This option is available for
+ custom-built systems or sensors not part of Pyra-4. It is also possible in this mode to move the
+ measurement control to remote systems i.e. by ssh."""
+
+ def __init__(self, initial_config: types.ConfigDict) -> None:
self._CONFIG = initial_config
- def run(self, new_config: dict):
+ def run(self, new_config: types.ConfigDict) -> None:
+ """Called in every cycle of the main loop.
+ Updates StateInterface: measurements_should_be_running based on the selected mode, triggers
+ and present conditions."""
+
self._CONFIG = new_config
+
+ # Skip rest of the function if test mode is active
if self._CONFIG["general"]["test_mode"]:
logger.debug("Skipping MeasurementConditions in test mode")
return
@@ -27,6 +52,7 @@ def run(self, new_config: dict):
decision = self._CONFIG["measurement_decision"]
logger.debug(f"Decision mode for measurements is: {decision['mode']}.")
+ # Selection and evaluation of the current set measurement mode
if decision["mode"] == "manual":
measurements_should_be_running = decision["manual_decision_result"]
if decision["mode"] == "cli":
@@ -35,42 +61,49 @@ def run(self, new_config: dict):
measurements_should_be_running = self._get_automatic_decision()
if (
- StateInterface.read()["measurements_should_be_running"]
+ interfaces.StateInterface.read()["measurements_should_be_running"]
!= measurements_should_be_running
):
- Logger.log_activity_event(
+ utils.Logger.log_activity_event(
"start-measurements" if measurements_should_be_running else "stop-measurements"
)
logger.info(
f"Measurements should be running is set to: {measurements_should_be_running}."
)
- StateInterface.update(
+ # Update of the StateInterface with the latest measurement decision
+ interfaces.StateInterface.update(
{"measurements_should_be_running": measurements_should_be_running}
)
def _get_automatic_decision(self) -> bool:
+ """Evaluates the activated automatic mode triggers (Sun Angle, Time, Helios).
+ Reads the config to consider activated measurement triggers. Evaluates active measurement
+ triggers and combines their states by logical conjunction.
+ """
triggers = self._CONFIG["measurement_triggers"]
- if self._CONFIG["vbdsd"] is None:
- triggers["consider_vbdsd"] = False
+ if self._CONFIG["helios"] is None:
+ triggers["consider_helios"] = False
+ # If no triggers are considered during automatic mode, return False
if not any(
[
triggers["consider_sun_elevation"],
triggers["consider_time"],
- triggers["consider_vbdsd"],
+ triggers["consider_helios"],
]
):
return False
+ # Evaluate sun elevation if trigger is active
if triggers["consider_sun_elevation"]:
logger.info("Sun elevation as a trigger is considered.")
- current_sun_elevation = Astronomy.get_current_sun_elevation()
+ current_sun_elevation = utils.Astronomy.get_current_sun_elevation()
min_sun_elevation = max(
self._CONFIG["general"]["min_sun_elevation"], triggers["min_sun_elevation"]
)
sun_above_threshold = current_sun_elevation > (
- min_sun_elevation * Astronomy.units.deg
+ min_sun_elevation * utils.Astronomy.units.deg
)
if sun_above_threshold:
logger.debug("Sun angle is above threshold.")
@@ -78,25 +111,29 @@ def _get_automatic_decision(self) -> bool:
logger.debug("Sun angle is below threshold.")
return False
+ # Evaluate time if trigger is active
if triggers["consider_time"]:
logger.info("Time as a trigger is considered.")
- current_time, start_time, end_time = get_times_from_tuples(triggers)
- time_is_valid = (current_time > start_time) and (current_time < end_time)
+ time_is_valid = is_time_trigger_active(self._CONFIG)
logger.debug(f"Time conditions are {'' if time_is_valid else 'not '}fulfilled.")
if not time_is_valid:
return False
- if triggers["consider_vbdsd"]:
- logger.info("VBDSD as a trigger is considered.")
- vbdsd_result = StateInterface.read()["vbdsd_indicates_good_conditions"]
+ # Read latest Helios decision from StateInterface if trigger is active
+ # Helios runs in a thread and evaluates the sun conditions consistently during day.
+ if triggers["consider_helios"]:
+ logger.info("Helios as a trigger is considered.")
+ helios_result = interfaces.StateInterface.read()[
+ "helios_indicates_good_conditions"
+ ]
- if vbdsd_result is None:
- logger.debug(f"VBDSD does not nave enough images yet.")
+ if helios_result is None:
+ logger.debug(f"Helios does not nave enough images yet.")
return False
logger.debug(
- f"VBDSD indicates {'good' if vbdsd_result else 'bad'} sun conditions."
+ f"Helios indicates {'good' if helios_result else 'bad'} sun conditions."
)
- return vbdsd_result
+ return helios_result
return True
diff --git a/packages/core/modules/opus_measurement.py b/packages/core/modules/opus_measurement.py
index 637e1b44..1831bba5 100644
--- a/packages/core/modules/opus_measurement.py
+++ b/packages/core/modules/opus_measurement.py
@@ -1,38 +1,49 @@
import os
import sys
import time
-from packages.core.utils import Logger, StateInterface, Astronomy
+from typing import Any
+from packages.core import types, utils, interfaces
# these imports are provided by pywin32
+win32ui: Any = None
+dde: Any = None
if sys.platform == "win32":
import win32ui # type: ignore
import dde # type: ignore
-logger = Logger(origin="opus-measurement")
+logger = utils.Logger(origin="opus-measurement")
-class SpectrometerError(Exception):
- pass
+class OpusMeasurement:
+ """OPUS Measurements manages the FTIR spectrometer measurement software OPUS. It allows to
+ remotely communicate with OPUS via a DDE connection and trigger different commands.
+ OPUS needs an experiment file and can perform continuous measurements with parameters passed
+ by a macro file. These files can be loaded via commands over the active DDE connection.
-class OpusMeasurement:
- """Creates a working DDE connection to the OPUS DDE Server.
- Allows to remotely control experiments and macros in OPUS over the
- established DDE connection.
+ During the day it makes sure that OPUS is up and running and reloads the latest experiment if it
+ was changed by the operator. During night OPUS is shut down to reset after a full day of
+ measurements. Day and night is defined by the set sun angle in the config.
+
+ OPUSMeasurement will start and stop Macros according to the current value of StateInterface:
+ measurements_should_be_running.
"""
- def __init__(self, initial_config: dict):
+ def __init__(self, initial_config: types.ConfigDict):
self._CONFIG = initial_config
self.initialized = False
self.current_experiment = self._CONFIG["opus"]["experiment_path"]
- if self._CONFIG["general"]["test_mode"]:
+ if self._CONFIG["general"]["test_mode"] or (sys.platform != "win32"):
return
- self._initialize()
+ self.__initialize()
+
+ def __initialize(self) -> None:
+ """Initializes the DDE connection and sets up the conversation."""
+ assert sys.platform == "win32"
- def _initialize(self):
# note: dde servers talk to dde servers
self.server = dde.CreateServer()
self.server.Create("Client")
@@ -40,10 +51,15 @@ def _initialize(self):
self.last_cycle_automation_status = 0
self.initialized = True
- def run(self, new_config: dict):
+ def run(self, new_config: types.ConfigDict) -> None:
+ """Called in every cycle of the main loop.
+ Starts and stops OPUS.exe based on the present sun angle. Reads StateInterface:
+ measurements_should_be_running and starts and stops the OPUS macro."""
+
+ # loads latest config
self._CONFIG = new_config
- if self._CONFIG["general"]["test_mode"]:
- logger.debug("Skipping OpusMeasurement in test mode")
+ if self._CONFIG["general"]["test_mode"] or (sys.platform != "win32"):
+ logger.debug("Skipping OpusMeasurement in test mode and on non-windows systems")
return
logger.info("Running OpusMeasurement")
@@ -56,7 +72,7 @@ def run(self, new_config: dict):
return
if not self.initialized:
- self._initialize()
+ self.__initialize()
# start or stops opus.exe depending on sun angle
self.automated_process_handling()
@@ -64,16 +80,19 @@ def run(self, new_config: dict):
# check and reload experiment if updated in config.json
self.check_for_experiment_change()
+ # checks IP connection to FTIR spectrometer
if self.__is_em27_responsive:
logger.debug("Successful ping to EM27.")
else:
logger.info("EM27 seems to be disconnected.")
# check for automation state flank changes
- measurements_should_be_running = StateInterface.read()["measurements_should_be_running"]
+ measurements_should_be_running = interfaces.StateInterface.read()[
+ "measurements_should_be_running"
+ ]
if self.last_cycle_automation_status != measurements_should_be_running:
if measurements_should_be_running:
- # flank change 0 -> 1: load experiment, start macro
+ # flank change 0 -> 1: start macro
logger.info("Starting OPUS Macro.")
self.start_macro()
else:
@@ -84,18 +103,20 @@ def run(self, new_config: dict):
# save the automation status for the next run
self.last_cycle_automation_status = measurements_should_be_running
- def __connect_to_dde_opus(self):
+ def __connect_to_dde_opus(self) -> None:
+ """Connects to the OPUS server over DDE."""
+ assert sys.platform == "win32"
try:
self.conversation.ConnectTo("OPUS", "OPUS/System")
logger.info("Connected to OPUS DDE Server.")
except:
logger.info("Could not connect to OPUS DDE Server.")
- @property
- def __test_dde_connection(self):
+ def __test_dde_connection(self) -> bool:
"""Tests the DDE connection.
Tries to reinitialize the DDE socket if connection test fails.
"""
+ assert sys.platform == "win32"
# conversation.Connected() returns 1 if connected
if self.conversation.Connected() == 1:
@@ -112,14 +133,16 @@ def __test_dde_connection(self):
self.__connect_to_dde_opus()
# retest DDE connection
- return self.conversation.Connected() == 1
+ return self.conversation.Connected() == 1 # type: ignore
- def load_experiment(self):
+ def load_experiment(self) -> None:
"""Loads a new experiment in OPUS over DDE connection."""
+ assert sys.platform == "win32"
+
self.__connect_to_dde_opus()
experiment_path = self._CONFIG["opus"]["experiment_path"]
- if not self.__test_dde_connection:
+ if not self.__test_dde_connection():
return
answer = self.conversation.Request("LOAD_EXPERIMENT " + experiment_path)
logger.info(f"Loaded new OPUS experiment: {experiment_path}")
@@ -134,12 +157,16 @@ def load_experiment(self):
logger.info("Could not load OPUS experiment as expected.")
"""
- def start_macro(self):
+ def start_macro(self) -> None:
"""Starts a new macro in OPUS over DDE connection."""
+ assert sys.platform == "win32"
+
+ # perform connect
self.__connect_to_dde_opus()
- if not self.__test_dde_connection:
+ if not self.__test_dde_connection():
return
+ # load macro
macro_path = self._CONFIG["opus"]["macro_path"]
answer = self.conversation.Request(f"RUN_MACRO {macro_path}")
logger.info(f"Started OPUS macro: {macro_path}")
@@ -155,13 +182,17 @@ def start_macro(self):
logger.info(f"Could not start OPUS macro with id: {active_macro_id} as expected.")
"""
- def stop_macro(self):
+ def stop_macro(self) -> None:
"""Stops the currently running macro in OPUS over DDE connection."""
- self.__connect_to_dde_opus()
- macro_path = os.path.basename(self._CONFIG["opus"]["macro_path"])
+ assert sys.platform == "win32"
- if not self.__test_dde_connection:
+ # perform connect
+ self.__connect_to_dde_opus()
+ if not self.__test_dde_connection():
return
+
+ # stop macro
+ macro_path = os.path.basename(self._CONFIG["opus"]["macro_path"])
answer = self.conversation.Request("KILL_MACRO " + macro_path)
logger.info(f"Stopped OPUS macro: {macro_path}")
@@ -174,11 +205,13 @@ def stop_macro(self):
logger.info(f"Could not stop OPUS macro with id: {active_macro_id} as expected.")
"""
- def close_opus(self):
- """Closes OPUS via DDE."""
+ def close_opus(self) -> None:
+ """Closes OPUS via DDE call."""
+ assert sys.platform == "win32"
+
self.__connect_to_dde_opus()
- if not self.__test_dde_connection:
+ if not self.__test_dde_connection():
return
answer = self.conversation.Request("CLOSE_OPUS")
logger.info("Stopped OPUS.exe")
@@ -191,28 +224,32 @@ def close_opus(self):
logger.info("No response for OPUS.exe close request.")
"""
- def __shutdown_dde_server(self):
+ def __shutdown_dde_server(self) -> None:
"""Note the underlying DDE object (ie, Server, Topics and Items) are
not cleaned up by this call.
"""
+ assert sys.platform == "win32"
+
self.server.Shutdown()
- def __destroy_dde_server(self):
+ def __destroy_dde_server(self) -> None:
"""Destroys the underlying C++ object."""
+ assert sys.platform == "win32"
self.server.Destroy()
- def __is_em27_responsive(self):
+ def __is_em27_responsive(self) -> bool:
"""Pings the EM27 and returns:
True -> Connected
False -> Not Connected"""
- response = os.system("ping -n 1 " + self._CONFIG["em27"]["ip"])
+ assert sys.platform == "win32"
+
+ response = os.system("ping -n 1 " + self._CONFIG["opus"]["em27_ip"])
return response == 0
- def start_opus(self):
- """Uses os.startfile() to start up OPUS
- This simulates a user click on the opus.exe.
- """
+ def start_opus(self) -> None:
+ """Starts the OPUS.exe with os.startfile(). This simulates a user click on the executable."""
+ assert sys.platform == "win32"
opus_path = self._CONFIG["opus"]["executable_path"]
opus_username = self._CONFIG["opus"]["username"]
@@ -220,19 +257,25 @@ def start_opus(self):
# works only > python3.10
# without cwd CT will have trouble loading its internal database)
- os.startfile(
- os.path.basename(opus_path),
- cwd=os.path.dirname(opus_path),
- arguments=f"/LANGUAGE=ENGLISH /DIRECTLOGINPASSWORD={opus_username}@{opus_password}",
- show_cmd=2,
- )
-
- def opus_application_running(self):
+ try:
+ os.startfile( # type: ignore
+ os.path.basename(opus_path),
+ cwd=os.path.dirname(opus_path),
+ arguments=f"/LANGUAGE=ENGLISH /DIRECTLOGINPASSWORD={opus_username}@{opus_password}",
+ show_cmd=2,
+ )
+ except AttributeError:
+ pass
+
+ def opus_application_running(self) -> bool:
"""Checks if OPUS is already running by identifying the window.
+ Returns:
False if Application is currently not running on OS
True if Application is currently running on OS
"""
+ assert sys.platform == "win32"
+
# FindWindow(className, windowName)
# className: String, The window class name to find, else None
# windowName: String, The window name (ie,title) to find, else None
@@ -246,49 +289,26 @@ def opus_application_running(self):
opus_windows_name,
):
return True
+ return False
except win32ui.error:
return False
- def test_setup(self):
- if sys.platform != "win32":
- return
-
- opus_is_running = self.opus_application_running()
- if not opus_is_running:
- self.start_opus()
- try_count = 0
- while try_count < 10:
- if self.opus_application_running():
- break
- try_count += 1
- time.sleep(6)
-
- assert self.opus_application_running()
- assert self.__test_dde_connection
-
- self.load_experiment()
- time.sleep(2)
-
- self.start_macro()
- time.sleep(10)
-
- self.stop_macro()
+ def low_sun_angle_present(self) -> bool:
+ """Checks defined sun angle in config. Closes OPUS at the end of the day to start up fresh
+ the next day."""
- def low_sun_angle_present(self):
- """OPUS closes at the end of the day to start up fresh the next day."""
+ assert sys.platform == "win32"
- # sleep while sun angle is too low
- if Astronomy.get_current_sun_elevation().is_within_bounds(
- None, self._CONFIG["general"]["min_sun_elevation"] * Astronomy.units.deg
- ):
- return True
- else:
- return False
+ sun_angle_is_low: bool = utils.Astronomy.get_current_sun_elevation().is_within_bounds(
+ None, self._CONFIG["general"]["min_sun_elevation"] * utils.Astronomy.units.deg
+ )
+ return sun_angle_is_low
- def automated_process_handling(self):
+ def automated_process_handling(self) -> None:
"""Start OPUS.exe if not running and sun angle conditions satisfied.
Shuts down OPUS.exe if running and sun angle conditions not satisfied.
"""
+ assert sys.platform == "win32"
if not self.low_sun_angle_present():
# start OPUS if not currently running
@@ -310,24 +330,29 @@ def automated_process_handling(self):
time.sleep(5)
self.close_opus()
- def wait_for_opus_startup(self):
+ def wait_for_opus_startup(self) -> None:
"""Checks for OPUS to be running. Breaks out of the loop after a defined time."""
+ assert sys.platform == "win32"
+
start_time = time.time()
while True:
+ # breaks when OPUS is up and running
if self.opus_application_running():
break
time.sleep(0.5)
+ # breaks after 60s of waiting
if time.time() - start_time > 60:
break
- def check_for_experiment_change(self):
+ def check_for_experiment_change(self) -> None:
"""Compares the experiment in the config with the current active experiment.
To reload an experiment during an active macro the macro needs to be stopped first.
"""
+ assert sys.platform == "win32"
if self._CONFIG["opus"]["experiment_path"] != self.current_experiment:
- if StateInterface.read(persistent=True)["active_opus_macro_id"] == None:
+ if interfaces.StateInterface.read_persistent()["active_opus_macro_id"] == None:
self.load_experiment()
else:
self.stop_macro()
@@ -335,3 +360,29 @@ def check_for_experiment_change(self):
self.load_experiment()
time.sleep(5)
self.start_macro()
+
+ def test_setup(self) -> None:
+ """Function to test the functionality of this module. Starts up OPUS, loads an experiment,
+ starts a macro and stops it after 10s."""
+ assert sys.platform == "win32"
+
+ opus_is_running = self.opus_application_running()
+ if not opus_is_running:
+ self.start_opus()
+ try_count = 0
+ while try_count < 10:
+ if self.opus_application_running():
+ break
+ try_count += 1
+ time.sleep(6)
+
+ assert self.opus_application_running()
+ assert self.__test_dde_connection()
+
+ self.load_experiment()
+ time.sleep(2)
+
+ self.start_macro()
+ time.sleep(10)
+
+ self.stop_macro()
diff --git a/packages/core/modules/sun_tracking.py b/packages/core/modules/sun_tracking.py
index ad65b17b..fc370581 100644
--- a/packages/core/modules/sun_tracking.py
+++ b/packages/core/modules/sun_tracking.py
@@ -1,44 +1,63 @@
-# This is an Implementation this for the "Camtracker" software
-# Later, we will make an abstract base class that enforces a standard
-# interface to be implemented for any software like "Camtracker"
-
import os
-import sys
import time
import jdcal
import datetime
-from packages.core.utils import StateInterface, Logger, OSInterface
+from packages.core import types, utils, interfaces
-logger = Logger(origin="sun-tracking")
+logger = utils.Logger(origin="sun-tracking")
class SunTracking:
- def __init__(self, initial_config: dict):
+ """SunTracking manages the software CamTracker. CamTracker controls moveable mirrors attached
+ to the FTIR spectrometer EM27. These mirrors are sync with the current sun position to ensure
+ direct sun light to be directed into the instrument. SunTracking will initialize CamTracker
+ according to the current value of StateInterface: measurements_should_be_running.
+
+ These mirrors are initialized at startup of CamTracker if it is called with the additional
+ parameter "-autostart". CamTracker can be gracefully shut down with creating a stop.txt file
+ in its directory. CamTracker creates multiple logfiles at run time that give information on
+ its current status of tracking the sun. Most importantly motor offsets tells the difference
+ between current sun angle and calculated sun positions. It happens from time to time that
+ SunTracker fails to track the sun and is unable to reinitialize the tracking. If a certain
+ motor offset threshold is reached the only way to fix the offset is to restart CamTracker.
+ """
+
+ def __init__(self, initial_config: types.ConfigDict):
self._CONFIG = initial_config
self.last_start_time = time.time()
if self._CONFIG["general"]["test_mode"]:
return
- def run(self, new_config: dict):
+ def run(self, new_config: types.ConfigDict) -> None:
+ """Called in every cycle of the main loop.
+ Reads StateInterface: measurements_should_be_running and starts and stops CamTracker
+ tracking."""
+
+ # update to latest config
self._CONFIG = new_config
+
+ # Skip rest of the function if test mode is active
if self._CONFIG["general"]["test_mode"]:
logger.debug("Skipping SunTracking in test mode")
return
logger.info("Running SunTracking")
- measurements_should_be_running = StateInterface.read()[
+ # check for automation state flank changes
+ measurements_should_be_running = interfaces.StateInterface.read()[
"measurements_should_be_running"
]
# main logic for active automation
+ # start sun tracking if supposed to be running and not active
if measurements_should_be_running and not self.ct_application_running():
logger.info("Start CamTracker")
self.start_sun_tracking_automation()
self.last_start_time = time.time()
return
+ # stops sun tracking if supposed to be not running and active
if not measurements_should_be_running and self.ct_application_running():
logger.info("Stop CamTracker")
self.stop_sun_tracking_automation()
@@ -46,17 +65,27 @@ def run(self, new_config: dict):
# check motor offset, if over params.threshold prepare to
# shutdown CamTracker. Will be restarted in next run() cycle.
- if (
- self.ct_application_running()
- and (time.time() - self.last_start_time) > 300
- and not self.validate_tracker_position()
- ):
- logger.info("CamTracker Motor Position is over threshold.")
- logger.info("Stop CamTracker. Preparing for reinitialization.")
+ # is only considered if tracking is already up for at least 5 minutes.
+ if not self.ct_application_running():
+ logger.debug("CamTracker is not running")
+ return
+
+ if (time.time() - self.last_start_time) < 300:
+ logger.debug(
+ "Skipping motor validation when CamTracker "
+ + "has been started less than 5 minutes ago"
+ )
+ return
+
+ if self.validate_tracker_position():
+ logger.debug("CamTracker motor position is valid.")
+ else:
+ logger.info("CamTracker motor position is over threshold.")
+ logger.info("Stopping CamTracker. Preparing for reinitialization.")
self.stop_sun_tracking_automation()
- def ct_application_running(self):
- """Checks if CamTracker is already running by identifying the window.
+ def ct_application_running(self) -> bool:
+ """Checks if CamTracker is already running by identifying the active window.
False if Application is currently not running on OS
True if Application is currently running on OS
@@ -65,7 +94,9 @@ def ct_application_running(self):
ct_path = self._CONFIG["camtracker"]["executable_path"]
process_name = os.path.basename(ct_path)
- return OSInterface.get_process_status(process_name) in [
+ # TODO: Check whether this list makes sense with
+ # respect to psutil's return types
+ return interfaces.OSInterface.get_process_status(process_name) in [
"running",
"start_pending",
"continue_pending",
@@ -73,44 +104,47 @@ def ct_application_running(self):
"paused",
]
- def start_sun_tracking_automation(self):
- """Uses os.startfile() to start up the CamTracker
- executable with additional parameter -automation.
- The paramter - automation will instruct CamTracker to automatically
- move the mirrors to the expected sun position during startup.
+ def start_sun_tracking_automation(self) -> None:
+ """Uses os.startfile() to start up the CamTracker executable with additional parameter
+ "-automation". The parameter - automation will instruct CamTracker to automatically move the
+ mirrors to the expected sun position during startup.
+
+ Removes stop.txt file in CamTracker directory if present. This file is the current way of
+ gracefully shutting down CamTracker and move the mirrors back to parking position.
"""
+
# delete stop.txt file in camtracker folder if present
- self.clean_stop_file()
+ self.remove_stop_file()
ct_path = self._CONFIG["camtracker"]["executable_path"]
# works only > python3.10
# without cwd CT will have trouble loading its internal database)
- os.startfile(
- os.path.basename(ct_path),
- cwd=os.path.dirname(ct_path),
- arguments="-autostart",
- show_cmd=2,
- )
-
- def stop_sun_tracking_automation(self):
+ try:
+ os.startfile( # type: ignore
+ os.path.basename(ct_path),
+ cwd=os.path.dirname(ct_path),
+ arguments="-autostart",
+ show_cmd=2,
+ )
+ except AttributeError:
+ pass
+
+ def stop_sun_tracking_automation(self) -> None:
"""Tells the CamTracker application to end program and move mirrors
to parking position.
- CamTracker has an internal check for a stop.txt file in its directory
- and will do a clean shutdown.
+ CamTracker has an internal check for a stop.txt file in its directory.
+ After detection it will move its mirrors to parking position and end itself.
"""
# create stop.txt file in camtracker folder
camtracker_directory = os.path.dirname(self._CONFIG["camtracker"]["executable_path"])
+ with open(os.path.join(camtracker_directory, "stop.txt"), "w") as f:
+ f.write("")
- f = open(os.path.join(camtracker_directory, "stop.txt"), "w")
- f.close()
-
- def clean_stop_file(self):
- """CamTracker needs a stop.txt file to safely shutdown.
- This file needs to be removed after CamTracker shutdown.
- """
+ def remove_stop_file(self) -> None:
+ """This function removes the stop.txt file to allow CamTracker to restart."""
camtracker_directory = os.path.dirname(self._CONFIG["camtracker"]["executable_path"])
stop_file_path = os.path.join(camtracker_directory, "stop.txt")
@@ -118,122 +152,85 @@ def clean_stop_file(self):
if os.path.exists(stop_file_path):
os.remove(stop_file_path)
- def read_ct_log_learn_az_elev(self):
+ def read_ct_log_learn_az_elev(self) -> tuple[float, float, float, float, float, float]:
"""Reads the CamTracker Logfile: LEARN_Az_Elev.dat.
- Returns a list of string parameter:
- [
- Julian Date,
- Tracker Elevation,
- Tracker Azimuth,
- Elev Offset from Astro,
- Az Offset from Astro,
- Ellipse distance/px
+ Returns a list of string parameter: [
+ Julian Date,
+ Tracker Elevation,
+ Tracker Azimuth,
+ Elev Offset from Astro,
+ Az Offset from Astro,
+ Ellipse distance/px
]
+
+ Raises AssertionError if log file is invalid.
"""
- # read azimuth and elevation motor offsets from camtracker logfiles
- target = self._CONFIG["camtracker"]["learn_az_elev_path"]
- if not os.path.isfile(target):
- return [None, None, None, None, None, None]
+ # read azimuth and elevation motor offsets from camtracker logfiles
+ ct_logfile_path = self._CONFIG["camtracker"]["learn_az_elev_path"]
+ assert os.path.isfile(ct_logfile_path), "camtracker logfile not found"
- f = open(target, "r")
- last_line = f.readlines()[-1]
- f.close()
+ # TODO: Seek the last line directly instead of reading the whole file
+ # See https://stackoverflow.com/a/54278929/8255842
+ with open(ct_logfile_path) as f:
+ last_line = f.readlines()[-1]
# last_line: [Julian Date, Tracker Elevation, Tracker Azimuth,
# Elev Offset from Astro, Az Offset from Astro, Ellipse distance/px]
- last_line = last_line.replace(" ", "").replace("\n", "").split(",")
-
- # convert julian day to greg calendar as tuple (Year, Month, Day)
- jddate = jdcal.jd2gcal(float(last_line[0]), 0)[:3]
-
- # get current date(example below)
- # date = (Year, Month, Day)
- now = datetime.datetime.now()
- date = (now.year, now.month, now.day)
+ str_values = last_line.replace(" ", "").replace("\n", "").split(",")
- # if the in the log file read date is up-to-date
- if date == jddate:
- return last_line
- else:
- return [None, None, None, None, None, None]
-
- def __read_ct_log_sunintensity(self):
- """Reads the CamTracker Logile: SunIntensity.dat.
-
- Returns the sun intensity as either 'good', 'bad', 'None'.
- """
- # check sun status logged by camtracker
- target = self._CONFIG["camtracker"]["sun_intensity_path"]
-
- if not os.path.isfile(target):
- return
-
- f = open(target, "r")
- last_line = f.readlines()[-1]
- f.close()
-
- sun_intensity = last_line.split(",")[3].replace(" ", "").replace("\n", "")
+ try:
+ assert len(str_values) == 6
+ float_values = tuple([float(v) for v in str_values])
+ except (AssertionError, ValueError):
+ raise AssertionError(f'invalid last logfile line "{last_line}"')
# convert julian day to greg calendar as tuple (Year, Month, Day)
- jddate = jdcal.jd2gcal(
- float(last_line.replace(" ", "").replace("\n", "").split(",")[0]), 0
- )[:3]
+ jddate = jdcal.jd2gcal(float_values[0], 0)[:3]
- # get current date(example below)
- # date = (Year, Month, Day)
+ # assert that the log file is up-to-date
now = datetime.datetime.now()
- date = (now.year, now.month, now.day)
+ assert jddate == (
+ now.year,
+ now.month,
+ now.day,
+ ), f'date in file is too old: "{last_line}"'
- # if file is up to date
- if date == jddate:
- # returns either 'good' or 'bad'
- return sun_intensity
+ return float_values # type: ignore
- def validate_tracker_position(self):
+ def validate_tracker_position(self) -> bool:
"""Reads motor offsets and compares it with defined threshold.
+ The motor offset defines the difference between the current active and calculated sun
+ angle.
Returns
True -> Offsets are within threshold
False -> CamTracker lost sun position
"""
+ # fails if file integrity is broken
tracker_status = self.read_ct_log_learn_az_elev()
- if None in tracker_status:
- return
-
- elev_offset = float(tracker_status[3])
- az_offeset = float(tracker_status[4])
- threshold = float(self._CONFIG["camtracker"]["motor_offset_threshold"])
+ elev_offset: float = tracker_status[3]
+ az_offeset: float = tracker_status[4]
+ threshold: float = self._CONFIG["camtracker"]["motor_offset_threshold"]
- if (abs(elev_offset) > threshold) or (abs(az_offeset) > threshold):
- return False
+ return (abs(elev_offset) <= threshold) and (abs(az_offeset) <= threshold)
- return True
-
- def test_setup(self):
- if sys.platform != "win32":
- return
-
- ct_is_running = self.ct_application_running
- if not ct_is_running:
+ def test_setup(self) -> None:
+ """
+ Function to test the functionality of this module. Starts up CamTracker to initialize the
+ tracking mirrors. Then moves mirrors back to parking position and shuts down CamTracker.
+ """
+ if not self.ct_application_running():
self.start_sun_tracking_automation()
- try_count = 0
- while try_count < 10:
- if self.ct_application_running:
+ for _ in range(10):
+ if self.ct_application_running():
break
- try_count += 1
time.sleep(6)
- assert self.ct_application_running
-
- # time.sleep(20)
-
+ assert self.ct_application_running()
self.stop_sun_tracking_automation()
time.sleep(10)
-
- assert not self.ct_application_running
-
- assert False
+ assert not self.ct_application_running()
diff --git a/packages/core/modules/system_checks.py b/packages/core/modules/system_checks.py
index c10f8a13..f498e5e1 100644
--- a/packages/core/modules/system_checks.py
+++ b/packages/core/modules/system_checks.py
@@ -1,37 +1,41 @@
-from packages.core.utils import Logger, OSInterface
-from packages.core.utils.interfaces.state_interface import StateInterface
+from packages.core import types, utils, interfaces
-logger = Logger(origin="system-checks")
+logger = utils.Logger(origin="system-checks")
class SystemChecks:
- def __init__(self, initial_config: dict):
+ """SystemChecks interacts with the present Operating System through OSInterface. It checks and
+ logs important parameters (CPU, memory, disk space) to give insight into the overall system
+ stability. It raises custom errors when the disk runs out of storage or the energy supply is
+ not ensured. SystemChecks writes the latest readout into StateInterface."""
+
+ def __init__(self, initial_config: types.ConfigDict):
self._CONFIG = initial_config
- def run(self, new_config: dict):
+ def run(self, new_config: types.ConfigDict) -> None:
self._CONFIG = new_config
logger.info("Running SystemChecks")
# check os system stability
- cpu_usage = OSInterface.get_cpu_usage()
+ cpu_usage = interfaces.OSInterface.get_cpu_usage()
logger.debug(f"Current CPU usage for all cores is {cpu_usage}%.")
- memory_usage = OSInterface.get_memory_usage()
+ memory_usage = interfaces.OSInterface.get_memory_usage()
logger.debug(f"Current v_memory usage for the system is {memory_usage}.")
- last_boot_time = OSInterface.get_last_boot_time()
+ last_boot_time = interfaces.OSInterface.get_last_boot_time()
logger.debug(f"The system is running since {last_boot_time}.")
- disk_space = OSInterface.get_disk_space()
+ disk_space = interfaces.OSInterface.get_disk_space()
logger.debug(f"The disk is currently filled with {disk_space}%.")
# raises error if disk_space is below 10%
- OSInterface.validate_disk_space()
+ interfaces.OSInterface.validate_disk_space()
# raises error if system battery is below 20%
- OSInterface.validate_system_battery()
+ interfaces.OSInterface.validate_system_battery()
- StateInterface.update(
+ interfaces.StateInterface.update(
{
"os_state": {
"cpu_usage": cpu_usage,
diff --git a/packages/core/modules/vbdsd.py b/packages/core/modules/vbdsd.py
deleted file mode 100644
index 63b2e0e8..00000000
--- a/packages/core/modules/vbdsd.py
+++ /dev/null
@@ -1,363 +0,0 @@
-from datetime import datetime
-import os
-import queue
-import threading
-import time
-import cv2 as cv
-import numpy as np
-from packages.core.utils import (
- ConfigInterface,
- StateInterface,
- Logger,
- RingList,
- Astronomy,
- ImageProcessing,
-)
-
-logger = Logger(origin="vbdsd")
-
-dir = os.path.dirname
-PROJECT_DIR = dir(dir(dir(dir(os.path.abspath(__file__)))))
-IMG_DIR = os.path.join(PROJECT_DIR, "logs", "vbdsd")
-AUTOEXPOSURE_IMG_DIR = os.path.join(PROJECT_DIR, "logs", "vbdsd-autoexposure")
-_CONFIG = None
-
-
-class CameraError(Exception):
- pass
-
-
-class _VBDSD:
- cam = None
- current_exposure = None
- last_autoexposure_time = 0
- available_exposures = None
-
- @staticmethod
- def init(camera_id: int, retries: int = 5):
- # TODO: Why is this necessary?
- _VBDSD.cam = cv.VideoCapture(camera_id, cv.CAP_DSHOW)
- _VBDSD.cam.release()
-
- for _ in range(retries):
- _VBDSD.cam = cv.VideoCapture(camera_id, cv.CAP_DSHOW)
- if _VBDSD.cam.isOpened():
-
- if _VBDSD.available_exposures is None:
- _VBDSD.available_exposures = _VBDSD.get_available_exposures()
- logger.debug(
- f"determined available exposures: {_VBDSD.available_exposures}"
- )
- assert (
- len(_VBDSD.available_exposures) > 0
- ), "did not find any available exposures"
-
- _VBDSD.current_exposure = min(_VBDSD.available_exposures)
- _VBDSD.update_camera_settings(
- width=1280,
- height=720,
- exposure=min(_VBDSD.available_exposures),
- brightness=64,
- contrast=64,
- saturation=0,
- gain=0,
- )
- return
- else:
- time.sleep(2)
-
- raise CameraError("could not initialize camera")
-
- @staticmethod
- def deinit():
- if _VBDSD.cam is not None:
- _VBDSD.cam.release()
- _VBDSD.cam = None
-
- @staticmethod
- def get_available_exposures() -> list[int]:
- possible_values = []
- for exposure in range(-20, 20):
- _VBDSD.cam.set(cv.CAP_PROP_EXPOSURE, exposure)
- if _VBDSD.cam.get(cv.CAP_PROP_EXPOSURE) == exposure:
- possible_values.append(exposure)
-
- return possible_values
-
- @staticmethod
- def update_camera_settings(
- exposure: int = None,
- brightness: int = None,
- contrast: int = None,
- saturation: int = None,
- gain: int = None,
- width: int = None,
- height: int = None,
- ):
- # which settings are available depends on the camera model.
- # however, this function will throw an AssertionError, when
- # the value could not be changed
- properties = {
- "width": (cv.CAP_PROP_FRAME_WIDTH, width),
- "height": (cv.CAP_PROP_FRAME_HEIGHT, height),
- "exposure": (cv.CAP_PROP_EXPOSURE, exposure),
- "brightness": (cv.CAP_PROP_BRIGHTNESS, brightness),
- "contrast": (cv.CAP_PROP_CONTRAST, contrast),
- "saturation": (cv.CAP_PROP_SATURATION, saturation),
- "gain": (cv.CAP_PROP_GAIN, gain),
- }
- for property_name in properties:
- key, value = properties[property_name]
- if value is not None:
- _VBDSD.cam.set(key, value)
- if property_name not in ["width", "height"]:
- new_value = _VBDSD.cam.get(key)
- assert (
- new_value == value
- ), f"could not set {property_name} to {value}, value is still at {new_value}"
-
- # throw away some images after changing settings. I don't know
- # why this is necessary, but it resolved a lot of issues
- for _ in range(2):
- _VBDSD.cam.read()
-
- @staticmethod
- def take_image(retries: int = 10, trow_away_white_images: bool = True) -> cv.Mat:
- assert _VBDSD.cam is not None, "camera is not initialized yet"
- if not _VBDSD.cam.isOpened():
- raise CameraError("camera is not open")
- for _ in range(retries + 1):
- ret, frame = _VBDSD.cam.read()
- if ret:
- if trow_away_white_images and np.mean(frame) > 240:
- # image is mostly white
- continue
- return frame
- raise CameraError("could not take image")
-
- @staticmethod
- def adjust_exposure():
- """
- set exposure to the value where the overall
- mean pixel value color is closest to 100
- """
- exposure_results = []
- for e in _VBDSD.available_exposures:
- _VBDSD.update_camera_settings(exposure=e)
- img = _VBDSD.take_image(trow_away_white_images=False)
- mean_color = round(np.mean(img), 3)
- exposure_results.append({"exposure": e, "mean": mean_color})
- img = ImageProcessing.add_text_to_image(
- img, f"mean={mean_color}", color=(0, 0, 255)
- )
- cv.imwrite(os.path.join(AUTOEXPOSURE_IMG_DIR, f"exposure-{e}.jpg"), img)
-
- logger.debug(f"exposure results: {exposure_results}")
- new_exposure = min(exposure_results, key=lambda r: abs(r["mean"] - 50))["exposure"]
-
- _VBDSD.update_camera_settings(exposure=new_exposure)
-
- if new_exposure != _VBDSD.current_exposure:
- logger.info(f"changing exposure: {_VBDSD.current_exposure} -> {new_exposure}")
- _VBDSD.current_exposure = new_exposure
-
- @staticmethod
- def determine_frame_status(frame: cv.Mat, save_image: bool) -> int:
- # transform image from 1280x720 to 640x360
- downscaled_image = cv.resize(frame, None, fx=0.5, fy=0.5)
-
- # for each rgb pixel [234,234,234] only consider the gray value (234)
- single_valued_pixels = cv.cvtColor(downscaled_image, cv.COLOR_BGR2GRAY)
-
- # determine lense position and size from binary mask
- binary_mask = ImageProcessing.get_binary_mask(single_valued_pixels)
- circle_cx, circle_cy, circle_r = ImageProcessing.get_circle_location(binary_mask)
-
- # only consider edges and make them bold
- edges_only = np.array(cv.Canny(single_valued_pixels, 40, 40), dtype=np.float32)
- edges_only_dilated = cv.dilate(
- edges_only, cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5))
- )
-
- # blacken the outer 10% of the circle radius
- edges_only_dilated *= ImageProcessing.get_circle_mask(
- edges_only_dilated.shape, circle_r * 0.9, circle_cx, circle_cy
- )
-
- # determine how many pixels inside the circle are made up of "edge pixels"
- edge_fraction = round((np.sum(edges_only_dilated) / 255) / np.sum(binary_mask), 6)
-
- # TODO: the values below should be adjusted by looking at the ifgs directly
- status = 1 if (edge_fraction > 0.02) else 0
-
- logger.debug(f"exposure = {_VBDSD.current_exposure}, edge_fraction = {edge_fraction}")
-
- if save_image:
- image_timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
- raw_image_name = f"{image_timestamp}-{status}-raw.jpg"
- processed_image_name = f"{image_timestamp}-{status}-processed.jpg"
- processed_frame = ImageProcessing.add_markings_to_image(
- edges_only_dilated, edge_fraction, circle_cx, circle_cy, circle_r
- )
- cv.imwrite(os.path.join(IMG_DIR, raw_image_name), frame)
- cv.imwrite(os.path.join(IMG_DIR, processed_image_name), processed_frame)
-
- return status
-
- @staticmethod
- def run(save_image: bool) -> int:
- # run autoexposure function every 3 minutes
- now = time.time()
- if (now - _VBDSD.last_autoexposure_time) > 180:
- _VBDSD.adjust_exposure()
- _VBDSD.last_autoexposure_time = now
-
- frame = _VBDSD.take_image()
- return _VBDSD.determine_frame_status(frame, save_image)
-
-
-class VBDSD_Thread:
- def __init__(self):
- self.__thread = None
- self.__shared_queue = queue.Queue()
-
- def start(self):
- """
- Start a thread using the multiprocessing library
- """
- logger.info("Starting thread")
- self.__thread = threading.Thread(target=VBDSD_Thread.main, args=(self.__shared_queue,))
- self.__thread.start()
-
- def is_running(self):
- return self.__thread is not None
-
- def stop(self):
- """
- Stop the thread and set the state to 'null'
- """
-
- logger.info("Sending termination signal")
- self.__shared_queue.put("stop")
-
- logger.info("Waiting for thread to terminate")
- self.__thread.join()
-
- logger.debug('Setting state to "null"')
- StateInterface.update({"vbdsd_indicates_good_conditions": None})
-
- self.__thread = None
-
- @staticmethod
- def main(shared_queue: queue.Queue, infinite_loop: bool = True, headless: bool = False):
- global logger
- global _CONFIG
-
- # headless mode = don't use logger, just print messages to console, always save images
- if headless:
- logger = Logger(origin="vbdsd", just_print=True)
- _CONFIG = ConfigInterface.read()
-
- status_history = RingList(_CONFIG["vbdsd"]["evaluation_size"])
- current_state = None
-
- repeated_camera_error_count = 0
-
- while True:
- # Check for termination
- try:
- if shared_queue.get(block=False) == "stop":
- _VBDSD.deinit()
- break
- except queue.Empty:
- pass
-
- try:
- start_time = time.time()
- _CONFIG = ConfigInterface.read()
-
- # init camera connection
- if _VBDSD.cam is None:
- logger.info(f"Initializing VBDSD camera")
- _VBDSD.init(_CONFIG["vbdsd"]["camera_id"])
-
- # reinit if parameter changes
- new_size = _CONFIG["vbdsd"]["evaluation_size"]
- if status_history.maxsize() != new_size:
- logger.debug(
- "Size of VBDSD history has changed: "
- + f"{status_history.maxsize()} -> {new_size}"
- )
- status_history.reinitialize(new_size)
-
- # sleep while sun angle is too low
- if (not headless) and Astronomy.get_current_sun_elevation().is_within_bounds(
- None, _CONFIG["general"]["min_sun_elevation"] * Astronomy.units.deg
- ):
- logger.debug("Current sun elevation below minimum: Waiting 5 minutes")
- if current_state != None:
- StateInterface.update({"vbdsd_indicates_good_conditions": False})
- current_state = None
- # reinit for next day
- _VBDSD.reinit_settings()
- time.sleep(300)
- continue
-
- # take a picture and process it: status is in [0, 1]
- # a CameraError is allowed to happen 3 times in a row
- # at the 4th time the camera is not able to take an image
- # an Exception will be raised (and VBDSD will be restarted)
- try:
- status = _VBDSD.run(headless or _CONFIG["vbdsd"]["save_images"])
- repeated_camera_error_count = 0
- except CameraError as e:
- repeated_camera_error_count += 1
- if repeated_camera_error_count > 3:
- raise e
- else:
- logger.debug(
- f"camera occured ({repeated_camera_error_count} time(s) in a row). "
- + "sleeping 15 seconds, reconnecting camera"
- )
- _VBDSD.deinit()
- time.sleep(15)
- continue
-
- # append sun status to status history
- status_history.append(0 if (status == -1) else status)
- logger.debug(
- f"New VBDSD status: {status}. Current history: {status_history.get()}"
- )
-
- # evaluate sun state only if list is filled
- new_state = None
- if status_history.size() == status_history.maxsize():
- score = status_history.sum() / status_history.size()
- new_state = score > _CONFIG["vbdsd"]["measurement_threshold"]
-
- if current_state != new_state:
- logger.info(
- f"State change: {'BAD -> GOOD' if (new_state == True) else 'GOOD -> BAD'}"
- )
- StateInterface.update({"vbdsd_indicates_good_conditions": new_state})
- current_state = new_state
-
- # wait rest of loop time
- elapsed_time = time.time() - start_time
- time_to_wait = _CONFIG["vbdsd"]["seconds_per_interval"] - elapsed_time
- if time_to_wait > 0:
- logger.debug(
- f"Finished iteration, waiting {round(time_to_wait, 2)} second(s)."
- )
- time.sleep(time_to_wait)
-
- if not infinite_loop:
- return status_history
-
- except Exception as e:
- status_history.empty()
- _VBDSD.deinit()
-
- logger.error(f"error in VBDSD thread: {repr(e)}")
- logger.info(f"sleeping 30 seconds, reinitializing VBDSD thread")
- time.sleep(30)
diff --git a/packages/core/threads/__init__.py b/packages/core/threads/__init__.py
new file mode 100644
index 00000000..838f37de
--- /dev/null
+++ b/packages/core/threads/__init__.py
@@ -0,0 +1,2 @@
+from .helios_thread import HeliosThread
+from .upload_thread import UploadThread
diff --git a/packages/core/threads/helios_thread.py b/packages/core/threads/helios_thread.py
new file mode 100644
index 00000000..50e22877
--- /dev/null
+++ b/packages/core/threads/helios_thread.py
@@ -0,0 +1,449 @@
+from datetime import datetime
+import os
+import threading
+import time
+import cv2 as cv
+import numpy as np
+from typing import Any, Literal, Optional
+from packages.core import types, utils, interfaces
+
+logger = utils.Logger(origin="helios")
+
+dir = os.path.dirname
+PROJECT_DIR = dir(dir(dir(dir(os.path.abspath(__file__)))))
+IMG_DIR = os.path.join(PROJECT_DIR, "logs", "helios")
+AUTOEXPOSURE_IMG_DIR = os.path.join(PROJECT_DIR, "logs", "helios-autoexposure")
+_CONFIG: Optional[types.ConfigDict] = None
+
+
class CameraError(Exception):
    """Raised when the Helios camera cannot be initialized or read from."""
+
+
class _Helios:
    """
    Encapsulates the Helios camera (connected via cv2.VideoCapture).

    All state is stored on the class itself (singleton style) so that
    HeliosThread.main() can init/deinit the camera across loop iterations.
    """

    # cv2.VideoCapture handle; None = camera not connected
    cam: Optional[Any] = None
    # exposure value currently configured on the camera
    current_exposure = None
    # unix timestamp of the last autoexposure run
    last_autoexposure_time = 0.0
    # exposure values the camera accepted; probed once on first init
    available_exposures = None

    @staticmethod
    def init(camera_id: int, retries: int = 5) -> None:
        """
        Connect to the camera with the given device id and apply the
        default settings (1280x720, lowest available exposure, ...).

        Retries up to `retries` times (2 s apart) and raises a
        CameraError if the camera never reports as opened.
        """
        # TODO: Why is this necessary?
        _Helios.cam = cv.VideoCapture(camera_id, cv.CAP_DSHOW)
        assert _Helios.cam is not None
        _Helios.cam.release()

        for _ in range(retries):
            _Helios.cam = cv.VideoCapture(camera_id, cv.CAP_DSHOW)
            assert _Helios.cam is not None

            if _Helios.cam.isOpened():
                # probe the accepted exposure values only once per process
                if _Helios.available_exposures is None:
                    _Helios.available_exposures = _Helios.get_available_exposures()
                    logger.debug(
                        f"determined available exposures: {_Helios.available_exposures}"
                    )
                    assert (
                        len(_Helios.available_exposures) > 0
                    ), "did not find any available exposures"

                _Helios.current_exposure = min(_Helios.available_exposures)
                _Helios.update_camera_settings(
                    width=1280,
                    height=720,
                    exposure=min(_Helios.available_exposures),
                    brightness=64,
                    contrast=64,
                    saturation=0,
                    gain=0,
                )
                return
            else:
                # camera did not open yet; wait before retrying
                time.sleep(2)

        raise CameraError("could not initialize camera")

    @staticmethod
    def deinit() -> None:
        """
        Possibly release the camera (linked over cv2.VideoCapture)
        """
        if _Helios.cam is not None:
            _Helios.cam.release()
            _Helios.cam = None

    @staticmethod
    def get_available_exposures() -> list[int]:
        """
        Loop over every integer in [-20, ..., +20) and try to set
        the camera exposure to each value. Return a list of integers
        that the camera accepted as an exposure setting.
        """
        assert _Helios.cam is not None, "camera is not initialized yet"

        possible_values = []
        for exposure in range(-20, 20):
            _Helios.cam.set(cv.CAP_PROP_EXPOSURE, exposure)
            # the camera keeps its previous value when it rejects a setting
            if _Helios.cam.get(cv.CAP_PROP_EXPOSURE) == exposure:
                possible_values.append(exposure)

        return possible_values

    @staticmethod
    def update_camera_settings(
        exposure: Optional[int] = None,
        brightness: Optional[int] = None,
        contrast: Optional[int] = None,
        saturation: Optional[int] = None,
        gain: Optional[int] = None,
        width: Optional[int] = None,
        height: Optional[int] = None,
    ) -> None:
        """
        Update the settings of the connected camera. Which settings are
        available depends on the camera model. However, this function will
        throw an AssertionError, when the value could not be changed.

        Settings passed as None are left untouched.
        """
        assert _Helios.cam is not None, "camera is not initialized yet"

        properties = {
            "width": (cv.CAP_PROP_FRAME_WIDTH, width),
            "height": (cv.CAP_PROP_FRAME_HEIGHT, height),
            "exposure": (cv.CAP_PROP_EXPOSURE, exposure),
            "brightness": (cv.CAP_PROP_BRIGHTNESS, brightness),
            "contrast": (cv.CAP_PROP_CONTRAST, contrast),
            "saturation": (cv.CAP_PROP_SATURATION, saturation),
            "gain": (cv.CAP_PROP_GAIN, gain),
        }
        for property_name in properties:
            key, value = properties[property_name]
            if value is not None:
                _Helios.cam.set(key, value)
                # width/height are not read back — some cameras report the
                # sensor resolution rather than the configured frame size
                if property_name not in ["width", "height"]:
                    new_value = _Helios.cam.get(key)
                    assert (
                        new_value == value
                    ), f"could not set {property_name} to {value}, value is still at {new_value}"

        # throw away some images after changing settings. I don't know
        # why this is necessary, but it resolves a lot of issues
        for _ in range(2):
            _Helios.cam.read()

    @staticmethod
    def take_image(retries: int = 10, trow_away_white_images: bool = True) -> cv.Mat:
        """
        Take an image using the initialized camera. Raises an
        AssertionError if camera has not been set up.

        Retries up to n times (camera can say "not possible")
        and throws away all mostly white images (overexposed)
        except when specified not to (used in autoexposure).

        Note: the parameter keeps its historic spelling ("trow")
        for backward compatibility with existing keyword callers.
        """
        assert _Helios.cam is not None, "camera is not initialized yet"

        if not _Helios.cam.isOpened():
            raise CameraError("camera is not open")
        # retries + 1 attempts in total
        for _ in range(retries + 1):
            ret, frame = _Helios.cam.read()
            if ret:
                if trow_away_white_images and np.mean(frame) > 240:
                    # image is mostly white (overexposed), try again
                    continue
                return frame
        raise CameraError("could not take image")

    @staticmethod
    def adjust_exposure() -> None:
        """
        This function will loop over all available exposures and
        take one image for each exposure. Then it sets exposure
        to the value where the overall mean pixel value color is
        closest to 50.
        """
        assert _Helios.available_exposures is not None
        assert len(_Helios.available_exposures) > 0

        exposure_results = []
        for e in _Helios.available_exposures:
            _Helios.update_camera_settings(exposure=e)
            # keep white images here — overexposure is exactly what we measure
            img = _Helios.take_image(trow_away_white_images=False)
            mean_color = round(np.mean(img), 3)
            exposure_results.append({"exposure": e, "mean": mean_color})
            img = utils.ImageProcessing.add_text_to_image(
                img, f"mean={mean_color}", color=(0, 0, 255)
            )
            cv.imwrite(os.path.join(AUTOEXPOSURE_IMG_DIR, f"exposure-{e}.jpg"), img)

        logger.debug(f"exposure results: {exposure_results}")

        # pick the exposure whose mean pixel value is closest to 50
        new_exposure = min(exposure_results, key=lambda r: abs(r["mean"] - 50))["exposure"]  # type: ignore
        _Helios.update_camera_settings(exposure=new_exposure)

        if new_exposure != _Helios.current_exposure:
            logger.info(f"changing exposure: {_Helios.current_exposure} -> {new_exposure}")
            _Helios.current_exposure = new_exposure

    @staticmethod
    def determine_frame_status(frame: cv.Mat, save_image: bool) -> Literal[0, 1]:
        """
        For a given frame, determine whether the conditions are
        good (direct sunlight, returns 1) or bad (diffuse light
        or darkness, returns 0).

        1. Downscale image (faster processing)
        2. Convert to grayscale image
        3. Determine position and size of circular opening
        4. Determine edges in image (canny edge filter)
        5. Only consider edges inside 0.9 * circleradius
        6. If number of edge-pixels is > x: return 1; else: return 0;
        """

        assert _CONFIG is not None
        assert _CONFIG["helios"] is not None

        # transform image from 1280x720 to 640x360
        downscaled_image = cv.resize(frame, None, fx=0.5, fy=0.5)

        # for each rgb pixel [234,234,234] only consider the gray value (234)
        single_valued_pixels = cv.cvtColor(downscaled_image, cv.COLOR_BGR2GRAY)

        # determine lense position and size from binary mask
        binary_mask = utils.ImageProcessing.get_binary_mask(single_valued_pixels)
        circle_cx, circle_cy, circle_r = utils.ImageProcessing.get_circle_location(binary_mask)

        # only consider edges and make them bold
        edges_only: cv.Mat = np.array(cv.Canny(single_valued_pixels, 40, 40), dtype=np.float32)
        edges_only_dilated: cv.Mat = cv.dilate(
            edges_only, cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5))
        )

        # blacken the outer 10% of the circle radius
        edges_only_dilated *= utils.ImageProcessing.get_circle_mask(
            edges_only_dilated.shape, round(circle_r * 0.9), circle_cx, circle_cy
        )

        # determine how many pixels inside the circle are made up of "edge pixels"
        pixels_inside_circle: int = np.sum(binary_mask)
        # fix: edge_fraction was previously unbound (NameError) when the
        # circle mask was empty (pixels_inside_circle == 0) but was still
        # used in the debug log and in add_markings_to_image below
        edge_fraction: float = 0.0
        status: Literal[1, 0] = 0
        if pixels_inside_circle != 0:
            edge_fraction = round((np.sum(edges_only_dilated) / 255) / pixels_inside_circle, 6)
            sufficient_edge_fraction = (
                edge_fraction >= _CONFIG["helios"]["edge_detection_threshold"]
            )
            status = 1 if sufficient_edge_fraction else 0

        logger.debug(f"exposure = {_Helios.current_exposure}, edge_fraction = {edge_fraction}")

        if save_image:
            now = datetime.now()
            img_timestamp = now.strftime("%Y%m%d-%H%M%S")
            raw_img_name = f"{img_timestamp}-{status}-raw.jpg"
            processed_img_name = f"{img_timestamp}-{status}-processed.jpg"
            processed_frame = utils.ImageProcessing.add_markings_to_image(
                edges_only_dilated, edge_fraction, circle_cx, circle_cy, circle_r
            )
            img_directory_path = os.path.join(IMG_DIR, now.strftime("%Y%m%d"))
            # fix: also creates missing parent dirs and tolerates a
            # concurrent mkdir (exist-check + mkdir was racy)
            os.makedirs(img_directory_path, exist_ok=True)
            cv.imwrite(os.path.join(img_directory_path, raw_img_name), frame)
            cv.imwrite(os.path.join(img_directory_path, processed_img_name), processed_frame)

        return status

    @staticmethod
    def run(save_image: bool) -> Literal[0, 1]:
        """
        Take an image and evaluate the sun conditions.

        Run autoexposure function every 3 minutes.
        """
        now = time.time()
        if (now - _Helios.last_autoexposure_time) > 180:
            _Helios.adjust_exposure()
            _Helios.last_autoexposure_time = now

        frame = _Helios.take_image()
        return _Helios.determine_frame_status(frame, save_image)
+
+
class HeliosThread:
    """
    Thread for determining the current sun conditions in a
    parallel mainloop.

    "Good" sun conditions with respect to EM27 measurements
    means direct sunlight, i.e. no clouds in front of the
    sun. Interferograms recorded in diffuse light conditions
    result in a concentration timeseries (after retrieval)
    with a very large standard deviation.

    Direct sunlight can be determined by "hard" shadows, i.e.
    quick transitions between light and dark surfaces. This
    thread periodically takes images in a special camera setup
    and uses edge detection to determine how many hard shadows
    it can find in the image.

    The result of this constant sunlight evaluation is written
    to the StateInterface.
    """

    def __init__(self, config: types.ConfigDict) -> None:
        # the loop runs in a separate worker thread; a fresh Thread
        # object is created after every teardown (threads are one-shot)
        self.__thread = threading.Thread(target=HeliosThread.main)
        self.__logger: utils.Logger = utils.Logger(origin="helios")
        self.config: types.ConfigDict = config
        self.is_initialized = False

    def update_thread_state(self, new_config: types.ConfigDict) -> None:
        """
        Make sure that the thread loop is (not) running,
        based on config.helios and config.measurement_triggers
        """
        self.config = new_config
        should_be_running = HeliosThread.should_be_running(self.config)

        if should_be_running and (not self.is_initialized):
            self.__logger.info("Starting the thread")
            self.is_initialized = True
            self.__thread.start()

        # set up a new thread instance for the next time the thread should start
        if self.is_initialized:
            if self.__thread.is_alive():
                self.__logger.debug("Thread is alive")
            else:
                self.__logger.debug("Thread is not alive, running teardown")
                self.__thread.join()
                self.__thread = threading.Thread(target=HeliosThread.main)
                self.is_initialized = False

    @staticmethod
    def should_be_running(config: types.ConfigDict) -> bool:
        """Should the thread be running? (based on config)"""
        return (
            (not config["general"]["test_mode"])
            and (config["helios"] is not None)
            and (config["measurement_triggers"]["consider_helios"])
        )

    @staticmethod
    def main(headless: bool = False) -> None:
        """
        Main entrypoint of the thread.

        headless mode = don't write to log files, print to console, save all images
        """
        global logger
        global _CONFIG

        if headless:
            logger = utils.Logger(origin="helios", just_print=True)
        _CONFIG = interfaces.ConfigInterface.read()

        # Check for termination
        if (_CONFIG["helios"] is None) or (not HeliosThread.should_be_running(_CONFIG)):
            return

        # sliding window of the last n evaluation results (0/1)
        status_history = utils.RingList(_CONFIG["helios"]["evaluation_size"])
        # None = undecided (history not full yet or sun below horizon)
        current_state = None

        repeated_camera_error_count = 0

        while True:
            start_time = time.time()
            _CONFIG = interfaces.ConfigInterface.read()

            # Check for termination
            if (_CONFIG["helios"] is None) or (not HeliosThread.should_be_running(_CONFIG)):
                return

            try:
                # init camera connection
                if _Helios.cam is None:
                    logger.info("Initializing Helios camera")
                    _Helios.init(_CONFIG["helios"]["camera_id"])

                # reinit if parameter changes
                new_size = _CONFIG["helios"]["evaluation_size"]
                if status_history.maxsize() != new_size:
                    logger.debug(
                        "Size of Helios history has changed: "
                        + f"{status_history.maxsize()} -> {new_size}"
                    )
                    status_history.reinitialize(new_size)

                # sleep while sun angle is too low
                if (
                    not headless
                ) and utils.Astronomy.get_current_sun_elevation().is_within_bounds(
                    None, _CONFIG["general"]["min_sun_elevation"] * utils.Astronomy.units.deg
                ):
                    logger.debug("Current sun elevation below minimum: Waiting 5 minutes")
                    if current_state is not None:
                        interfaces.StateInterface.update(
                            {"helios_indicates_good_conditions": False}
                        )
                        current_state = None
                    # reinit for next day
                    _Helios.deinit()
                    time.sleep(300)
                    continue

                # take a picture and process it: status is in [0, 1]
                # a CameraError is allowed to happen 3 times in a row
                # at the 4th time the camera is not able to take an image
                # an Exception will be raised (and Helios will be restarted)
                try:
                    status = _Helios.run(headless or _CONFIG["helios"]["save_images"])
                    repeated_camera_error_count = 0
                except CameraError as e:
                    repeated_camera_error_count += 1
                    if repeated_camera_error_count > 3:
                        raise e
                    else:
                        # fix: message read "camera occured"
                        logger.debug(
                            f"camera error occurred ({repeated_camera_error_count} time(s) in a row). "
                            + "sleeping 15 seconds, reinitializing Helios"
                        )
                        _Helios.deinit()
                        time.sleep(15)
                        continue

                # append sun status to status history
                status_history.append(status)
                logger.debug(
                    f"New Helios status: {status}. Current history: {status_history.get()}"
                )

                # evaluate sun state only if list is filled
                new_state = None
                if status_history.size() == status_history.maxsize():
                    score = status_history.sum() / status_history.size()
                    new_state = score > _CONFIG["helios"]["measurement_threshold"]

                if current_state != new_state:
                    logger.info(
                        f"State change: {'BAD -> GOOD' if (new_state == True) else 'GOOD -> BAD'}"
                    )
                    interfaces.StateInterface.update(
                        {"helios_indicates_good_conditions": new_state}
                    )
                    current_state = new_state

                # wait rest of loop time
                elapsed_time = time.time() - start_time
                time_to_wait = _CONFIG["helios"]["seconds_per_interval"] - elapsed_time
                if time_to_wait > 0:
                    logger.debug(
                        f"Finished iteration, waiting {round(time_to_wait, 2)} second(s)."
                    )
                    time.sleep(time_to_wait)

            except Exception as e:
                # reset and restart on any unexpected error
                status_history.empty()
                _Helios.deinit()

                logger.error(f"error in HeliosThread: {repr(e)}")
                logger.info("sleeping 30 seconds, reinitializing HeliosThread")
                time.sleep(30)
diff --git a/packages/core/threads/upload_thread.py b/packages/core/threads/upload_thread.py
new file mode 100644
index 00000000..b30a60c2
--- /dev/null
+++ b/packages/core/threads/upload_thread.py
@@ -0,0 +1,455 @@
+from datetime import datetime
+import hashlib
+import json
+import os
+import shutil
+import threading
+from typing import Optional
+import invoke
+import paramiko
+import time
+import fabric.connection, fabric.transfer
+import re
+import pydantic
+from packages.core import types, utils, interfaces
+
+logger = utils.Logger(origin="upload")
+
+dir = os.path.dirname
+PROJECT_DIR = dir(dir(dir(dir(os.path.abspath(__file__)))))
+
+
class InvalidUploadState(Exception):
    """Raised when the upload state of a directory is inconsistent."""
+
+
class DirectoryUploadClient:
    """
    This is the client that is concerned with uploading one specific
    directory. run() will perform the actual upload process.
    """

    def __init__(
        self,
        date_string: str,
        src_path: str,
        dst_path: str,
        remove_files_after_upload: bool,
        connection: fabric.connection.Connection,
        transfer_process: fabric.transfer.Transfer,
    ) -> None:
        self.date_string = date_string
        self.src_path = src_path
        # normalize: strip a single trailing slash from the remote path
        self.dst_path = dst_path[:-1] if dst_path.endswith("/") else dst_path
        self.remove_files_after_upload = remove_files_after_upload
        self.connection = connection
        self.transfer_process = transfer_process

        # local meta path uses os.path.join; the remote path is always
        # slash-separated regardless of the local OS
        self.src_meta_path = os.path.join(self.src_path, self.date_string, "upload-meta.json")
        self.dst_meta_path = f"{self.dst_path}/{self.date_string}/upload-meta.json"

        assert self.transfer_process.is_remote_dir(
            self.dst_path
        ), f"remote {self.dst_path} is not a directory"

        # fresh meta content, used until __fetch_meta replaces it
        self.meta_content: types.UploadMetaDict = {
            "complete": False,
            "fileList": [],
            "createdTime": round(time.time(), 3),
            "lastModifiedTime": round(time.time(), 3),
        }

    def __initialize_remote_dir(self) -> None:
        """
        If the respective dst directory does not exist,
        create the directory and add a fresh upload-meta.json
        file to it.
        """
        dst_dir_path = f"{self.dst_path}/{self.date_string}"
        if not self.transfer_process.is_remote_dir(dst_dir_path):
            self.connection.run(f"mkdir {dst_dir_path}", hide=True, in_stream=False)
            with open(self.src_meta_path, "w") as f:
                json.dump(self.meta_content, f, indent=4)
            self.transfer_process.put(self.src_meta_path, self.dst_meta_path)

    def __get_remote_directory_checksum(self) -> str:
        """
        Calculate checksum over all files listed in the
        upload-meta.json file. The same logic will run
        on the local machine - which also has a meta file
        in its src directory with the same contents.

        This script requires the server to have Python
        3.10 installed and will raise an InvalidUploadState
        exception if it is not present.
        """
        local_script_path = os.path.join(PROJECT_DIR, "scripts", "get_upload_dir_checksum.py")
        remote_script_path = f"{self.dst_path}/get_upload_dir_checksum.py"
        self.transfer_process.put(local_script_path, remote_script_path)

        try:
            self.connection.run("python3.10 --version", hide=True, in_stream=False)
        except invoke.exceptions.UnexpectedExit:
            raise InvalidUploadState("python3.10 is not installed on the server")

        try:
            remote_command = (
                f"python3.10 {remote_script_path} {self.dst_path}/{self.date_string}"
            )
            result: invoke.runners.Result = self.connection.run(
                remote_command, hide=True, in_stream=False
            )
            assert result.exited == 0
            return result.stdout.strip()
        except (invoke.exceptions.UnexpectedExit, AssertionError) as e:
            raise InvalidUploadState(
                f"could not execute remote command on server ({remote_command}): {e}"
            )

    def __get_local_directory_checksum(self) -> str:
        """
        Calculate checksum over all files listed in the
        upload-meta.json file. The same logic will run
        on the server - which also has a meta file in
        its dst directory with the same contents
        """
        hasher = hashlib.md5()
        # sorted order must match the remote script's iteration order
        for filename in sorted(self.meta_content["fileList"]):
            filepath = os.path.join(self.src_path, self.date_string, filename)
            with open(filepath, "rb") as f:
                hasher.update(f.read())

        # output hashsum - with a status code of 0 the programs
        # stdout is a checksum, otherwise it is a traceback
        return hasher.hexdigest()

    def __fetch_meta(self) -> None:
        """
        Download the remote meta file to the local src directory
        and validate its contents; raises InvalidUploadState when
        the file is missing, unparsable or fails schema validation.
        """
        if os.path.isfile(self.src_meta_path):
            os.remove(self.src_meta_path)
        self.transfer_process.get(self.dst_meta_path, self.src_meta_path)
        try:
            assert os.path.isfile(self.src_meta_path)
            with open(self.src_meta_path, "r") as f:
                new_meta_content = json.load(f)
                types.validate_upload_meta_dict(new_meta_content)
                self.meta_content = new_meta_content
        except (AssertionError, json.JSONDecodeError, pydantic.ValidationError) as e:
            raise InvalidUploadState(str(e))

    def __update_meta(
        self,
        update: Optional[types.UploadMetaDictPartial] = None,
        sync_remote_meta: bool = True,
    ) -> None:
        """
        Update the local upload-meta.json file and overwrite
        the meta file on the server when sync_remote_meta is True
        """
        if update is not None:
            self.meta_content.update(update)
        self.meta_content.update({"lastModifiedTime": round(time.time(), 3)})
        with open(self.src_meta_path, "w") as f:
            json.dump(self.meta_content, f, indent=4)

        if sync_remote_meta:
            self.transfer_process.put(self.src_meta_path, self.dst_meta_path)

    def run(self) -> None:
        """
        Perform the whole upload process for a given directory.

        1. If the respective remote directory doesn't exist, create it
        2. Download the current upload-meta.json file from the server
        3. Determine which files have not been uploaded yet
        4. Upload every file that is found locally but not in the remote
           meta. Update the remote meta every 25 uploaded files (reduces
           load and traffic).
        5. Test whether the checksums of "ifgs on server" and "local ifgs"
           are equal, raise an exception (and end the function) if they differ
        6. Indicate that the upload process is complete in remote meta
        7. Optionally remove local ifgs
        """

        self.__initialize_remote_dir()
        self.__fetch_meta()

        # determine files present in src and dst directory
        # files should be named like "YYYYMMDD"
        # fix: escape date_string so regex metacharacters cannot alter the match
        ifg_file_pattern = re.compile("^.*" + re.escape(self.date_string) + ".*$")
        raw_src_files = os.listdir(os.path.join(self.src_path, self.date_string))
        src_file_set = set([f for f in raw_src_files if ifg_file_pattern.match(f)])
        dst_file_set = set(self.meta_content["fileList"])

        # determine file differences between src and dst
        files_missing_in_dst = src_file_set.difference(dst_file_set)
        files_missing_in_src = dst_file_set.difference(src_file_set)
        if len(files_missing_in_src) > 0:
            # this happens, when the process fails during the src removal
            raise InvalidUploadState(
                f"files present in dst are missing in src: {files_missing_in_src}"
            )

        # if there are files that have not been uploaded,
        # assert that the remote meta also indicates an
        # incomplete upload state
        if (len(files_missing_in_dst) != 0) and self.meta_content["complete"]:
            raise InvalidUploadState(
                "missing files on dst but remote meta contains complete=True"
            )

        # upload every file that is missing in the remote
        # meta but present in the local directory
        for i, f in enumerate(sorted(files_missing_in_dst)):
            self.transfer_process.put(
                os.path.join(self.src_path, self.date_string, f),
                f"{self.dst_path}/{self.date_string}/{f}",
            )
            # update the local meta in every loop, but only
            # sync the remote meta every 25 iterations
            self.__update_meta(
                update={"fileList": [*self.meta_content["fileList"], f]},
                sync_remote_meta=(i % 25 == 0),
            )

        # make sure that the remote meta is synced
        self.__update_meta()

        # raise an exception if the checksums do not match
        remote_checksum = self.__get_remote_directory_checksum()
        local_checksum = self.__get_local_directory_checksum()
        if remote_checksum != local_checksum:
            raise InvalidUploadState(
                f"checksums do not match, local={local_checksum} "
                + f"remote={remote_checksum}"
            )

        # only set meta.complete to True, when the checksums match
        self.__update_meta(update={"complete": True})
        logger.info(f"Successfully uploaded {self.date_string}")

        # only remove src if configured and checksums match
        if self.remove_files_after_upload:
            shutil.rmtree(os.path.join(self.src_path, self.date_string))
            logger.debug("Successfully removed source")
        else:
            logger.debug("Skipping removal of source")

    @staticmethod
    def __is_valid_date(date_string: str) -> bool:
        """
        Return True for strings formatted like "YYYYMMDD" whose day
        ended at least one hour ago (so the directory is no longer
        being written to).
        """
        try:
            day_ending = datetime.strptime(f"{date_string} 23:59:59", "%Y%m%d %H:%M:%S")
            seconds_since_day_ending = (datetime.now() - day_ending).total_seconds()
            assert seconds_since_day_ending >= 3600
            return True
        except (ValueError, AssertionError):
            return False

    @staticmethod
    def get_directories_to_be_uploaded(data_path: str) -> list[str]:
        """
        List all subdirectories of data_path that are named like a
        valid, completed date (see __is_valid_date). Returns an empty
        list when data_path is not a directory.
        """
        if not os.path.isdir(data_path):
            return []

        return list(
            filter(
                lambda f: os.path.isdir(os.path.join(data_path, f))
                and DirectoryUploadClient.__is_valid_date(f),
                os.listdir(data_path),
            )
        )
+
+
class UploadThread:
    """
    Thread for uploading all interferograms from a specific
    directory to a server via SSH. The local files will only
    be removed (optional) if the files on the server generate
    the same MD5 checksum as the local files.

    The source directory where OPUS puts the interferograms
    can be configured with config.upload.src_directory_ifgs.
    OPUS's output directory should be configured inside the
    macro file.

    The expected file structure looks like this:

        src_directory_ifgs/
            YYYYMMDD/
                <ifg file>
                <ifg file>
            YYYYMMDD/
                <ifg file>
                <ifg file>
            ...

    Each YYYYMMDD folder will be uploaded independently. During
    its upload the process will store its progress inside a file
    "YYYYMMDD/upload-meta.json" (locally and remotely).

    The upload-meta.json file looks like this:

        {
            "complete": bool,
            "fileList": [<filename>, <filename>, ...],
            "createdTime": float,
            "lastModifiedTime": float
        }

    This function only does one loop in headless mode.
    """

    def __init__(self, config: types.ConfigDict) -> None:
        # a fresh Thread object is created after every teardown
        # (thread objects are one-shot)
        self.__thread = threading.Thread(target=UploadThread.main)
        self.__logger: utils.Logger = utils.Logger(origin="upload")
        self.config: types.ConfigDict = config
        self.is_initialized = False

    @staticmethod
    def should_be_running(config: types.ConfigDict) -> bool:
        """Should the thread be running? (based on config.upload)"""
        # mirrors HeliosThread.should_be_running for consistency
        return (config["upload"] is not None) and (not config["general"]["test_mode"])

    def update_thread_state(self, new_config: types.ConfigDict) -> None:
        """
        Make sure that the thread loop is (not) running,
        based on config.upload
        """
        self.config = new_config
        should_be_running = UploadThread.should_be_running(new_config)

        if should_be_running and (not self.is_initialized):
            self.__logger.info("Starting the thread")
            self.is_initialized = True
            self.__thread.start()

        # set up a new thread instance for the next time the thread should start
        if self.is_initialized:
            if self.__thread.is_alive():
                self.__logger.debug("Thread is alive")
            else:
                self.__logger.debug("Thread is not alive, running teardown")
                self.__thread.join()
                self.__thread = threading.Thread(target=UploadThread.main)
                self.is_initialized = False

    @staticmethod
    def main(headless: bool = False) -> None:
        """
        Main entrypoint of the thread

        headless mode = don't write to log files, print to console
        """
        global logger

        if headless:
            logger = utils.Logger(origin="upload", just_print=True)

        while True:
            try:
                config = interfaces.ConfigInterface.read()
                upload_config = config["upload"]
                logger.info("Starting iteration, loading new config")

                # check for termination
                if (upload_config is None) or config["general"]["test_mode"]:
                    logger.info("Ending mainloop")
                    return

                try:
                    connection = fabric.connection.Connection(
                        f"{upload_config['user']}@{upload_config['host']}",
                        connect_kwargs={"password": upload_config["password"]},
                        connect_timeout=5,
                    )
                    transfer_process = fabric.transfer.Transfer(connection)
                except TimeoutError as e:
                    logger.error(f"could not reach host, waiting 5 minutes: {e}")
                    if headless:
                        break
                    time.sleep(300)
                    continue
                except paramiko.ssh_exception.AuthenticationException as e:
                    logger.error(f"failed to authenticate, waiting 2 minutes: {e}")
                    if headless:
                        break
                    time.sleep(120)
                    continue

                restart_mainloop = False
                try:
                    for category in ["helios", "ifgs"]:
                        if restart_mainloop:
                            break

                        # resolve src/dst paths for this upload category,
                        # skipping categories that are disabled in the config
                        if category == "helios":
                            if upload_config["upload_helios"]:
                                src_path = os.path.join(PROJECT_DIR, "logs", "helios")
                                dst_path = upload_config["dst_directory_helios"]
                                remove_files_after_upload = upload_config[
                                    "remove_src_helios_after_upload"
                                ]
                            else:
                                continue
                        else:
                            if upload_config["upload_ifgs"]:
                                src_path = upload_config["src_directory_ifgs"]
                                dst_path = upload_config["dst_directory_ifgs"]
                                remove_files_after_upload = upload_config[
                                    "remove_src_ifgs_after_upload"
                                ]
                                if not os.path.isdir(src_path):
                                    logger.error(
                                        f'config.upload.src_directory_ifgs ("{src_path}") is not a directory'
                                    )
                                    continue
                            else:
                                continue

                        src_date_strings = DirectoryUploadClient.get_directories_to_be_uploaded(
                            src_path
                        )
                        for date_string in src_date_strings:
                            new_config = interfaces.ConfigInterface.read()
                            new_upload_config = new_config["upload"]

                            # check for termination before processing each directory
                            if (new_upload_config is None) or new_config["general"]["test_mode"]:
                                return

                            # if the config changes, the mainloop should start over
                            if any(
                                [
                                    upload_config[key] != new_upload_config[key]  # type: ignore
                                    for key in upload_config.keys()
                                ]
                            ):
                                logger.info("Change in config.upload has been detected")
                                restart_mainloop = True  # stops outer loop (ifg, helios)
                                break  # stops inner loop (ifg-/helios-dates)

                            try:
                                logger.info(f"Starting to process {date_string}")
                                DirectoryUploadClient(
                                    date_string,
                                    src_path,
                                    dst_path,
                                    remove_files_after_upload,
                                    connection,
                                    transfer_process,
                                ).run()
                            except InvalidUploadState as e:
                                logger.error(
                                    f"uploading {date_string} is stuck in invalid state: {e}"
                                )
                finally:
                    # fix: close SSH and SCP connections on every exit path —
                    # previously the connection leaked on the headless break,
                    # on the termination return above and on unexpected
                    # exceptions escaping the category loop
                    connection.close()

                if headless:
                    break

                if not restart_mainloop:
                    # Wait 10 minutes before checking all directories again
                    logger.debug("Finished iteration, sleeping 10 minutes")
                    time.sleep(600)
            except Exception as e:
                logger.error("Error inside upload thread")
                logger.exception(e)
                return
diff --git a/packages/core/types/__init__.py b/packages/core/types/__init__.py
new file mode 100644
index 00000000..7bcef022
--- /dev/null
+++ b/packages/core/types/__init__.py
@@ -0,0 +1,52 @@
+from typing import Any
+import pydantic.errors
+import pydantic.validators
+
+
+# this workaround is necessary because pydantic currently does
+# not support strict validation on a whole Model. It converts
+# the input to the datatype, i.e. "23" will not raise an error
+# on int or float data types because it can be converted.
+
+# Read https://github.com/pydantic/pydantic/issues/578 on the reason for this decision
+# Watch https://github.com/pydantic/pydantic/issues/1098 for a possible fix
+
+
+def _strict_bool_validator(v: Any) -> bool:
+ if isinstance(v, bool):
+ return v
+ raise pydantic.errors.BoolError()
+
+
+def _strict_float_validator(v: Any) -> float:
+ if isinstance(v, float) or isinstance(v, int):
+ return v
+ raise pydantic.errors.FloatError()
+
+
# Swap pydantic's default coercing validators for strict ones, so that
# e.g. "23" no longer validates as int/float and "true" no longer
# validates as bool. _VALIDATORS is a private pydantic (v1) API: a list
# of (type, [validator, ...]) tuples consulted in registration order —
# NOTE(review): this monkey-patch depends on pydantic v1 internals and
# must be revisited when upgrading pydantic.
for i, (type_, _) in enumerate(pydantic.validators._VALIDATORS):
    if type_ == int:
        pydantic.validators._VALIDATORS[i] = (int, [pydantic.validators.strict_int_validator])
    if type_ == float:
        pydantic.validators._VALIDATORS[i] = (float, [_strict_float_validator])
    if type_ == str:
        pydantic.validators._VALIDATORS[i] = (str, [pydantic.validators.strict_str_validator])
    if type_ == bool:
        pydantic.validators._VALIDATORS[i] = (bool, [_strict_bool_validator])
+
+
+from .config import ConfigDict, ConfigDictPartial, ConfigSubDicts
+from .config import validate_config_dict
+
+from .persistent_state import PersistentStateDict, PersistentStateDictPartial
+from .persistent_state import validate_persistent_state_dict
+
+from .plc_specification import PlcSpecificationDict
+
+from .plc_state import PlcStateDict, PlcStateDictPartial
+
+from .state import StateDict, StateDictPartial
+from .state import validate_state_dict
+
+from .upload_meta import UploadMetaDict, UploadMetaDictPartial
+from .upload_meta import validate_upload_meta_dict
diff --git a/packages/core/types/config.py b/packages/core/types/config.py
new file mode 100644
index 00000000..69161ec1
--- /dev/null
+++ b/packages/core/types/config.py
@@ -0,0 +1,292 @@
+import os
+import pydantic
+from typing import Any, Callable, Literal, Optional, TypedDict
+
+
+TimeDict = TypedDict("TimeDict", {"hour": int, "minute": int, "second": int})
+TimeDictPartial = TypedDict(
+ "TimeDictPartial", {"hour": int, "minute": int, "second": int}, total=False
+)
+
+
+class ConfigSubDicts:
+ @staticmethod
+ class General(TypedDict):
+ version: Literal["4.0.5"]
+ seconds_per_core_interval: float
+ test_mode: bool
+ station_id: str
+ min_sun_elevation: float
+
+ @staticmethod
+ class GeneralPartial(TypedDict, total=False):
+ seconds_per_core_interval: float
+ test_mode: bool
+ station_id: str
+ min_sun_elevation: float
+
+ @staticmethod
+ class Opus(TypedDict):
+ em27_ip: str
+ executable_path: str
+ experiment_path: str
+ macro_path: str
+ username: str
+ password: str
+
+ @staticmethod
+ class OpusPartial(TypedDict, total=False):
+ em27_ip: str
+ executable_path: str
+ experiment_path: str
+ macro_path: str
+ username: str
+ password: str
+
+ @staticmethod
+ class Camtracker(TypedDict):
+ config_path: str
+ executable_path: str
+ learn_az_elev_path: str
+ sun_intensity_path: str
+ motor_offset_threshold: float
+
+ @staticmethod
+ class CamtrackerPartial(TypedDict, total=False):
+ config_path: str
+ executable_path: str
+ learn_az_elev_path: str
+ sun_intensity_path: str
+ motor_offset_threshold: float
+
+ @staticmethod
+ class ErrorEmail(TypedDict):
+ sender_address: str
+ sender_password: str
+ notify_recipients: bool
+ recipients: str
+
+ @staticmethod
+ class ErrorEmailPartial(TypedDict, total=False):
+ sender_address: str
+ sender_password: str
+ notify_recipients: bool
+ recipients: str
+
+ @staticmethod
+ class MeasurementDecision(TypedDict):
+ mode: Literal["automatic", "manual", "cli"]
+ manual_decision_result: bool
+ cli_decision_result: bool
+
+ @staticmethod
+ class MeasurementDecisionPartial(TypedDict, total=False):
+ mode: Literal["automatic", "manual", "cli"]
+ manual_decision_result: bool
+ cli_decision_result: bool
+
+ @staticmethod
+ class MeasurementTriggers(TypedDict):
+ consider_time: bool
+ consider_sun_elevation: bool
+ consider_helios: bool
+ start_time: TimeDict
+ stop_time: TimeDict
+ min_sun_elevation: float
+
+ @staticmethod
+ class MeasurementTriggersPartial(TypedDict, total=False):
+ consider_time: bool
+ consider_sun_elevation: bool
+ consider_helios: bool
+ start_time: TimeDictPartial
+ stop_time: TimeDictPartial
+ min_sun_elevation: float
+
+ @staticmethod
+ class TumPlc(TypedDict):
+ ip: str
+ version: Literal[1, 2]
+ controlled_by_user: bool
+
+ @staticmethod
+ class TumPlcPartial(TypedDict, total=False):
+ ip: str
+ version: Literal[1, 2]
+ controlled_by_user: bool
+
+ @staticmethod
+ class Helios(TypedDict):
+ camera_id: int
+ evaluation_size: int
+ seconds_per_interval: float
+ measurement_threshold: float
+ edge_detection_threshold: float
+ save_images: bool
+
+ @staticmethod
+ class HeliosPartial(TypedDict, total=False):
+ camera_id: int
+ evaluation_size: int
+ seconds_per_interval: float
+ measurement_threshold: float
+ edge_detection_threshold: float
+ save_images: bool
+
+ @staticmethod
+ class Upload(TypedDict):
+ host: str
+ user: str
+ password: str
+ upload_ifgs: bool
+ src_directory_ifgs: str
+ dst_directory_ifgs: str
+ remove_src_ifgs_after_upload: bool
+ upload_helios: bool
+ dst_directory_helios: str
+ remove_src_helios_after_upload: bool
+
+ @staticmethod
+ class UploadPartial(TypedDict, total=False):
+ host: str
+ user: str
+ password: str
+ upload_ifgs: bool
+ src_directory_ifgs: str
+ dst_directory_ifgs: str
+ remove_src_ifgs_after_upload: bool
+ upload_helios: bool
+ dst_directory_helios: str
+ remove_src_helios_after_upload: bool
+
+
+class ConfigDict(TypedDict):
+ general: ConfigSubDicts.General
+ opus: ConfigSubDicts.Opus
+ camtracker: ConfigSubDicts.Camtracker
+ error_email: ConfigSubDicts.ErrorEmail
+ measurement_decision: ConfigSubDicts.MeasurementDecision
+ measurement_triggers: ConfigSubDicts.MeasurementTriggers
+ tum_plc: Optional[ConfigSubDicts.TumPlc]
+ helios: Optional[ConfigSubDicts.Helios]
+ upload: Optional[ConfigSubDicts.Upload]
+
+
+class ConfigDictPartial(TypedDict, total=False):
+ general: ConfigSubDicts.GeneralPartial
+ opus: ConfigSubDicts.OpusPartial
+ camtracker: ConfigSubDicts.CamtrackerPartial
+ error_email: ConfigSubDicts.ErrorEmailPartial
+ measurement_decision: ConfigSubDicts.MeasurementDecisionPartial
+ measurement_triggers: ConfigSubDicts.MeasurementTriggersPartial
+ tum_plc: Optional[ConfigSubDicts.TumPlcPartial]
+ helios: Optional[ConfigSubDicts.HeliosPartial]
+ upload: Optional[ConfigSubDicts.UploadPartial]
+
+
+class ValidationError(Exception):
+ """
+ Will be raised if any custom checks on config dicts
+ have failed: file-existence, ip-format, min/max-range
+ """
+
+
+def validate_config_dict(o: Any, partial: bool = False, skip_filepaths: bool = False) -> None:
+ """
+ Check, whether a given object is a correct ConfigDict
+ Raises a ValidationError if the object is invalid.
+
+ This should always be used when loading the object from a
+ JSON file!
+ """
+ try:
+ if partial:
+ _ValidationModel(partial=o)
+ else:
+ _ValidationModel(regular=o)
+ except pydantic.ValidationError as e:
+ pretty_error_messages = []
+ for error in e.errors():
+ fields = [str(f) for f in error["loc"][1:] if f not in ["__root__"]]
+ pretty_error_messages.append(f"{'.'.join(fields)} -> {error['msg']}")
+ raise ValidationError(f"config is invalid: {', '.join(pretty_error_messages)}")
+
+ new_object: ConfigDict = o
+
+ def get_nested_dict_property(property_path: str) -> Any:
+ prop = new_object
+ for key in property_path.split("."):
+ prop = prop[key] # type: ignore
+ return prop
+
+ def assert_min_max(property_path: str, min_value: float, max_value: float) -> None:
+ prop: float = get_nested_dict_property(property_path)
+ error_message = f"config.{property_path} must be in range [{min_value}, {max_value}]"
+ assert prop >= min_value, error_message
+ assert prop <= max_value, error_message
+
+ def assert_file_path(property_path: str) -> None:
+ prop: str = get_nested_dict_property(property_path)
+ if not skip_filepaths:
+ assert os.path.isfile(prop), f"config.{property_path} is not a file"
+
+ def assert_ip_address(property_path: str) -> None:
+ prop: str = get_nested_dict_property(property_path)
+ error_message = f"config.{property_path} is not a valid ip address"
+ values: list[str] = prop.split(".")
+ assert len(values) == 4, error_message
+ assert all([x.isnumeric() for x in values]), error_message
+ assert all([0 <= int(x) <= 255 for x in values]), error_message
+
+ assertions: list[Callable[[], None]] = [
+ lambda: assert_min_max("general.seconds_per_core_interval", 5, 600),
+ lambda: assert_min_max("general.min_sun_elevation", 0, 90),
+ lambda: assert_ip_address("opus.em27_ip"),
+ lambda: assert_file_path("opus.executable_path"),
+ lambda: assert_file_path("opus.experiment_path"),
+ lambda: assert_file_path("opus.macro_path"),
+ lambda: assert_file_path("camtracker.config_path"),
+ lambda: assert_file_path("camtracker.executable_path"),
+ lambda: assert_file_path("camtracker.learn_az_elev_path"),
+ lambda: assert_file_path("camtracker.sun_intensity_path"),
+ lambda: assert_min_max("camtracker.motor_offset_threshold", -360, 360),
+ lambda: assert_min_max("measurement_triggers.min_sun_elevation", 0, 90),
+ lambda: assert_min_max("measurement_triggers.start_time.hour", 0, 23),
+ lambda: assert_min_max("measurement_triggers.stop_time.hour", 0, 23),
+ lambda: assert_min_max("measurement_triggers.start_time.minute", 0, 59),
+ lambda: assert_min_max("measurement_triggers.stop_time.minute", 0, 59),
+ lambda: assert_min_max("measurement_triggers.start_time.second", 0, 59),
+ lambda: assert_min_max("measurement_triggers.stop_time.second", 0, 59),
+ lambda: assert_ip_address("tum_plc.ip"),
+ lambda: assert_min_max("helios.camera_id", 0, 999999),
+ lambda: assert_min_max("helios.evaluation_size", 1, 100),
+ lambda: assert_min_max("helios.seconds_per_interval", 5, 600),
+ lambda: assert_min_max("helios.measurement_threshold", 0.1, 1),
+ lambda: assert_ip_address("upload.host"),
+ ]
+
+ # this does not check for a valid upload.src_directory_ifgs path
+ # since the thread itself will check for this
+
+ pretty_error_messages = []
+
+ for assertion in assertions:
+ try:
+ assertion()
+ except AssertionError as a:
+ pretty_error_messages.append(a.args[0])
+ except (TypeError, KeyError):
+ # Will be ignored because the structure is already
+ # validated. Occurs when property is missing
+ pass
+
+ if len(pretty_error_messages) > 0:
+ raise ValidationError(f"config is invalid: {', '.join(pretty_error_messages)}")
+
+
+class _ValidationModel(pydantic.BaseModel):
+ regular: Optional[ConfigDict]
+ partial: Optional[ConfigDictPartial]
+
+ class Config:
+ extra = "forbid"
diff --git a/packages/core/types/persistent_state.py b/packages/core/types/persistent_state.py
new file mode 100644
index 00000000..476fdca4
--- /dev/null
+++ b/packages/core/types/persistent_state.py
@@ -0,0 +1,31 @@
+import pydantic
+from typing import Any, Optional, TypedDict
+
+
+class PersistentStateDict(TypedDict):
+ active_opus_macro_id: Optional[int]
+ current_exceptions: list[str]
+
+
+class PersistentStateDictPartial(TypedDict, total=False):
+ active_opus_macro_id: Optional[int]
+ current_exceptions: list[str]
+
+
+def validate_persistent_state_dict(o: Any, partial: bool = False) -> None:
+ """
+ Check, whether a given object is a correct PersistentStateDict
+ Raises a pydantic.ValidationError if the object is invalid.
+
+ This should always be used when loading the object from a
+ JSON file!
+ """
+ if partial:
+ _ValidationModel(partial=o)
+ else:
+ _ValidationModel(regular=o)
+
+
+class _ValidationModel(pydantic.BaseModel):
+ regular: Optional[PersistentStateDict]
+ partial: Optional[PersistentStateDictPartial]
diff --git a/packages/core/types/plc_specification.py b/packages/core/types/plc_specification.py
new file mode 100644
index 00000000..d05ac58a
--- /dev/null
+++ b/packages/core/types/plc_specification.py
@@ -0,0 +1,56 @@
+from typing import Optional, TypedDict
+
+# TODO: use tuples (3 ints vs 4 ints)
+
+
+class _PlcSpecificationDictActors(TypedDict):
+ current_angle: list[int]
+ fan_speed: list[int]
+ move_cover: list[int]
+ nominal_angle: list[int]
+
+
+class _PlcSpecificationDictControl(TypedDict):
+ auto_temp_mode: list[int]
+ manual_control: list[int]
+ manual_temp_mode: list[int]
+ reset: list[int]
+ sync_to_tracker: list[int]
+
+
+class _PlcSpecificationDictSensors(TypedDict):
+ humidity: list[int]
+ temperature: list[int]
+
+
+class _PlcSpecificationDictState(TypedDict):
+ cover_closed: list[int]
+ motor_failed: Optional[list[int]]
+ rain: list[int]
+ reset_needed: list[int]
+ ups_alert: list[int]
+
+
+class _PlcSpecificationDictPower(TypedDict):
+ camera: list[int]
+ computer: Optional[list[int]]
+ heater: list[int]
+ router: Optional[list[int]]
+ spectrometer: list[int]
+
+
+class _PlcSpecificationDictConnections(TypedDict):
+ camera: Optional[list[int]]
+ computer: list[int]
+ heater: list[int]
+ router: list[int]
+ spectrometer: Optional[list[int]]
+
+
+class PlcSpecificationDict(TypedDict):
+ actors: _PlcSpecificationDictActors
+ control: _PlcSpecificationDictControl
+ sensors: _PlcSpecificationDictSensors
+ state: _PlcSpecificationDictState
+ power: _PlcSpecificationDictPower
+ connections: _PlcSpecificationDictConnections
diff --git a/packages/core/types/plc_state.py b/packages/core/types/plc_state.py
new file mode 100644
index 00000000..39ceca83
--- /dev/null
+++ b/packages/core/types/plc_state.py
@@ -0,0 +1,107 @@
+from typing import Optional, TypedDict
+
+
+class _PlcStateDictActors(TypedDict):
+ fan_speed: Optional[int]
+ current_angle: Optional[int]
+
+
+class _PlcStateDictActorsPartial(TypedDict, total=False):
+ camera: Optional[bool]
+ fan_speed: Optional[int]
+ current_angle: Optional[int]
+
+
+class _PlcStateDictControl(TypedDict):
+ auto_temp_mode: Optional[bool]
+ manual_control: Optional[bool]
+ manual_temp_mode: Optional[bool]
+ sync_to_tracker: Optional[bool]
+
+
+class _PlcStateDictControlPartial(TypedDict, total=False):
+ camera: Optional[bool]
+ auto_temp_mode: Optional[bool]
+ manual_control: Optional[bool]
+ manual_temp_mode: Optional[bool]
+ sync_to_tracker: Optional[bool]
+
+
+class _PlcStateDictSensors(TypedDict):
+ humidity: Optional[int]
+ temperature: Optional[int]
+
+
+class _PlcStateDictSensorsPartial(TypedDict, total=False):
+ camera: Optional[bool]
+ humidity: Optional[int]
+ temperature: Optional[int]
+
+
+class _PlcStateDictState(TypedDict):
+ cover_closed: Optional[bool]
+ motor_failed: Optional[bool]
+ rain: Optional[bool]
+ reset_needed: Optional[bool]
+ ups_alert: Optional[bool]
+
+
+class _PlcStateDictStatePartial(TypedDict, total=False):
+ camera: Optional[bool]
+ cover_closed: Optional[bool]
+ motor_failed: Optional[bool]
+ rain: Optional[bool]
+ reset_needed: Optional[bool]
+ ups_alert: Optional[bool]
+
+
+class _PlcStateDictPower(TypedDict):
+ camera: Optional[bool]
+ computer: Optional[bool]
+ heater: Optional[bool]
+ router: Optional[bool]
+ spectrometer: Optional[bool]
+
+
+class _PlcStateDictPowerPartial(TypedDict, total=False):
+ camera: Optional[bool]
+ computer: Optional[bool]
+ heater: Optional[bool]
+ router: Optional[bool]
+ spectrometer: Optional[bool]
+
+
+class _PlcStateDictConnections(TypedDict):
+ camera: Optional[bool]
+ computer: Optional[bool]
+ heater: Optional[bool]
+ router: Optional[bool]
+ spectrometer: Optional[bool]
+
+
+class _PlcStateDictConnectionsPartial(TypedDict, total=False):
+ camera: Optional[bool]
+ computer: Optional[bool]
+ heater: Optional[bool]
+ router: Optional[bool]
+ spectrometer: Optional[bool]
+
+
+class PlcStateDict(TypedDict):
+ last_read_time: Optional[str]
+ actors: _PlcStateDictActors
+ control: _PlcStateDictControl
+ sensors: _PlcStateDictSensors
+ state: _PlcStateDictState
+ power: _PlcStateDictPower
+ connections: _PlcStateDictConnections
+
+
+class PlcStateDictPartial(TypedDict, total=False):
+ last_read_time: Optional[str]
+ actors: _PlcStateDictActorsPartial
+ control: _PlcStateDictControlPartial
+ sensors: _PlcStateDictSensorsPartial
+ state: _PlcStateDictStatePartial
+ power: _PlcStateDictPowerPartial
+ connections: _PlcStateDictConnectionsPartial
diff --git a/packages/core/types/state.py b/packages/core/types/state.py
new file mode 100644
index 00000000..ce43b2d3
--- /dev/null
+++ b/packages/core/types/state.py
@@ -0,0 +1,43 @@
+import pydantic
+from typing import Any, Union, Optional, TypedDict
+from .plc_state import PlcStateDict, PlcStateDictPartial
+
+
+class _OSStateDict(TypedDict):
+ cpu_usage: Optional[list[float]]
+ memory_usage: Optional[float]
+ last_boot_time: Optional[str]
+ filled_disk_space_fraction: Optional[float]
+
+
+class StateDict(TypedDict):
+ helios_indicates_good_conditions: Optional[bool]
+ measurements_should_be_running: bool
+ enclosure_plc_readings: PlcStateDict
+ os_state: _OSStateDict
+
+
+class StateDictPartial(TypedDict, total=False):
+ helios_indicates_good_conditions: Optional[int]
+ measurements_should_be_running: bool
+ enclosure_plc_readings: Union[PlcStateDict, PlcStateDictPartial]
+ os_state: _OSStateDict
+
+
+def validate_state_dict(o: Any, partial: bool = False) -> None:
+ """
+ Check, whether a given object is a correct StateDict
+ Raises a pydantic.ValidationError if the object is invalid.
+
+ This should always be used when loading the object from a
+ JSON file!
+ """
+ if partial:
+ _ValidationModel(partial=o)
+ else:
+ _ValidationModel(regular=o)
+
+
+class _ValidationModel(pydantic.BaseModel):
+ regular: Optional[StateDict]
+ partial: Optional[StateDictPartial]
diff --git a/packages/core/types/upload_meta.py b/packages/core/types/upload_meta.py
new file mode 100644
index 00000000..e7f79790
--- /dev/null
+++ b/packages/core/types/upload_meta.py
@@ -0,0 +1,35 @@
+import pydantic
+from typing import Any, Optional, TypedDict
+
+
+class UploadMetaDict(TypedDict):
+ complete: bool
+ fileList: list[str]
+ createdTime: float
+ lastModifiedTime: float
+
+
+class UploadMetaDictPartial(TypedDict, total=False):
+ complete: bool
+ fileList: list[str]
+ createdTime: float
+ lastModifiedTime: float
+
+
+def validate_upload_meta_dict(o: Any, partial: bool = False) -> None:
+ """
+ Check, whether a given object is a correct UploadMetaDict
+ Raises a pydantic.ValidationError if the object is invalid.
+
+ This should always be used when loading the object from a
+ JSON file!
+ """
+ if partial:
+ _ValidationModel(partial=o)
+ else:
+ _ValidationModel(regular=o)
+
+
+class _ValidationModel(pydantic.BaseModel):
+ regular: Optional[UploadMetaDict]
+ partial: Optional[UploadMetaDictPartial]
diff --git a/packages/core/utils/__init__.py b/packages/core/utils/__init__.py
index 05ee3ed0..2b0e2270 100644
--- a/packages/core/utils/__init__.py
+++ b/packages/core/utils/__init__.py
@@ -1,14 +1,7 @@
-from .functions import Logger
-from .functions import RingList
-from .functions import Astronomy
-from .functions import ExceptionEmailClient
-from .functions import ExceptionEmailClient
-from .functions import update_dict_recursively
-from .functions import ImageProcessing
-
-from .decorators import with_filelock
-
-from .interfaces import ConfigInterface, ConfigValidation
-from .interfaces import StateInterface
-from .interfaces import PLCInterface, PLCError
-from .interfaces import OSInterface, LowEnergyError, StorageError
+from .logger import Logger
+from .ring_list import RingList
+from .astronomy import Astronomy
+from .exception_email_client import ExceptionEmailClient
+from .update_dict_recursively import update_dict_recursively
+from .image_processing import ImageProcessing
+from .with_filelock import with_filelock
diff --git a/packages/core/utils/functions/astronomy.py b/packages/core/utils/astronomy.py
similarity index 73%
rename from packages/core/utils/functions/astronomy.py
rename to packages/core/utils/astronomy.py
index a19523e3..4a5450ed 100644
--- a/packages/core/utils/functions/astronomy.py
+++ b/packages/core/utils/astronomy.py
@@ -1,14 +1,20 @@
+from typing import Any, Optional
import astropy.coordinates as astropy_coordinates
import astropy.time as astropy_time
import astropy.units as astropy_units
+from packages.core import types
+
+# TODO: pass config via functions instead of indirectly - more code but way simpler
+
+# TODO: add static typing (simplify code while doing that (less astropy stuff))
class Astronomy:
- CONFIG: dict = None
+ CONFIG: Optional[types.ConfigDict] = None
units = astropy_units
@staticmethod
- def get_current_sun_elevation():
+ def get_current_sun_elevation() -> Any:
"""calc_sun_angle_deg(location loc): Computes and returns the current sun
angle in degree, based on the location loc, computed by get_tracker_position(),
and current time. Therefore, the pack- ages time and astrophy are required.
@@ -22,7 +28,7 @@ def get_current_sun_elevation():
return sun_angle_deg
@staticmethod
- def __get_location_from_camtracker_config() -> tuple[float]:
+ def __get_location_from_camtracker_config() -> tuple[float, float, float]:
"""Reads the config.txt file of the CamTracker application to receive the
latest tracker position.
@@ -30,6 +36,8 @@ def __get_location_from_camtracker_config() -> tuple[float]:
tracker_position as a python list
"""
+ assert Astronomy.CONFIG is not None, "astronomy has no config yet"
+
with open(Astronomy.CONFIG["camtracker"]["config_path"], "r") as f:
_lines = f.readlines()
@@ -42,12 +50,13 @@ def __get_location_from_camtracker_config() -> tuple[float]:
assert _marker_line_index is not None, "Camtracker config file is not valid"
# (latitude, longitude, altitude)
- return tuple(
- [float(_lines[_marker_line_index + n].replace("\n", "")) for n in [1, 2, 3]]
- )
+ lat = float(_lines[_marker_line_index + 1].strip())
+ lon = float(_lines[_marker_line_index + 2].strip())
+ alt = float(_lines[_marker_line_index + 3].strip())
+ return (lat, lon, alt)
@staticmethod
- def __get_astropy_location():
+ def __get_astropy_location() -> Any:
"""
get_tracker_position(): Reads out the height, the longitude and the
latitude of the system from CamTrackerConfig.txt, and computes the location
diff --git a/packages/core/utils/decorators/__init__.py b/packages/core/utils/decorators/__init__.py
deleted file mode 100644
index 0b5afbf8..00000000
--- a/packages/core/utils/decorators/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .with_filelock import with_filelock
diff --git a/packages/core/utils/decorators/with_filelock.py b/packages/core/utils/decorators/with_filelock.py
deleted file mode 100644
index 6ecc8f5a..00000000
--- a/packages/core/utils/decorators/with_filelock.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import filelock
-
-# FileLock = Mark, that a file is being used and other programs
-# should not interfere. A file "*.lock" will be created and the
-# existence of this file will make the wrapped function wait until
-# it no longer exists.
-
-# A timeout of -1 means that the code waits forever
-
-
-def with_filelock(file_lock_path, timeout=-1):
- def with_fixed_filelock(function):
- def locked_function(*args, **kwargs):
- with filelock.FileLock(file_lock_path, timeout=timeout):
- return function(*args, **kwargs)
-
- return locked_function
-
- return with_fixed_filelock
diff --git a/packages/core/utils/decorators/with_timeout.py b/packages/core/utils/decorators/with_timeout.py
deleted file mode 100644
index 2ed8ccb6..00000000
--- a/packages/core/utils/decorators/with_timeout.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import signal
-import time
-
-# Derived from https://code-maven.com/python-timeout
-# TODO: figure out why this doesn't work on windows
-
-
-class TimeOutException(Exception):
- pass
-
-
-def _raise_timeout_exception(*args):
- raise TimeOutException()
-
-
-def with_timeout(timeout_seconds: int):
- def with_fixed_timeout(function):
- def bounded_function(*args, **kwargs):
- signal.signal(signal.SIGALRM, _raise_timeout_exception)
- signal.alarm(timeout_seconds)
- function_result = function(*args, **kwargs)
- signal.alarm(0)
- return function_result
-
- return bounded_function
-
- return with_fixed_timeout
-
-
-if __name__ == "__main__":
-
- @with_timeout(5)
- def example_function(start_value=1):
- n = start_value
- while True:
- print(f"n = {n}")
- n += 1
- time.sleep(1)
-
- try:
- example_function()
- except TimeOutException:
- print("function call 1 timed out")
-
- try:
- example_function(start_value=4)
- except TimeOutException:
- print("function call 2 timed out")
diff --git a/packages/core/utils/functions/exception_email_client.py b/packages/core/utils/exception_email_client.py
similarity index 76%
rename from packages/core/utils/functions/exception_email_client.py
rename to packages/core/utils/exception_email_client.py
index fc8b2ab1..8139dc6b 100644
--- a/packages/core/utils/functions/exception_email_client.py
+++ b/packages/core/utils/exception_email_client.py
@@ -5,31 +5,45 @@
from email.mime.multipart import MIMEMultipart
import subprocess
import traceback
+from typing import Optional
+from packages.core import types
dir = os.path.dirname
-PROJECT_DIR = dir(dir(dir(dir(dir(os.path.abspath(__file__))))))
+PROJECT_DIR = dir(dir(dir(dir(os.path.abspath(__file__)))))
-def get_pyra_version():
+def get_pyra_version() -> str:
+ """Get the current PYRA version from the UI's package.json file"""
with open(os.path.join(PROJECT_DIR, "packages", "ui", "package.json")) as f:
pyra_version: str = json.load(f)["version"]
assert pyra_version.startswith("4.")
return pyra_version
-def get_commit_sha():
+def get_commit_sha() -> Optional[str]:
+ """Get the current commit sha of the PYRA codebase
+ Returns None if git is not installed or the directory
+ is not a git repository."""
commit_sha_process = subprocess.run(
["git", "rev-parse", "--verify", "HEAD", "--short"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
- stdout = commit_sha_process.stdout.decode()
- commit_sha = stdout.replace("\n", "").replace(" ", "")
- assert len(commit_sha) > 0
- return commit_sha
-
-
-def get_current_log_lines():
+ if commit_sha_process.returncode == 0:
+ stdout = commit_sha_process.stdout.decode()
+ commit_sha = stdout.replace("\n", "").replace(" ", "")
+ assert len(commit_sha) > 0
+ if "fatal: not a git repository" in stdout:
+ return None
+ return commit_sha
+ else:
+ return None
+
+
+def get_current_log_lines() -> list[str]:
+ """Get the log line from the current info.log file. Only
+ returns the log lines from the latest two iterations.
+ """
with open(f"{PROJECT_DIR}/logs/info.log") as f:
latest_log_lines = f.readlines()
@@ -40,12 +54,13 @@ def get_current_log_lines():
included_iterations += 1
log_lines_in_email.append(l)
if included_iterations == 2:
- return log_lines_in_email[::-1]
+ break
+ return log_lines_in_email[::-1]
class ExceptionEmailClient:
@staticmethod
- def _send_email(config: dict, text: str, html: str, subject: str):
+ def _send_email(config: types.ConfigDict, text: str, html: str, subject: str) -> None:
sender_email = config["error_email"]["sender_address"]
sender_password = config["error_email"]["sender_password"]
recipients = config["error_email"]["recipients"].replace(" ", "").split(",")
@@ -68,7 +83,9 @@ def _send_email(config: dict, text: str, html: str, subject: str):
)
@staticmethod
- def handle_resolved_exception(config: dict):
+ def handle_resolved_exception(config: types.ConfigDict) -> None:
+ """Send out an email that all exceptions have been resolved
+ on this station."""
if not config["error_email"]["notify_recipients"]:
return
@@ -103,7 +120,9 @@ def handle_resolved_exception(config: dict):
ExceptionEmailClient._send_email(config, text, html, subject)
@staticmethod
- def handle_occured_exception(config: dict, exception: Exception):
+ def handle_occured_exception(config: types.ConfigDict, exception: Exception) -> None:
+ """Send out an email that a new exception has occurred
+ on this station."""
if not config["error_email"]["notify_recipients"]:
return
@@ -117,7 +136,9 @@ def handle_occured_exception(config: dict, exception: Exception):
text = (
f"{type(exception).__name__} has occured. Details:\n"
+ f"{tb}\nLast 2 iteration's log lines:{logs}\n"
- + f"This email has been generated by Pyra {pyra_version} (commit {commit_sha}) automatically."
+ + f"This email has been generated by Pyra {pyra_version} "
+ + (f"(commit {commit_sha}) " if commit_sha is not None else "")
+ + "automatically."
)
pre_tag = '