From 87fef879170d3a92c7a9984c799542215c4b6e26 Mon Sep 17 00:00:00 2001 From: Victor Garcia Reolid Date: Thu, 6 Mar 2025 12:35:51 +0100 Subject: [PATCH 001/171] poc of websockets using flask-sock Signed-off-by: Victor Garcia Reolid --- flexmeasures/app.py | 4 ++++ flexmeasures/ws/__init__.py | 17 +++++++++++++++++ flexmeasures/ws/ping1.py | 14 ++++++++++++++ flexmeasures/ws/ping2.py | 14 ++++++++++++++ requirements/app.in | 1 + test_ws_client.py | 15 +++++++++++++++ 6 files changed, 65 insertions(+) create mode 100644 flexmeasures/ws/__init__.py create mode 100644 flexmeasures/ws/ping1.py create mode 100644 flexmeasures/ws/ping2.py create mode 100644 test_ws_client.py diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 630f312c5d..ab51ccf9c1 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -52,6 +52,10 @@ def create( # noqa C901 load_dotenv() app = Flask("flexmeasures") + from flexmeasures.ws import sock + + sock.init_app(app) + if env is not None: # overwrite app.config["FLEXMEASURES_ENV"] = env if app.config.get("FLEXMEASURES_ENV") == "testing": diff --git a/flexmeasures/ws/__init__.py b/flexmeasures/ws/__init__.py new file mode 100644 index 0000000000..ddd8079d4c --- /dev/null +++ b/flexmeasures/ws/__init__.py @@ -0,0 +1,17 @@ +import importlib +import pkgutil + + +from flask_sock import Sock + +sock = Sock() + + +def import_all_modules(package_name): + package = importlib.import_module(package_name) + for _, name, _ in pkgutil.iter_modules(package.__path__): + importlib.import_module(f"{package_name}.{name}") + + +# we need to import all the modules to run the route decorators +import_all_modules("flexmeasures.ws") diff --git a/flexmeasures/ws/ping1.py b/flexmeasures/ws/ping1.py new file mode 100644 index 0000000000..61f24cf103 --- /dev/null +++ b/flexmeasures/ws/ping1.py @@ -0,0 +1,14 @@ +import logging +from flexmeasures.ws import sock + +logger = logging.Logger(__name__) + + +@sock.route("/ping1") +def echo1(ws): + while True: + data = 
ws.receive() + logger.error("ping1>" + data) + if data == "close": + break + ws.send(data) diff --git a/flexmeasures/ws/ping2.py b/flexmeasures/ws/ping2.py new file mode 100644 index 0000000000..a072f3f600 --- /dev/null +++ b/flexmeasures/ws/ping2.py @@ -0,0 +1,14 @@ +import logging +from flexmeasures.ws import sock + +logger = logging.Logger(__name__) + + +@sock.route("/ping2") +def echo2(ws): + while True: + data = ws.receive() + logger.error("ping2>" + data) + if data == "close": + break + ws.send(data) diff --git a/requirements/app.in b/requirements/app.in index 0a11ef7739..af9d1e4c77 100644 --- a/requirements/app.in +++ b/requirements/app.in @@ -65,3 +65,4 @@ flask>=1.0 werkzeug vl-convert-python Pillow>=10.0.1 # https://github.com/FlexMeasures/flexmeasures/security/dependabot/91 +flask-sock \ No newline at end of file diff --git a/test_ws_client.py b/test_ws_client.py new file mode 100644 index 0000000000..8f94d8cc73 --- /dev/null +++ b/test_ws_client.py @@ -0,0 +1,15 @@ +from simple_websocket import Client, ConnectionClosed + +def main(): + ws = Client.connect('ws://0.0.0.0:5000/ping2') + try: + while True: + data = input('> ') + ws.send(data) + data = ws.receive() + print(f'< {data}') + except (KeyboardInterrupt, EOFError, ConnectionClosed): + ws.close() + +if __name__ == '__main__': + main() \ No newline at end of file From 902c8cd1ed6e238fde5095e7fc7613f5be4ee7b1 Mon Sep 17 00:00:00 2001 From: Victor Garcia Reolid Date: Thu, 6 Mar 2025 12:39:57 +0100 Subject: [PATCH 002/171] apply pre-commit Signed-off-by: Victor Garcia Reolid --- test_ws_client.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/test_ws_client.py b/test_ws_client.py index 8f94d8cc73..62f4cabc9a 100644 --- a/test_ws_client.py +++ b/test_ws_client.py @@ -1,15 +1,17 @@ from simple_websocket import Client, ConnectionClosed + def main(): - ws = Client.connect('ws://0.0.0.0:5000/ping2') + ws = Client.connect("ws://0.0.0.0:5000/ping2") try: while True: - data = 
input('> ') + data = input("> ") ws.send(data) data = ws.receive() - print(f'< {data}') + print(f"< {data}") except (KeyboardInterrupt, EOFError, ConnectionClosed): ws.close() -if __name__ == '__main__': - main() \ No newline at end of file + +if __name__ == "__main__": + main() From b54c530e745fe195cb9ad761057b9eb16985c406 Mon Sep 17 00:00:00 2001 From: Victor Garcia Reolid Date: Tue, 18 Mar 2025 13:32:35 +0100 Subject: [PATCH 003/171] poc: test if we can get the app context Signed-off-by: Victor Garcia Reolid --- flexmeasures/ws/ping1.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/flexmeasures/ws/ping1.py b/flexmeasures/ws/ping1.py index 61f24cf103..d15f4dcb74 100644 --- a/flexmeasures/ws/ping1.py +++ b/flexmeasures/ws/ping1.py @@ -1,14 +1,19 @@ import logging from flexmeasures.ws import sock +from flask import current_app +from flexmeasures import Sensor +from sqlalchemy import select, func logger = logging.Logger(__name__) @sock.route("/ping1") def echo1(ws): + while True: data = ws.receive() logger.error("ping1>" + data) if data == "close": break - ws.send(data) + sensors = current_app.db.session.execute(select(func.count(Sensor.id))).scalar() + ws.send(str(sensors)) From c1abc0b438480a1af875bb334f2db0af17e163d7 Mon Sep 17 00:00:00 2001 From: Victor Garcia Reolid Date: Tue, 25 Mar 2025 18:51:31 +0100 Subject: [PATCH 004/171] use app conect to release/attatch the connection to/from the connection pool Signed-off-by: Victor Garcia Reolid --- flexmeasures/ws/ping1.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/flexmeasures/ws/ping1.py b/flexmeasures/ws/ping1.py index d15f4dcb74..337ea215dc 100644 --- a/flexmeasures/ws/ping1.py +++ b/flexmeasures/ws/ping1.py @@ -2,18 +2,22 @@ from flexmeasures.ws import sock from flask import current_app from flexmeasures import Sensor -from sqlalchemy import select, func +from sqlalchemy import select logger = logging.Logger(__name__) @sock.route("/ping1") 
def echo1(ws): - while True: - data = ws.receive() - logger.error("ping1>" + data) - if data == "close": - break - sensors = current_app.db.session.execute(select(func.count(Sensor.id))).scalar() - ws.send(str(sensors)) + with current_app.app_context(): + data = ws.receive() + + if data == "close": + break + + sensors = current_app.db.session.execute( + select(Sensor).where(Sensor.id == 1) + ).scalar() + + ws.send(str(sensors.__dict__)) From 05ecea175057cb5d73e19ee67786647909e5e314 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Tue, 25 Mar 2025 21:42:48 +0100 Subject: [PATCH 005/171] Test: Headers exchange between server and client side Signed-off-by: Vlad Iftime --- flexmeasures/ws/__init__.py | 4 +++- flexmeasures/ws/ping1.py | 3 +-- flexmeasures/ws/v1.py | 31 +++++++++++++++++++++++++++++++ test_ws_client.py | 31 ++++++++++++++++++++++++++++--- 4 files changed, 63 insertions(+), 6 deletions(-) create mode 100644 flexmeasures/ws/v1.py diff --git a/flexmeasures/ws/__init__.py b/flexmeasures/ws/__init__.py index ddd8079d4c..f565dea771 100644 --- a/flexmeasures/ws/__init__.py +++ b/flexmeasures/ws/__init__.py @@ -1,6 +1,8 @@ import importlib import pkgutil - +from flask import Blueprint, current_app +from simple_websocket import Server +from flask_security import auth_token_required from flask_sock import Sock diff --git a/flexmeasures/ws/ping1.py b/flexmeasures/ws/ping1.py index d15f4dcb74..3e4d22cef6 100644 --- a/flexmeasures/ws/ping1.py +++ b/flexmeasures/ws/ping1.py @@ -4,12 +4,11 @@ from flexmeasures import Sensor from sqlalchemy import select, func -logger = logging.Logger(__name__) +logger = logging.getLogger(__name__) @sock.route("/ping1") def echo1(ws): - while True: data = ws.receive() logger.error("ping1>" + data) diff --git a/flexmeasures/ws/v1.py b/flexmeasures/ws/v1.py new file mode 100644 index 0000000000..55c633d588 --- /dev/null +++ b/flexmeasures/ws/v1.py @@ -0,0 +1,31 @@ +import logging +from flexmeasures.ws import sock +from flask import 
current_app +from flexmeasures import Sensor +from sqlalchemy import select, func +import json + +logger = logging.Logger(__name__) + + +@sock.route("/v1") +def header_test(ws): + # Get all headers + all_headers = {k[5:].lower().replace("_", "-"): v for k, v in ws.environ.items() if k.startswith("HTTP_")} + + # Get specific header if needed + custom_header = ws.environ.get("HTTP_X_CUSTOM_HEADER") + + logger.info(f"All headers: {all_headers}") + logger.info(f"Custom header: {custom_header}") + + # Send initial message with metadata + ws.send(json.dumps({"type": "metadata", "headers": {"X-Server-Header": "ServerValue"}})) + + while True: + data = ws.receive() + logger.error("v1>" + data) + if data == "close": + break + sensors = current_app.db.session.execute(select(func.count(Sensor.id))).scalar() + ws.send(str(sensors)) diff --git a/test_ws_client.py b/test_ws_client.py index 62f4cabc9a..e1066ed7ca 100644 --- a/test_ws_client.py +++ b/test_ws_client.py @@ -1,15 +1,40 @@ -from simple_websocket import Client, ConnectionClosed +from simple_websocket import Client, ConnectionClosed # type: ignore +import json +import sys def main(): - ws = Client.connect("ws://0.0.0.0:5000/ping2") + headers = { + "X-Custom-Header": "SomeValue", + # 'Authorization': 'Bearer YourToken', + } + ws = Client.connect("ws://127.0.0.1:5000/v1", headers=headers) try: + print("Connected to the WebSocket server!") + + # Get initial metadata message + initial_msg = json.loads(ws.receive()) + print(initial_msg) + if initial_msg.get("type") != "metadata": + print("ERROR: Server metadata not received!") + ws.close() + sys.exit(1) + + server_header = initial_msg.get("headers", {}).get("X-Server-Header") + if not server_header: + print("ERROR: Server header not found in metadata!") + ws.close() + sys.exit(1) + print(f"Server header received: {server_header}") + while True: data = input("> ") ws.send(data) data = ws.receive() print(f"< {data}") - except (KeyboardInterrupt, EOFError, ConnectionClosed): + 
+ except (KeyboardInterrupt, EOFError, ConnectionClosed) as e: + print(f"Connection closed: {e}") ws.close() From ce51a17bd9765b31ebf4cf24fd6810fcb33aeff7 Mon Sep 17 00:00:00 2001 From: Victor Garcia Reolid Date: Wed, 26 Mar 2025 10:03:48 +0100 Subject: [PATCH 006/171] add analysis notebook Signed-off-by: Victor Garcia Reolid --- notebooks/websocket_analysis.ipynb | 206 +++++++++++++++++++++++++++++ 1 file changed, 206 insertions(+) create mode 100644 notebooks/websocket_analysis.ipynb diff --git a/notebooks/websocket_analysis.ipynb b/notebooks/websocket_analysis.ipynb new file mode 100644 index 0000000000..03fbbcf178 --- /dev/null +++ b/notebooks/websocket_analysis.ipynb @@ -0,0 +1,206 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 64, + "id": "2b691e65-0818-438a-b484-9ce439baef44", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import pandas as pd\n", + "\n", + "import plotly.offline as pyo\n", + "import plotly.graph_objs as go\n", + "import plotly.io as pio\n", + "\n", + "\n", + "import plotly_express as px\n", + "pio.renderers.default = 'iframe'\n", + "\n", + "data = pd.read_csv(\"results-db-get-sensor-1.csv\", names=[\"time\", \"type\", \"id\", \"delta\"])\n", + "data[\"time\"] = data[\"time\"].apply(lambda x: pd.Timestamp.fromtimestamp(x))\n", + "data = data.dropna()\n", + "data[\"id2\"] = data.apply(lambda x: f\"{x['type']}: {x['id']}\", axis=1)\n", + "fig = px.line(data, x=\"time\", y=\"delta\", color=\"type\", labels={\n", + " \"time\" : \"Time\",\n", + " \"delta\": \"Roundtrip Time (s)\",\n", + " \"type\" : \"Protocol\"\n", + "}, title=\"Roundtrip Time with 1000 concurrent WS connections @ 1Hz and 1000 concurrent API requests @ 1Hz\")\n", + "fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 65, + "id": "3a8f9091-b8bd-4802-af36-bb01a2942f9a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + 
"
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
id
type
API5
WS623
\n", + "
" + ], + "text/plain": [ + " id\n", + "type \n", + "API 5\n", + "WS 623" + ] + }, + "execution_count": 65, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "data[[\"id\", \"type\"]].drop_duplicates().groupby(\"type\").count()" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "066e04d7-148d-48a9-8de9-2cd9773b687e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "fig = px.histogram(data[8000:], x=\"delta\", color=\"type\", barmode=\"overlay\", labels={\n", + " \"delta\": \"Roundtrip Time (s)\",\n", + " \"type\" : \"Protocol\"\n", + "})\n", + "fig.update_traces(opacity=.9)\n", + "fig" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "id": "12555575-8f72-422e-a030-a5ea0df93a56", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "0 WS: 0\n", + "1 WS: 1\n", + "2 WS: 2\n", + "3 WS: 3\n", + "4 WS: 4\n", + " ... 
\n", + "150 WS: 1\n", + "151 WS: 0\n", + "152 WS: 2\n", + "153 WS: 4\n", + "154 WS: 3\n", + "Length: 155, dtype: object" + ] + }, + "execution_count": 49, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bc46bc48-c6c4-4326-9462-d5a89111865f", + "metadata": {}, + "outputs": [], + "source": [ + "data.gr" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "fm", + "language": "python", + "name": "fm" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From d11f00eadcb674626f61c43254e3449df28095ed Mon Sep 17 00:00:00 2001 From: Ahmad Wahid Date: Tue, 24 Jun 2025 13:18:30 +0200 Subject: [PATCH 007/171] test change --- flexmeasures/ws/ping1.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/flexmeasures/ws/ping1.py b/flexmeasures/ws/ping1.py index 02afaacd5c..ba11e9b88e 100644 --- a/flexmeasures/ws/ping1.py +++ b/flexmeasures/ws/ping1.py @@ -2,22 +2,22 @@ from flexmeasures.ws import sock from flask import current_app from flexmeasures import Sensor -from sqlalchemy import select +from sqlalchemy import select, func logger = logging.getLogger(__name__) @sock.route("/ping1") def echo1(ws): - while True: - with current_app.app_context(): - data = ws.receive() - - if data == "close": - break + headers = ws.environ # Access all headers from the connection + logger.info("-----------------------------------------") + logger.info(f"Received headers: {headers}") + logger.info("-----------------------------------------") - sensors = current_app.db.session.execute( - select(Sensor).where(Sensor.id == 1) - ).scalar() - - ws.send(str(sensors.__dict__)) + while True: + data = 
ws.receive() + logger.error("ping1>" + data) + if data == "close": + break + sensors = current_app.db.session.execute(select(func.count(Sensor.id))).scalar() + ws.send(str(sensors)) From 8449eaa67e6933fee047e469537658ad0e1b75eb Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Wed, 25 Jun 2025 16:32:37 +0200 Subject: [PATCH 008/171] Trying to use S2 messages --- flexmeasures/app.py | 5 +- flexmeasures/ws/ping1.py | 13 +- flexmeasures/ws/s2_ws.py | 292 ++++++++++++++++++++++++++++++++++++++ flexmeasures/ws/v1.py | 4 +- requirements/3.9/app.txt | 22 +-- requirements/3.9/dev.txt | 11 +- requirements/3.9/docs.txt | 12 +- requirements/3.9/test.txt | 20 +-- s2_client_rm.py | 226 +++++++++++++++++++++++++++++ test_ws_client.py | 2 +- 10 files changed, 548 insertions(+), 59 deletions(-) create mode 100644 flexmeasures/ws/s2_ws.py create mode 100644 s2_client_rm.py diff --git a/flexmeasures/app.py b/flexmeasures/app.py index ab51ccf9c1..a9abcb989c 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -43,7 +43,7 @@ def create( # noqa C901 from flexmeasures.utils.config_utils import read_config, configure_logging from flexmeasures.utils.app_utils import set_secret_key, init_sentry from flexmeasures.utils.error_utils import add_basic_error_handlers - + from flexmeasures.ws.s2_ws import S2FlaskWSServer # Create app configure_logging() # do this first, see https://flask.palletsprojects.com/en/2.0.x/logging @@ -55,7 +55,8 @@ def create( # noqa C901 from flexmeasures.ws import sock sock.init_app(app) - + s2_ws = S2FlaskWSServer(app=app, sock=sock) + if env is not None: # overwrite app.config["FLEXMEASURES_ENV"] = env if app.config.get("FLEXMEASURES_ENV") == "testing": diff --git a/flexmeasures/ws/ping1.py b/flexmeasures/ws/ping1.py index ba11e9b88e..429d7eead6 100644 --- a/flexmeasures/ws/ping1.py +++ b/flexmeasures/ws/ping1.py @@ -3,21 +3,26 @@ from flask import current_app from flexmeasures import Sensor from sqlalchemy import select, func - +import uuid +import json 
logger = logging.getLogger(__name__) @sock.route("/ping1") def echo1(ws): headers = ws.environ # Access all headers from the connection + client_id = str(uuid.uuid4()) + logger.info("-----------------------------------------") logger.info(f"Received headers: {headers}") logger.info("-----------------------------------------") - + logger.info(f"Type of ws: {type(ws)}") + logger.info(f"Client ID: {client_id}") + ws.send(json.dumps({"type": "metadata", "headers": {"X-Server-Header": "ServerValue"}})) while True: data = ws.receive() logger.error("ping1>" + data) if data == "close": break - sensors = current_app.db.session.execute(select(func.count(Sensor.id))).scalar() - ws.send(str(sensors)) + # sensors = current_app.db.session.execute(select(func.count(Sensor.id))).scalar() + ws.send(data ) diff --git a/flexmeasures/ws/s2_ws.py b/flexmeasures/ws/s2_ws.py new file mode 100644 index 0000000000..7562995257 --- /dev/null +++ b/flexmeasures/ws/s2_ws.py @@ -0,0 +1,292 @@ +""" +Flask implementation of the S2 protocol WebSocket server. 
+""" + +import asyncio +import json +import logging +import traceback +import uuid +from typing import Any, Callable, Dict, Optional, Type + +from flask import Flask +from flask_sock import ConnectionClosed, Sock + +from s2python.common import ( + ControlType, + EnergyManagementRole, + Handshake, + HandshakeResponse, + ReceptionStatus, + ReceptionStatusValues, + SelectControlType, +) +from s2python.communication.reception_status_awaiter import ReceptionStatusAwaiter +from s2python.message import S2Message +from s2python.s2_parser import S2Parser +from s2python.s2_validation_error import S2ValidationError +from flexmeasures.ws import sock + +# Set up logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("S2FlaskWSServer") + + +class MessageHandlers: + """Class to manage message handlers for different message types.""" + + handlers: Dict[Type[S2Message], Callable] + + def __init__(self) -> None: + self.handlers = {} + + async def handle_message( + self, + server: "S2FlaskWSServer", + msg: S2Message, + websocket: Sock, + ) -> None: + """Handle the S2 message using the registered handler. 
+ Args: + server: The server instance handling the message + msg: The S2 message to handle + websocket: The websocket connection to the client + """ + handler = self.handlers.get(type(msg)) + if handler is not None: + try: + if asyncio.iscoroutinefunction(handler): + await handler(server, msg, websocket) + else: + + def do_message() -> None: + handler(server, msg, websocket) + + eventloop = asyncio.get_event_loop() + await eventloop.run_in_executor(executor=None, func=do_message) + except Exception: + logger.error( + "While processing message %s an unrecoverable error occurred.", + msg.message_id, # type: ignore[attr-defined] + ) + logger.error("Error: %s", traceback.format_exc()) + await server.respond_with_reception_status( + subject_message_id=msg.message_id, # type: ignore[attr-defined] + status=ReceptionStatusValues.PERMANENT_ERROR, + diagnostic_label=f"While processing message {msg.message_id} " # type: ignore[attr-defined] + f"an unrecoverable error occurred.", + websocket=websocket, + ) + raise + else: + logger.warning( + "Received a message of type %s but no handler is registered. Ignoring the message.", + type(msg), + ) + + def register_handler(self, msg_type: Type[S2Message], handler: Callable[..., Any]) -> None: + """Register a handler for a specific message type. + Args: + msg_type: The message type to handle + handler: The handler function + """ + self.handlers[msg_type] = handler + + +class S2FlaskWSServer: + """Flask-based WebSocket server implementation for S2 protocol.""" + + def __init__( + self, + role: EnergyManagementRole = EnergyManagementRole.CEM, + ws_path: str = "/", + app: Optional[Flask] = None, + sock: Optional[Sock] = None, + ) -> None: + """Initialize the WebSocket server. + Args: + app: The Flask app to use + sock: The Sock instance to use + role: The role of this server (CEM or RM) + ws_path: The path for the WebSocket endpoint. 
+ """ + + self.role = role + self.ws_path = ws_path + + self.app = app or Flask(__name__) + self.sock = sock or Sock(self.app) + + self._handlers = MessageHandlers() + self.s2_parser = S2Parser() + self._connections: Dict[str, Sock] = {} + self.reception_status_awaiter = ReceptionStatusAwaiter() + + self._register_default_handlers() + + def _register_default_handlers(self) -> None: + """Register default message handlers.""" + self._handlers.register_handler(Handshake, self.handle_handshake) + self._handlers.register_handler(HandshakeResponse, self.handle_handshake_response) + self._handlers.register_handler(ReceptionStatus, self.handle_reception_status) + + def _ws_handler(self, ws: Sock) -> None: + """ + Wrapper to run the async websocket handler from a synchronous context. + This is required for Flask's development server. An ASGI server would + be able to run the async handler directly. + """ + try: + asyncio.run(self._handle_websocket_connection(ws)) + except Exception as e: + # The websocket is likely closed, or another network error occurred. 
+ logger.error("Error in websocket handler: %s", e) + + @sock.route("/s2") + async def _handle_websocket_connection(self, websocket: Sock) -> None: + """Handle incoming WebSocket connections.""" + client_id = str(uuid.uuid4()) + logger.info("Client %s connected.", client_id) + self._connections[client_id] = websocket + + try: + while True: + message = await websocket.receive() + try: + s2_msg = self.s2_parser.parse_as_any_message(message) + if isinstance(s2_msg, ReceptionStatus): + await self.reception_status_awaiter.receive_reception_status(s2_msg) + continue + except json.JSONDecodeError: + await self.respond_with_reception_status( + subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + status=ReceptionStatusValues.INVALID_DATA, + diagnostic_label="Not valid json.", + websocket=websocket, + ) + continue + try: + await self._handlers.handle_message(self, s2_msg, websocket) + except json.JSONDecodeError: + await self.respond_with_reception_status( + subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + status=ReceptionStatusValues.INVALID_DATA, + diagnostic_label="Not valid json.", + websocket=websocket, + ) + except S2ValidationError as e: + json_msg = json.loads(message) + message_id = json_msg.get("message_id") + if message_id: + await self.respond_with_reception_status( + subject_message_id=message_id, + status=ReceptionStatusValues.INVALID_MESSAGE, + diagnostic_label=str(e), + websocket=websocket, + ) + else: + await self.respond_with_reception_status( + subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + status=ReceptionStatusValues.INVALID_DATA, + diagnostic_label="Message appears valid json but could not find a message_id field.", + websocket=websocket, + ) + except Exception as e: + logger.error("Error processing message: %s", str(e)) + raise + except ConnectionClosed: + logger.info("Connection with client %s closed", client_id) + finally: + if client_id in self._connections: + del 
self._connections[client_id] + logger.info("Client %s disconnected", client_id) + + async def respond_with_reception_status( + self, + subject_message_id: uuid.UUID, + status: ReceptionStatusValues, + diagnostic_label: str, + websocket: Sock, + ) -> None: + """Send a reception status response.""" + response = ReceptionStatus( + subject_message_id=subject_message_id, status=status, diagnostic_label=diagnostic_label + ) + logger.info("Sending reception status %s for message %s", status, subject_message_id) + try: + await websocket.send(response.to_json()) + except ConnectionClosed: + logger.warning("Connection closed while sending reception status") + + async def send_msg_and_await_reception_status_async( + self, + s2_msg: S2Message, + websocket: Sock, + timeout_reception_status: float = 20.0, + raise_on_error: bool = True, + ) -> ReceptionStatus: + """Send a message and await a reception status.""" + await self._send_and_forget(s2_msg, websocket) + try: + response = await asyncio.wait_for(websocket.receive(), timeout=timeout_reception_status) + # Assuming the response is the correct reception status + return ReceptionStatus( + subject_message_id=s2_msg.message_id, # type: ignore[attr-defined] + status=ReceptionStatusValues.OK, + diagnostic_label="Reception status received.", + ) + except asyncio.TimeoutError: + if raise_on_error: + raise TimeoutError(f"Did not receive a reception status on time for {s2_msg.message_id}") # type: ignore[attr-defined] + return ReceptionStatus( + subject_message_id=s2_msg.message_id, # type: ignore[attr-defined] + status=ReceptionStatusValues.PERMANENT_ERROR, + diagnostic_label="Timeout waiting for reception status.", + ) + except ConnectionClosed: + return ReceptionStatus( + subject_message_id=s2_msg.message_id, # type: ignore[attr-defined] + status=ReceptionStatusValues.OK, + diagnostic_label="Connection closed, assuming OK status.", + ) + + async def handle_handshake(self, _: "S2FlaskWSServer", message: S2Message, websocket: Sock) -> 
None: + """Handle handshake messages.""" + if not isinstance(message, Handshake): + return + + handshake_response = HandshakeResponse( + message_id=message.message_id, selected_protocol_version=message.supported_protocol_versions + ) + await self.send_msg_and_await_reception_status_async(handshake_response, websocket) + + await self.respond_with_reception_status( + subject_message_id=message.message_id, + status=ReceptionStatusValues.OK, + diagnostic_label="Handshake received", + websocket=websocket, + ) + + async def handle_reception_status(self, _: "S2FlaskWSServer", message: S2Message, websocket: Sock) -> None: + """Handle reception status messages.""" + if not isinstance(message, ReceptionStatus): + return + logger.info("Received ReceptionStatus in handle_reception_status: %s", message.to_json()) + + async def handle_handshake_response(self, _: "S2FlaskWSServer", message: S2Message, websocket: Sock) -> None: + """Handle handshake response messages.""" + if not isinstance(message, HandshakeResponse): + return + logger.debug("Received HandshakeResponse: %s", message.to_json()) + + async def _send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: + """Send a message and forget about it.""" + try: + await websocket.send(s2_msg.to_json()) + except ConnectionClosed: + logger.warning("Connection closed while sending message") + + async def send_select_control_type(self, control_type: ControlType, websocket: Sock) -> None: + """Select the control type.""" + select_control_type = SelectControlType(message_id=uuid.uuid4(), control_type=control_type) + await self._send_and_forget(select_control_type, websocket) diff --git a/flexmeasures/ws/v1.py b/flexmeasures/ws/v1.py index 55c633d588..a19d3ed5c9 100644 --- a/flexmeasures/ws/v1.py +++ b/flexmeasures/ws/v1.py @@ -4,7 +4,6 @@ from flexmeasures import Sensor from sqlalchemy import select, func import json - logger = logging.Logger(__name__) @@ -15,7 +14,8 @@ def header_test(ws): # Get specific header if needed 
custom_header = ws.environ.get("HTTP_X_CUSTOM_HEADER") - + # show the type of ws + logger.info(f"Type of ws: {type(ws)}") logger.info(f"All headers: {all_headers}") logger.info(f"Custom header: {custom_header}") diff --git a/requirements/3.9/app.txt b/requirements/3.9/app.txt index 34857dc353..31f8f04e68 100644 --- a/requirements/3.9/app.txt +++ b/requirements/3.9/app.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --output-file=requirements/3.9/app.txt requirements/app.in @@ -76,6 +76,7 @@ flask==3.0.3 # flask-migrate # flask-principal # flask-security-too + # flask-sock # flask-sqlalchemy # flask-sslify # flask-wtf @@ -101,6 +102,8 @@ flask-principal==0.4.0 # via flask-security-too flask-security-too==5.5.2 # via -r requirements/app.in +flask-sock==0.7.0 + # via -r requirements/app.in flask-sqlalchemy==3.1.1 # via # -r requirements/app.in @@ -117,8 +120,8 @@ flexparser==0.3.1 # via pint fonttools==4.53.1 # via matplotlib -greenlet==3.1.0 - # via sqlalchemy +h11==0.16.0 + # via wsproto humanize==4.10.0 # via -r requirements/app.in idna==3.10 @@ -129,12 +132,9 @@ idna==3.10 importlib-metadata==8.5.0 # via # -r requirements/app.in - # flask # timely-beliefs importlib-resources==6.4.5 - # via - # flask-security-too - # matplotlib + # via flask-security-too inflect==6.0.2 # via -r requirements/app.in inflection==0.5.1 @@ -318,6 +318,8 @@ scipy==1.13.1 # timetomodel sentry-sdk[flask]==2.14.0 # via -r requirements/app.in +simple-websocket==1.1.0 + # via flask-sock six==1.16.0 # via # isodate @@ -376,6 +378,8 @@ werkzeug==3.0.4 # flask-login workalendar==17.0.0 # via -r requirements/app.in +wsproto==1.2.0 + # via simple-websocket wtforms==3.1.2 # via # flask-security-too @@ -383,6 +387,4 @@ wtforms==3.1.2 xlrd==2.0.1 # via -r requirements/app.in zipp==3.20.2 - # via - # importlib-metadata - # importlib-resources + # via 
importlib-metadata diff --git a/requirements/3.9/dev.txt b/requirements/3.9/dev.txt index f94b1e95de..ec6fa744a4 100644 --- a/requirements/3.9/dev.txt +++ b/requirements/3.9/dev.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --constraint=requirements/3.9/app.txt --constraint=requirements/3.9/test.txt --output-file=requirements/3.9/dev.txt requirements/dev.in @@ -63,19 +63,10 @@ pyyaml==6.0.2 # pre-commit setuptools-scm==8.1.0 # via -r requirements/dev.in -tomli==2.0.1 - # via - # -c requirements/3.9/test.txt - # black - # mypy - # setuptools-scm typing-extensions==4.12.2 # via # -c requirements/3.9/app.txt - # -c requirements/3.9/test.txt - # black # mypy - # setuptools-scm virtualenv==20.26.5 # via pre-commit watchdog==5.0.2 diff --git a/requirements/3.9/docs.txt b/requirements/3.9/docs.txt index b824339708..3fc4132366 100644 --- a/requirements/3.9/docs.txt +++ b/requirements/3.9/docs.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --constraint=requirements/3.9/app.txt --output-file=requirements/3.9/docs.txt requirements/docs.in @@ -29,10 +29,6 @@ idna==3.10 # requests imagesize==1.4.1 # via sphinx -importlib-metadata==8.5.0 - # via - # -c requirements/3.9/app.txt - # sphinx jinja2==3.1.4 # via # -c requirements/3.9/app.txt @@ -92,13 +88,7 @@ sphinxcontrib-qthelp==2.0.0 # via sphinx sphinxcontrib-serializinghtml==2.0.0 # via sphinx -tomli==2.2.1 - # via sphinx urllib3==2.2.3 # via # -c requirements/3.9/app.txt # requests -zipp==3.20.2 - # via - # -c requirements/3.9/app.txt - # importlib-metadata diff --git a/requirements/3.9/test.txt b/requirements/3.9/test.txt index 94b3512248..6dfd559313 100644 --- a/requirements/3.9/test.txt +++ b/requirements/3.9/test.txt @@ -1,5 +1,5 @@ # -# This 
file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --constraint=requirements/3.9/app.txt --output-file=requirements/3.9/test.txt requirements/test.in @@ -26,8 +26,6 @@ click==8.1.7 # flask coverage[toml]==7.6.1 # via pytest-cov -exceptiongroup==1.2.2 - # via pytest fakeredis==2.24.1 # via -r requirements/test.in flask==3.0.3 @@ -40,10 +38,6 @@ idna==3.10 # via # -c requirements/3.9/app.txt # requests -importlib-metadata==8.5.0 - # via - # -c requirements/3.9/app.txt - # flask iniconfig==2.0.0 # via pytest itsdangerous==2.2.0 @@ -102,14 +96,6 @@ sortedcontainers==2.4.0 # via fakeredis termcolor==2.4.0 # via pytest-sugar -tomli==2.0.1 - # via - # coverage - # pytest -typing-extensions==4.12.2 - # via - # -c requirements/3.9/app.txt - # fakeredis urllib3==2.2.3 # via # -c requirements/3.9/app.txt @@ -119,7 +105,3 @@ werkzeug==3.0.4 # -c requirements/3.9/app.txt # flask # pytest-flask -zipp==3.20.2 - # via - # -c requirements/3.9/app.txt - # importlib-metadata diff --git a/s2_client_rm.py b/s2_client_rm.py new file mode 100644 index 0000000000..d64440bcb5 --- /dev/null +++ b/s2_client_rm.py @@ -0,0 +1,226 @@ +import argparse +import logging +import threading +import datetime +import uuid +from typing import Callable + +from s2python.authorization.default_client import S2DefaultClient +from s2python.generated.gen_s2_pairing import ( + S2NodeDescription, + Deployment, + PairingToken, + S2Role, + Protocols, +) + +from s2python.common import ( + EnergyManagementRole, + Duration, + Role, + RoleType, + Commodity, + Currency, + NumberRange, + PowerRange, + CommodityQuantity, +) +from s2python.frbc import ( + FRBCInstruction, + FRBCSystemDescription, + FRBCActuatorDescription, + FRBCStorageDescription, + FRBCOperationMode, + FRBCOperationModeElement, + FRBCFillLevelTargetProfile, + FRBCFillLevelTargetProfileElement, + FRBCStorageStatus, + FRBCActuatorStatus, +) +from 
s2python.communication.s2_connection import S2Connection, AssetDetails +from s2python.s2_control_type import FRBCControlType, NoControlControlType +from s2python.message import S2Message + +logger = logging.getLogger("s2python") + + +class MyFRBCControlType(FRBCControlType): + def handle_instruction(self, conn: S2Connection, msg: S2Message, send_okay: Callable[[], None]) -> None: + if not isinstance(msg, FRBCInstruction): + raise RuntimeError(f"Expected an FRBCInstruction but received a message of type {type(msg)}.") + print(f"I have received the message {msg} from {conn}") + + def activate(self, conn: S2Connection) -> None: + print("The control type FRBC is now activated.") + + print("Time to send a FRBC SystemDescription") + actuator_id = uuid.uuid4() + operation_mode_id = uuid.uuid4() + conn.send_msg_and_await_reception_status_sync( + FRBCSystemDescription( + message_id=uuid.uuid4(), + valid_from=datetime.datetime.now(tz=datetime.timezone.utc), + actuators=[ + FRBCActuatorDescription( + id=actuator_id, + operation_modes=[ + FRBCOperationMode( + id=operation_mode_id, + elements=[ + FRBCOperationModeElement( + fill_level_range=NumberRange(start_of_range=0.0, end_of_range=100.0), + fill_rate=NumberRange(start_of_range=-5.0, end_of_range=5.0), + power_ranges=[ + PowerRange( + start_of_range=-200.0, + end_of_range=200.0, + commodity_quantity=CommodityQuantity.ELECTRIC_POWER_L1, + ) + ], + ) + ], + diagnostic_label="Load & unload battery", + abnormal_condition_only=False, + ) + ], + transitions=[], + timers=[], + supported_commodities=[Commodity.ELECTRICITY], + ) + ], + storage=FRBCStorageDescription( + fill_level_range=NumberRange(start_of_range=0.0, end_of_range=100.0), + fill_level_label="%", + diagnostic_label="Imaginary battery", + provides_fill_level_target_profile=True, + provides_leakage_behaviour=False, + provides_usage_forecast=False, + ), + ) + ) + print("Also send the target profile") + + conn.send_msg_and_await_reception_status_sync( + 
FRBCFillLevelTargetProfile( + message_id=uuid.uuid4(), + start_time=datetime.datetime.now(tz=datetime.timezone.utc), + elements=[ + FRBCFillLevelTargetProfileElement( + duration=Duration.from_milliseconds(30_000), + fill_level_range=NumberRange(start_of_range=20.0, end_of_range=30.0), + ), + FRBCFillLevelTargetProfileElement( + duration=Duration.from_milliseconds(300_000), + fill_level_range=NumberRange(start_of_range=40.0, end_of_range=50.0), + ), + ], + ) + ) + + print("Also send the storage status.") + conn.send_msg_and_await_reception_status_sync( + FRBCStorageStatus(message_id=uuid.uuid4(), present_fill_level=10.0) + ) + + print("Also send the actuator status.") + conn.send_msg_and_await_reception_status_sync( + FRBCActuatorStatus( + message_id=uuid.uuid4(), + actuator_id=actuator_id, + active_operation_mode_id=operation_mode_id, + operation_mode_factor=0.5, + ) + ) + + def deactivate(self, conn: S2Connection) -> None: + print("The control type FRBC is now deactivated.") + + +class MyNoControlControlType(NoControlControlType): + def activate(self, conn: S2Connection) -> None: + print("The control type NoControl is now activated.") + + def deactivate(self, conn: S2Connection) -> None: + print("The control type NoControl is now deactivated.") + + +if __name__ == "__main__": + # Configuration + parser = argparse.ArgumentParser(description="S2 pairing example for FRBC RM") + parser.add_argument("--pairing_endpoint", type=str, required=True) + parser.add_argument("--pairing_token", type=str, required=True) + + args = parser.parse_args() + + pairing_endpoint = args.pairing_endpoint + pairing_token = args.pairing_token + + # --- Client Setup --- + # Create node description + node_description = S2NodeDescription( + brand="TNO", + logoUri="https://www.tno.nl/publish/pages/5604/tno-logo-1484x835_003_.jpg", + type="demo frbc example", + modelName="S2 pairing example stub", + userDefinedName="TNO S2 pairing example for frbc", + role=S2Role.RM, + deployment=Deployment.LAN, 
+ ) + + # Create a client to perform the pairing + client = S2DefaultClient( + pairing_uri=pairing_endpoint, + token=PairingToken(token=pairing_token), + node_description=node_description, + verify_certificate=False, + supported_protocols=[Protocols.WebSocketSecure], + ) + + try: + # # Request pairing + # logger.info("Initiating pairing with endpoint: %s", pairing_endpoint) + # pairing_response = client.request_pairing() + # logger.info("Pairing request successful, requesting connection...") + + # # Request connection details + # connection_details = client.request_connection() + # logger.info("Connection request successful") + + # # Solve challenge + # challenge_result = client.solve_challenge() + # logger.info("Challenge solved successfully") + + s2_connection = S2Connection( + url="wss://127.0.0.1:5000/v1", # type: ignore + role=EnergyManagementRole.RM, + control_types=[MyFRBCControlType(), MyNoControlControlType()], + asset_details=AssetDetails( + resource_id=client.client_node_id, + name="Some asset", + instruction_processing_delay=Duration.from_milliseconds(20), + roles=[Role(role=RoleType.ENERGY_CONSUMER, commodity=Commodity.ELECTRICITY)], + currency=Currency.EUR, + provides_forecast=False, + provides_power_measurements=[CommodityQuantity.ELECTRIC_POWER_L1], + ), + reconnect=True, + verify_certificate=False, + ) + + # Start S2 session with the connection details + logger.info("Starting S2 session...") + s2_connection.start_as_rm() + logger.info("S2 session is running. Press Ctrl+C to exit.") + + # Keep the main thread alive to allow the WebSocket connection to run. 
+ event = threading.Event() + event.wait() + + except KeyboardInterrupt: + logger.info("Program interrupted by user.") + except Exception as e: + logger.error("Error during pairing process: %s", e, exc_info=True) + raise e + finally: + client.close_connection() + logger.info("Connection closed.") diff --git a/test_ws_client.py b/test_ws_client.py index e1066ed7ca..4cba17726b 100644 --- a/test_ws_client.py +++ b/test_ws_client.py @@ -8,7 +8,7 @@ def main(): "X-Custom-Header": "SomeValue", # 'Authorization': 'Bearer YourToken', } - ws = Client.connect("ws://127.0.0.1:5000/v1", headers=headers) + ws = Client.connect("ws://127.0.0.1:5000/ping1", headers=headers) try: print("Connected to the WebSocket server!") From fb9db22e85beacb97b5e6a09d7d5dac32d87affb Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+flix6x@users.noreply.github.com> Date: Fri, 13 Jun 2025 17:27:59 +0200 Subject: [PATCH 009/171] style: flake8 Signed-off-by: F.N. Claessen --- flexmeasures/app.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 602e53a211..fa02ac0445 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -44,6 +44,7 @@ def create( # noqa C901 from flexmeasures.utils.app_utils import set_secret_key, init_sentry from flexmeasures.utils.error_utils import add_basic_error_handlers from flexmeasures.ws.s2_ws import S2FlaskWSServer + # Create app configure_logging() # do this first, see https://flask.palletsprojects.com/en/2.0.x/logging @@ -55,8 +56,8 @@ def create( # noqa C901 from flexmeasures.ws import sock sock.init_app(app) - s2_ws = S2FlaskWSServer(app=app, sock=sock) - + s2_ws = S2FlaskWSServer(app=app, sock=sock) # noqa: F841 + if env is not None: # overwrite app.config["FLEXMEASURES_ENV"] = env if app.config.get("FLEXMEASURES_ENV") == "testing": From a6a4c1daa710797deb64cdec36e669d665e5c728 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Wed, 25 Jun 2025 16:45:10 +0200 Subject: [PATCH 010/171] style: black Signed-off-by: F.N. Claessen --- flexmeasures/ws/v1.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/v1.py b/flexmeasures/ws/v1.py index a19d3ed5c9..741d26a37f 100644 --- a/flexmeasures/ws/v1.py +++ b/flexmeasures/ws/v1.py @@ -4,13 +4,18 @@ from flexmeasures import Sensor from sqlalchemy import select, func import json + logger = logging.Logger(__name__) @sock.route("/v1") def header_test(ws): # Get all headers - all_headers = {k[5:].lower().replace("_", "-"): v for k, v in ws.environ.items() if k.startswith("HTTP_")} + all_headers = { + k[5:].lower().replace("_", "-"): v + for k, v in ws.environ.items() + if k.startswith("HTTP_") + } # Get specific header if needed custom_header = ws.environ.get("HTTP_X_CUSTOM_HEADER") @@ -20,7 +25,9 @@ def header_test(ws): logger.info(f"Custom header: {custom_header}") # Send initial message with metadata - ws.send(json.dumps({"type": "metadata", "headers": {"X-Server-Header": "ServerValue"}})) + ws.send( + json.dumps({"type": "metadata", "headers": {"X-Server-Header": "ServerValue"}}) + ) while True: data = ws.receive() From fe9fff46360550cd04c564de17d705a7d9fa60bd Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 26 Jun 2025 13:11:21 +0200 Subject: [PATCH 011/171] dev: add s2-python requirement Signed-off-by: F.N. 
Claessen --- requirements/app.in | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements/app.in b/requirements/app.in index 18678ec362..dcc99cc21a 100644 --- a/requirements/app.in +++ b/requirements/app.in @@ -67,4 +67,5 @@ flask>=1.0 werkzeug vl-convert-python Pillow>=10.0.1 # https://github.com/FlexMeasures/flexmeasures/security/dependabot/91 -flask-sock \ No newline at end of file +flask-sock +s2-python[ws] @ git+https://github.com/flexiblepower/s2-python.git@feat/flask_server \ No newline at end of file From a9f93cefb235236dd283d2b00d3d8aeb254eec7a Mon Sep 17 00:00:00 2001 From: Ahmad Wahid Date: Mon, 30 Jun 2025 12:19:19 +0200 Subject: [PATCH 012/171] run ws in async mode --- flexmeasures/ws/ping1.py | 8 ++--- test_ws_client.py | 75 +++++++++++++++++++++++++--------------- 2 files changed, 52 insertions(+), 31 deletions(-) diff --git a/flexmeasures/ws/ping1.py b/flexmeasures/ws/ping1.py index 429d7eead6..b9ac398f42 100644 --- a/flexmeasures/ws/ping1.py +++ b/flexmeasures/ws/ping1.py @@ -9,7 +9,7 @@ @sock.route("/ping1") -def echo1(ws): +async def echo1(ws): headers = ws.environ # Access all headers from the connection client_id = str(uuid.uuid4()) @@ -18,11 +18,11 @@ def echo1(ws): logger.info("-----------------------------------------") logger.info(f"Type of ws: {type(ws)}") logger.info(f"Client ID: {client_id}") - ws.send(json.dumps({"type": "metadata", "headers": {"X-Server-Header": "ServerValue"}})) + await ws.send(json.dumps({"type": "metadata", "headers": {"X-Server-Header": "ServerValue"}})) while True: - data = ws.receive() + data = await ws.receive() logger.error("ping1>" + data) if data == "close": break # sensors = current_app.db.session.execute(select(func.count(Sensor.id))).scalar() - ws.send(data ) + await ws.send(data ) diff --git a/test_ws_client.py b/test_ws_client.py index 4cba17726b..519868b63a 100644 --- a/test_ws_client.py +++ b/test_ws_client.py @@ -2,40 +2,61 @@ import json import sys +import asyncio +import 
websockets -def main(): + +async def main(): + uri = "ws://127.0.0.1:5000/ping1" headers = { - "X-Custom-Header": "SomeValue", - # 'Authorization': 'Bearer YourToken', + 'X-Custom-Header': 'SomeValue', + 'Authorization': 'Bearer YourToken', } - ws = Client.connect("ws://127.0.0.1:5000/ping1", headers=headers) - try: - print("Connected to the WebSocket server!") - - # Get initial metadata message - initial_msg = json.loads(ws.receive()) - print(initial_msg) - if initial_msg.get("type") != "metadata": - print("ERROR: Server metadata not received!") - ws.close() - sys.exit(1) - - server_header = initial_msg.get("headers", {}).get("X-Server-Header") - if not server_header: - print("ERROR: Server header not found in metadata!") - ws.close() - sys.exit(1) - print(f"Server header received: {server_header}") + async with websockets.connect(uri, extra_headers=headers) as ws: while True: data = input("> ") - ws.send(data) - data = ws.receive() - print(f"< {data}") + await ws.send(data) + response = await ws.recv() + print(f"< {response}") + - except (KeyboardInterrupt, EOFError, ConnectionClosed) as e: - print(f"Connection closed: {e}") - ws.close() +if __name__ == "__main__": + asyncio.run(main()) + +# def main(): +# headers = { +# "X-Custom-Header": "SomeValue", +# # 'Authorization': 'Bearer YourToken', +# } +# ws = Client.connect("ws://127.0.0.1:5000/ping1", headers=headers) +# try: +# print("Connected to the WebSocket server!") +# +# # Get initial metadata message +# initial_msg = json.loads(ws.receive()) +# print(initial_msg) +# if initial_msg.get("type") != "metadata": +# print("ERROR: Server metadata not received!") +# ws.close() +# sys.exit(1) +# +# server_header = initial_msg.get("headers", {}).get("X-Server-Header") +# if not server_header: +# print("ERROR: Server header not found in metadata!") +# ws.close() +# sys.exit(1) +# print(f"Server header received: {server_header}") +# +# while True: +# data = input("> ") +# ws.send(data) +# data = ws.receive() +# 
print(f"< {data}") +# +# except (KeyboardInterrupt, EOFError, ConnectionClosed) as e: +# print(f"Connection closed: {e}") +# ws.close() if __name__ == "__main__": From ae838dc7a6aa2f1d41b360559248aa6637cd68e9 Mon Sep 17 00:00:00 2001 From: Ahmad Wahid Date: Mon, 30 Jun 2025 12:19:57 +0200 Subject: [PATCH 013/171] run ws in async mode --- flexmeasures/ws/ping1.py | 9 ++++++--- test_ws_client.py | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/flexmeasures/ws/ping1.py b/flexmeasures/ws/ping1.py index b9ac398f42..abea9ed18e 100644 --- a/flexmeasures/ws/ping1.py +++ b/flexmeasures/ws/ping1.py @@ -5,6 +5,7 @@ from sqlalchemy import select, func import uuid import json + logger = logging.getLogger(__name__) @@ -12,17 +13,19 @@ async def echo1(ws): headers = ws.environ # Access all headers from the connection client_id = str(uuid.uuid4()) - + logger.info("-----------------------------------------") logger.info(f"Received headers: {headers}") logger.info("-----------------------------------------") logger.info(f"Type of ws: {type(ws)}") logger.info(f"Client ID: {client_id}") - await ws.send(json.dumps({"type": "metadata", "headers": {"X-Server-Header": "ServerValue"}})) + await ws.send( + json.dumps({"type": "metadata", "headers": {"X-Server-Header": "ServerValue"}}) + ) while True: data = await ws.receive() logger.error("ping1>" + data) if data == "close": break # sensors = current_app.db.session.execute(select(func.count(Sensor.id))).scalar() - await ws.send(data ) + await ws.send(data) diff --git a/test_ws_client.py b/test_ws_client.py index 519868b63a..9c587c8fd3 100644 --- a/test_ws_client.py +++ b/test_ws_client.py @@ -9,8 +9,8 @@ async def main(): uri = "ws://127.0.0.1:5000/ping1" headers = { - 'X-Custom-Header': 'SomeValue', - 'Authorization': 'Bearer YourToken', + "X-Custom-Header": "SomeValue", + "Authorization": "Bearer YourToken", } async with websockets.connect(uri, extra_headers=headers) as ws: From 
e4bbd7a3c08d7b676e31090ed7ccd2f4dc9bbc07 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Mon, 30 Jun 2025 15:12:36 +0200 Subject: [PATCH 014/171] Trying to run async ws --- Makefile | 8 ++--- flexmeasures/app.py | 25 ++++----------- flexmeasures/ws/s2_ws.py | 67 +++++++++++++++++++++++---------------- requirements/3.12/app.txt | 11 +++++-- requirements/3.9/app.txt | 26 +++++++-------- requirements/3.9/dev.txt | 22 +------------ requirements/3.9/docs.txt | 14 +------- requirements/3.9/test.txt | 22 +------------ s2_client_rm.py | 16 ++++++---- test_ws_client.py | 3 +- 10 files changed, 84 insertions(+), 130 deletions(-) diff --git a/Makefile b/Makefile index c1d3359a7c..90ac74a4ce 100644 --- a/Makefile +++ b/Makefile @@ -42,10 +42,10 @@ install-for-dev: make ensure-deps-folder pip-sync requirements/${PYV}/app.txt requirements/${PYV}/dev.txt requirements/${PYV}/test.txt make install-flexmeasures -# Locally install HiGHS on macOS - @if [ "$(shell uname)" = "Darwin" ]; then \ - make install-highs-macos; \ - fi +# # Locally install HiGHS on macOS +# @if [ "$(shell uname)" = "Darwin" ]; then \ +# make install-highs-macos; \ +# fi install-for-test: make install-pip-tools diff --git a/flexmeasures/app.py b/flexmeasures/app.py index fa02ac0445..707e4e425f 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -71,10 +71,7 @@ def create( # noqa C901 if plugins: app.config["FLEXMEASURES_PLUGINS"] += plugins add_basic_error_handlers(app) - if ( - app.config.get("FLEXMEASURES_ENV") not in ("development", "documentation") - and not app.testing - ): + if app.config.get("FLEXMEASURES_ENV") not in ("development", "documentation") and not app.testing: init_sentry(app) app.mail = Mail(app) @@ -85,9 +82,7 @@ def create( # noqa C901 if app.testing: from fakeredis import FakeStrictRedis - redis_conn = FakeStrictRedis( - host="redis", port="1234" - ) # dummy connection details + redis_conn = FakeStrictRedis(host="redis", port="1234") # dummy connection details else: redis_conn = 
Redis( app.config["FLEXMEASURES_REDIS_URL"], @@ -143,9 +138,7 @@ def create( # noqa C901 schedulers = get_classes_module("flexmeasures.data.models", planning.Scheduler) app.data_generators = dict() - app.data_generators["reporter"] = copy( - reporters - ) # use copy to avoid mutating app.reporters + app.data_generators["reporter"] = copy(reporters) # use copy to avoid mutating app.reporters app.data_generators["scheduler"] = schedulers # add auth policy @@ -219,9 +212,7 @@ def teardown_request(exception=None): if app.config.get("FLEXMEASURES_PROFILE_REQUESTS", False): diff = time.time() - g.start if all([kw not in request.url for kw in ["/static", "favicon.ico"]]): - app.logger.info( - f"[PROFILE] {str(round(diff, 2)).rjust(6)} seconds to serve {request.url}." - ) + app.logger.info(f"[PROFILE] {str(round(diff, 2)).rjust(6)} seconds to serve {request.url}.") if not hasattr(g, "profiler"): return app g.profiler.stop() @@ -231,13 +222,9 @@ def teardown_request(exception=None): endpoint = "unknown" today = date.today() profile_filename = f"pyinstrument_{endpoint}.html" - profile_output_path = Path( - "profile_reports", today.strftime("%Y-%m-%d") - ) + profile_output_path = Path("profile_reports", today.strftime("%Y-%m-%d")) profile_output_path.mkdir(parents=True, exist_ok=True) - with open( - os.path.join(profile_output_path, profile_filename), "w+" - ) as f: + with open(os.path.join(profile_output_path, profile_filename), "w+") as f: f.write(output_html) return app diff --git a/flexmeasures/ws/s2_ws.py b/flexmeasures/ws/s2_ws.py index 7562995257..c6cc5818c7 100644 --- a/flexmeasures/ws/s2_ws.py +++ b/flexmeasures/ws/s2_ws.py @@ -65,16 +65,20 @@ def do_message() -> None: eventloop = asyncio.get_event_loop() await eventloop.run_in_executor(executor=None, func=do_message) except Exception: + message_id = getattr(msg, "message_id", "N/A") logger.error( "While processing message %s an unrecoverable error occurred.", - msg.message_id, # type: ignore[attr-defined] + 
message_id, ) logger.error("Error: %s", traceback.format_exc()) await server.respond_with_reception_status( - subject_message_id=msg.message_id, # type: ignore[attr-defined] + subject_message_id=getattr( + msg, + "message_id", + uuid.UUID("00000000-0000-0000-0000-000000000000"), + ), status=ReceptionStatusValues.PERMANENT_ERROR, - diagnostic_label=f"While processing message {msg.message_id} " # type: ignore[attr-defined] - f"an unrecoverable error occurred.", + diagnostic_label=f"While processing message {message_id} an unrecoverable error occurred.", websocket=websocket, ) raise @@ -99,7 +103,7 @@ class S2FlaskWSServer: def __init__( self, role: EnergyManagementRole = EnergyManagementRole.CEM, - ws_path: str = "/", + ws_path: str = "/s2", app: Optional[Flask] = None, sock: Optional[Sock] = None, ) -> None: @@ -114,8 +118,8 @@ def __init__( self.role = role self.ws_path = ws_path - self.app = app or Flask(__name__) - self.sock = sock or Sock(self.app) + self.app = app if app else Flask(__name__) + self.sock = sock if sock else Sock(self.app) self._handlers = MessageHandlers() self.s2_parser = S2Parser() @@ -123,6 +127,7 @@ def __init__( self.reception_status_awaiter = ReceptionStatusAwaiter() self._register_default_handlers() + self.sock.route(self.ws_path)(self._ws_handler) def _register_default_handlers(self) -> None: """Register default message handlers.""" @@ -137,16 +142,16 @@ def _ws_handler(self, ws: Sock) -> None: be able to run the async handler directly. """ try: + self.app.logger.info("Received connection from client") asyncio.run(self._handle_websocket_connection(ws)) except Exception as e: # The websocket is likely closed, or another network error occurred. 
- logger.error("Error in websocket handler: %s", e) - - @sock.route("/s2") + self.app.logger.error("Error in websocket handler: %s", e) + async def _handle_websocket_connection(self, websocket: Sock) -> None: """Handle incoming WebSocket connections.""" client_id = str(uuid.uuid4()) - logger.info("Client %s connected.", client_id) + self.app.logger.info("Client %s connected.", client_id) self._connections[client_id] = websocket try: @@ -192,14 +197,14 @@ async def _handle_websocket_connection(self, websocket: Sock) -> None: websocket=websocket, ) except Exception as e: - logger.error("Error processing message: %s", str(e)) + self.app.logger.error("Error processing message: %s", str(e)) raise except ConnectionClosed: - logger.info("Connection with client %s closed", client_id) + self.app.logger.info("Connection with client %s closed", client_id) finally: if client_id in self._connections: del self._connections[client_id] - logger.info("Client %s disconnected", client_id) + self.app.logger.info("Client %s disconnected", client_id) async def respond_with_reception_status( self, @@ -210,13 +215,15 @@ async def respond_with_reception_status( ) -> None: """Send a reception status response.""" response = ReceptionStatus( - subject_message_id=subject_message_id, status=status, diagnostic_label=diagnostic_label + subject_message_id=subject_message_id, + status=status, + diagnostic_label=diagnostic_label, ) - logger.info("Sending reception status %s for message %s", status, subject_message_id) + self.app.logger.info("Sending reception status %s for message %s", status, subject_message_id) try: await websocket.send(response.to_json()) except ConnectionClosed: - logger.warning("Connection closed while sending reception status") + self.app.logger.warning("Connection closed while sending reception status") async def send_msg_and_await_reception_status_async( self, @@ -227,25 +234,26 @@ async def send_msg_and_await_reception_status_async( ) -> ReceptionStatus: """Send a message 
and await a reception status.""" await self._send_and_forget(s2_msg, websocket) + message_id = getattr(s2_msg, "message_id", uuid.UUID("00000000-0000-0000-0000-000000000000")) try: - response = await asyncio.wait_for(websocket.receive(), timeout=timeout_reception_status) + await asyncio.wait_for(websocket.receive(), timeout=timeout_reception_status) # Assuming the response is the correct reception status return ReceptionStatus( - subject_message_id=s2_msg.message_id, # type: ignore[attr-defined] + subject_message_id=message_id, status=ReceptionStatusValues.OK, diagnostic_label="Reception status received.", ) except asyncio.TimeoutError: if raise_on_error: - raise TimeoutError(f"Did not receive a reception status on time for {s2_msg.message_id}") # type: ignore[attr-defined] + raise TimeoutError(f"Did not receive a reception status on time for {message_id}") return ReceptionStatus( - subject_message_id=s2_msg.message_id, # type: ignore[attr-defined] + subject_message_id=message_id, status=ReceptionStatusValues.PERMANENT_ERROR, diagnostic_label="Timeout waiting for reception status.", ) except ConnectionClosed: return ReceptionStatus( - subject_message_id=s2_msg.message_id, # type: ignore[attr-defined] + subject_message_id=message_id, status=ReceptionStatusValues.OK, diagnostic_label="Connection closed, assuming OK status.", ) @@ -254,11 +262,14 @@ async def handle_handshake(self, _: "S2FlaskWSServer", message: S2Message, webso """Handle handshake messages.""" if not isinstance(message, Handshake): return - + self.app.logger.info("Received Handshake: %s", message.to_json()) handshake_response = HandshakeResponse( - message_id=message.message_id, selected_protocol_version=message.supported_protocol_versions + message_id=message.message_id, + selected_protocol_version=( + message.supported_protocol_versions[0] if message.supported_protocol_versions else "2.0.0" + ), # TODO: proper version negotiation ) - await 
self.send_msg_and_await_reception_status_async(handshake_response, websocket) + await self._send_and_forget(handshake_response, websocket) await self.respond_with_reception_status( subject_message_id=message.message_id, @@ -271,20 +282,20 @@ async def handle_reception_status(self, _: "S2FlaskWSServer", message: S2Message """Handle reception status messages.""" if not isinstance(message, ReceptionStatus): return - logger.info("Received ReceptionStatus in handle_reception_status: %s", message.to_json()) + self.app.logger.info("Received ReceptionStatus in handle_reception_status: %s", message.to_json()) async def handle_handshake_response(self, _: "S2FlaskWSServer", message: S2Message, websocket: Sock) -> None: """Handle handshake response messages.""" if not isinstance(message, HandshakeResponse): return - logger.debug("Received HandshakeResponse: %s", message.to_json()) + self.app.logger.debug("Received HandshakeResponse: %s", message.to_json()) async def _send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: """Send a message and forget about it.""" try: await websocket.send(s2_msg.to_json()) except ConnectionClosed: - logger.warning("Connection closed while sending message") + self.app.logger.warning("Connection closed while sending message") async def send_select_control_type(self, control_type: ControlType, websocket: Sock) -> None: """Select the control type.""" diff --git a/requirements/3.12/app.txt b/requirements/3.12/app.txt index 0bee6560d0..d4424d2227 100644 --- a/requirements/3.12/app.txt +++ b/requirements/3.12/app.txt @@ -74,6 +74,7 @@ flask==3.1.1 # flask-migrate # flask-principal # flask-security-too + # flask-sock # flask-sqlalchemy # flask-sslify # flask-wtf @@ -99,6 +100,8 @@ flask-principal==0.4.0 # via flask-security-too flask-security-too==5.6.2 # via -r requirements/app.in +flask-sock==0.7.0 + # via -r requirements/app.in flask-sqlalchemy==3.1.1 # via # -r requirements/app.in @@ -115,8 +118,8 @@ flexparser==0.4 # via pint 
fonttools==4.58.2 # via matplotlib -greenlet==3.2.3 - # via sqlalchemy +h11==0.16.0 + # via wsproto humanize==4.12.3 # via -r requirements/app.in idna==3.10 @@ -320,6 +323,8 @@ scipy==1.15.3 # timetomodel sentry-sdk[flask]==2.29.1 # via -r requirements/app.in +simple-websocket==1.1.0 + # via flask-sock six==1.17.0 # via python-dateutil sktime==0.37.0 @@ -384,6 +389,8 @@ werkzeug==3.1.3 # flask-login workalendar==17.0.0 # via -r requirements/app.in +wsproto==1.2.0 + # via simple-websocket wtforms==3.2.1 # via # flask-security-too diff --git a/requirements/3.9/app.txt b/requirements/3.9/app.txt index 3ae819bdb3..495df90b54 100644 --- a/requirements/3.9/app.txt +++ b/requirements/3.9/app.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --output-file=requirements/3.9/app.txt requirements/app.in @@ -76,6 +76,7 @@ flask==3.1.1 # flask-migrate # flask-principal # flask-security-too + # flask-sock # flask-sqlalchemy # flask-sslify # flask-wtf @@ -101,6 +102,8 @@ flask-principal==0.4.0 # via flask-security-too flask-security-too==5.6.2 # via -r requirements/app.in +flask-sock==0.7.0 + # via -r requirements/app.in flask-sqlalchemy==3.1.1 # via # -r requirements/app.in @@ -117,8 +120,8 @@ flexparser==0.4 # via pint fonttools==4.58.2 # via matplotlib -greenlet==3.2.3 - # via sqlalchemy +h11==0.16.0 + # via wsproto humanize==4.12.3 # via -r requirements/app.in idna==3.10 @@ -129,13 +132,9 @@ idna==3.10 importlib-metadata==8.7.0 # via # -r requirements/app.in - # flask # timely-beliefs - # typeguard importlib-resources==6.5.2 - # via - # flask-security-too - # matplotlib + # via flask-security-too inflect==7.5.0 # via -r requirements/app.in inflection==0.5.1 @@ -326,6 +325,8 @@ scipy==1.13.1 # timetomodel sentry-sdk[flask]==2.29.1 # via -r requirements/app.in +simple-websocket==1.1.0 + # via flask-sock six==1.17.0 # via python-dateutil 
sktime==0.37.0 @@ -350,8 +351,6 @@ timetomodel==0.7.3 # via -r requirements/app.in tldextract==5.3.0 # via -r requirements/app.in -tomli==2.2.1 - # via alembic typeguard==4.4.3 # via inflect types-python-dateutil==2.9.0.20250516 @@ -362,7 +361,6 @@ typing-extensions==4.14.0 # altair # flexcache # flexparser - # marshmallow-sqlalchemy # pint # py-moneyed # pydantic @@ -393,6 +391,8 @@ werkzeug==3.1.3 # flask-login workalendar==17.0.0 # via -r requirements/app.in +wsproto==1.2.0 + # via simple-websocket wtforms==3.2.1 # via # flask-security-too @@ -400,9 +400,7 @@ wtforms==3.2.1 xlrd==2.0.1 # via -r requirements/app.in zipp==3.23.0 - # via - # importlib-metadata - # importlib-resources + # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: # setuptools diff --git a/requirements/3.9/dev.txt b/requirements/3.9/dev.txt index 139233860a..8810f421b7 100644 --- a/requirements/3.9/dev.txt +++ b/requirements/3.9/dev.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --constraint=requirements/3.9/app.txt --constraint=requirements/3.9/test.txt --output-file=requirements/3.9/dev.txt requirements/dev.in @@ -27,11 +27,6 @@ flake8-blind-except==0.2.1 # via -r requirements/dev.in identify==2.6.12 # via pre-commit -importlib-metadata==8.7.0 - # via - # -c requirements/3.9/app.txt - # -c requirements/3.9/test.txt - # build mccabe==0.7.0 # via flake8 mypy==1.16.0 @@ -77,30 +72,15 @@ pyyaml==6.0.2 # pre-commit setuptools-scm==8.0.4 # via -r requirements/dev.in -tomli==2.2.1 - # via - # -c requirements/3.9/app.txt - # -c requirements/3.9/test.txt - # black - # build - # mypy - # setuptools-scm typing-extensions==4.14.0 # via # -c requirements/3.9/app.txt - # -c requirements/3.9/test.txt - # black # mypy # setuptools-scm virtualenv==20.31.2 # via pre-commit watchdog==6.0.0 # via -r requirements/dev.in 
-zipp==3.23.0 - # via - # -c requirements/3.9/app.txt - # -c requirements/3.9/test.txt - # importlib-metadata # The following packages are considered to be unsafe in a requirements file: # setuptools diff --git a/requirements/3.9/docs.txt b/requirements/3.9/docs.txt index d7cdcfee3b..15d2d3c7bd 100644 --- a/requirements/3.9/docs.txt +++ b/requirements/3.9/docs.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --constraint=requirements/3.9/app.txt --output-file=requirements/3.9/docs.txt requirements/docs.in @@ -29,10 +29,6 @@ idna==3.10 # requests imagesize==1.4.1 # via sphinx -importlib-metadata==8.7.0 - # via - # -c requirements/3.9/app.txt - # sphinx jinja2==3.1.6 # via # -c requirements/3.9/app.txt @@ -92,15 +88,7 @@ sphinxcontrib-qthelp==2.0.0 # via sphinx sphinxcontrib-serializinghtml==2.0.0 # via sphinx -tomli==2.2.1 - # via - # -c requirements/3.9/app.txt - # sphinx urllib3==2.4.0 # via # -c requirements/3.9/app.txt # requests -zipp==3.23.0 - # via - # -c requirements/3.9/app.txt - # importlib-metadata diff --git a/requirements/3.9/test.txt b/requirements/3.9/test.txt index 43b89922e5..7506a95605 100644 --- a/requirements/3.9/test.txt +++ b/requirements/3.9/test.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --constraint=requirements/3.9/app.txt --output-file=requirements/3.9/test.txt requirements/test.in @@ -28,8 +28,6 @@ coverage[toml]==7.8.2 # via pytest-cov et-xmlfile==2.0.0 # via openpyxl -exceptiongroup==1.3.0 - # via pytest fakeredis==2.29.0 # via -r requirements/test.in flask==3.1.1 @@ -42,10 +40,6 @@ idna==3.10 # via # -c requirements/3.9/app.txt # requests -importlib-metadata==8.7.0 - # via - # -c requirements/3.9/app.txt - # flask iniconfig==2.1.0 # via pytest 
itsdangerous==2.2.0 @@ -109,16 +103,6 @@ sortedcontainers==2.4.0 # via fakeredis termcolor==3.1.0 # via pytest-sugar -tomli==2.2.1 - # via - # -c requirements/3.9/app.txt - # coverage - # pytest -typing-extensions==4.14.0 - # via - # -c requirements/3.9/app.txt - # exceptiongroup - # fakeredis urllib3==2.4.0 # via # -c requirements/3.9/app.txt @@ -128,7 +112,3 @@ werkzeug==3.1.3 # -c requirements/3.9/app.txt # flask # pytest-flask -zipp==3.23.0 - # via - # -c requirements/3.9/app.txt - # importlib-metadata diff --git a/s2_client_rm.py b/s2_client_rm.py index d64440bcb5..0355557a5b 100644 --- a/s2_client_rm.py +++ b/s2_client_rm.py @@ -146,14 +146,14 @@ def deactivate(self, conn: S2Connection) -> None: if __name__ == "__main__": # Configuration - parser = argparse.ArgumentParser(description="S2 pairing example for FRBC RM") - parser.add_argument("--pairing_endpoint", type=str, required=True) - parser.add_argument("--pairing_token", type=str, required=True) + # parser = argparse.ArgumentParser(description="S2 pairing example for FRBC RM") + # parser.add_argument("--pairing_endpoint", type=str, required=True) + # parser.add_argument("--pairing_token", type=str, required=True) - args = parser.parse_args() + # args = parser.parse_args() - pairing_endpoint = args.pairing_endpoint - pairing_token = args.pairing_token + # pairing_endpoint = args.pairing_endpoint + # pairing_token = args.pairing_token # --- Client Setup --- # Create node description @@ -168,6 +168,8 @@ def deactivate(self, conn: S2Connection) -> None: ) # Create a client to perform the pairing + pairing_endpoint = "ws://127.0.0.1:5000/s2" + pairing_token = "1234567890" client = S2DefaultClient( pairing_uri=pairing_endpoint, token=PairingToken(token=pairing_token), @@ -191,7 +193,7 @@ def deactivate(self, conn: S2Connection) -> None: # logger.info("Challenge solved successfully") s2_connection = S2Connection( - url="wss://127.0.0.1:5000/v1", # type: ignore + url="ws://127.0.0.1:5000/s2", # type: ignore 
role=EnergyManagementRole.RM, control_types=[MyFRBCControlType(), MyNoControlControlType()], asset_details=AssetDetails( diff --git a/test_ws_client.py b/test_ws_client.py index 4cba17726b..de5b57b128 100644 --- a/test_ws_client.py +++ b/test_ws_client.py @@ -8,11 +8,12 @@ def main(): "X-Custom-Header": "SomeValue", # 'Authorization': 'Bearer YourToken', } - ws = Client.connect("ws://127.0.0.1:5000/ping1", headers=headers) + ws = Client.connect("ws://127.0.0.1:5000/s2", headers=headers) try: print("Connected to the WebSocket server!") # Get initial metadata message + ws.send("Hello") initial_msg = json.loads(ws.receive()) print(initial_msg) if initial_msg.get("type") != "metadata": From 8348f5a02647b03f8b5118b1c6b8faf73d96959d Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Wed, 2 Jul 2025 00:32:54 +0200 Subject: [PATCH 015/171] Syncronus ws server handling works --- .python-version | 1 + flexmeasures-env/bin/Activate.ps1 | 241 ++++++++++++++++++ flexmeasures-env/bin/activate | 76 ++++++ flexmeasures-env/bin/activate.csh | 37 +++ flexmeasures-env/bin/activate.fish | 75 ++++++ ...5.14-build-250219jnihavxsz-x86_64.AppImage | 1 + flexmeasures-env/bin/python | 1 + flexmeasures-env/bin/python3 | 1 + flexmeasures-env/lib64 | 1 + flexmeasures-env/pyvenv.cfg | 3 + flexmeasures/app.py | 4 +- flexmeasures/ws/s2_ws.py | 20 +- flexmeasures/ws/s2_ws_sync.py | 227 +++++++++++++++++ requirements/3.10/app.txt | 32 ++- 14 files changed, 709 insertions(+), 11 deletions(-) create mode 100644 .python-version create mode 100644 flexmeasures-env/bin/Activate.ps1 create mode 100644 flexmeasures-env/bin/activate create mode 100644 flexmeasures-env/bin/activate.csh create mode 100644 flexmeasures-env/bin/activate.fish create mode 120000 flexmeasures-env/bin/cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage create mode 120000 flexmeasures-env/bin/python create mode 120000 flexmeasures-env/bin/python3 create mode 120000 flexmeasures-env/lib64 create mode 100644 
flexmeasures-env/pyvenv.cfg create mode 100644 flexmeasures/ws/s2_ws_sync.py diff --git a/.python-version b/.python-version new file mode 100644 index 0000000000..176e61af2e --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +flex-env diff --git a/flexmeasures-env/bin/Activate.ps1 b/flexmeasures-env/bin/Activate.ps1 new file mode 100644 index 0000000000..2fb3852c3c --- /dev/null +++ b/flexmeasures-env/bin/Activate.ps1 @@ -0,0 +1,241 @@ +<# +.Synopsis +Activate a Python virtual environment for the current PowerShell session. + +.Description +Pushes the python executable for a virtual environment to the front of the +$Env:PATH environment variable and sets the prompt to signify that you are +in a Python virtual environment. Makes use of the command line switches as +well as the `pyvenv.cfg` file values present in the virtual environment. + +.Parameter VenvDir +Path to the directory that contains the virtual environment to activate. The +default value for this is the parent of the directory that the Activate.ps1 +script is located within. + +.Parameter Prompt +The prompt prefix to display when this virtual environment is activated. By +default, this prompt is the name of the virtual environment folder (VenvDir) +surrounded by parentheses and followed by a single space (ie. '(.venv) '). + +.Example +Activate.ps1 +Activates the Python virtual environment that contains the Activate.ps1 script. + +.Example +Activate.ps1 -Verbose +Activates the Python virtual environment that contains the Activate.ps1 script, +and shows extra information about the activation as it executes. + +.Example +Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv +Activates the Python virtual environment located in the specified location. + +.Example +Activate.ps1 -Prompt "MyPython" +Activates the Python virtual environment that contains the Activate.ps1 script, +and prefixes the current prompt with the specified string (surrounded in +parentheses) while the virtual environment is active. 
+ +.Notes +On Windows, it may be required to enable this Activate.ps1 script by setting the +execution policy for the user. You can do this by issuing the following PowerShell +command: + +PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser + +For more information on Execution Policies: +https://go.microsoft.com/fwlink/?LinkID=135170 + +#> +Param( + [Parameter(Mandatory = $false)] + [String] + $VenvDir, + [Parameter(Mandatory = $false)] + [String] + $Prompt +) + +<# Function declarations --------------------------------------------------- #> + +<# +.Synopsis +Remove all shell session elements added by the Activate script, including the +addition of the virtual environment's Python executable from the beginning of +the PATH variable. + +.Parameter NonDestructive +If present, do not remove this function from the global namespace for the +session. + +#> +function global:deactivate ([switch]$NonDestructive) { + # Revert to original values + + # The prior prompt: + if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { + Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt + Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT + } + + # The prior PYTHONHOME: + if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { + Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME + Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME + } + + # The prior PATH: + if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { + Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH + Remove-Item -Path Env:_OLD_VIRTUAL_PATH + } + + # Just remove the VIRTUAL_ENV altogether: + if (Test-Path -Path Env:VIRTUAL_ENV) { + Remove-Item -Path env:VIRTUAL_ENV + } + + # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: + if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { + Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force + } + + # Leave deactivate function in the global namespace if requested: + if (-not 
$NonDestructive) { + Remove-Item -Path function:deactivate + } +} + +<# +.Description +Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the +given folder, and returns them in a map. + +For each line in the pyvenv.cfg file, if that line can be parsed into exactly +two strings separated by `=` (with any amount of whitespace surrounding the =) +then it is considered a `key = value` line. The left hand string is the key, +the right hand is the value. + +If the value starts with a `'` or a `"` then the first and last character is +stripped from the value before being captured. + +.Parameter ConfigDir +Path to the directory that contains the `pyvenv.cfg` file. +#> +function Get-PyVenvConfig( + [String] + $ConfigDir +) { + Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" + + # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). + $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue + + # An empty map will be returned if no config file is found. + $pyvenvConfig = @{ } + + if ($pyvenvConfigPath) { + + Write-Verbose "File exists, parse `key = value` lines" + $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath + + $pyvenvConfigContent | ForEach-Object { + $keyval = $PSItem -split "\s*=\s*", 2 + if ($keyval[0] -and $keyval[1]) { + $val = $keyval[1] + + # Remove extraneous quotations around a string value. 
+ if ("'""".Contains($val.Substring(0, 1))) { + $val = $val.Substring(1, $val.Length - 2) + } + + $pyvenvConfig[$keyval[0]] = $val + Write-Verbose "Adding Key: '$($keyval[0])'='$val'" + } + } + } + return $pyvenvConfig +} + + +<# Begin Activate script --------------------------------------------------- #> + +# Determine the containing directory of this script +$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition +$VenvExecDir = Get-Item -Path $VenvExecPath + +Write-Verbose "Activation script is located in path: '$VenvExecPath'" +Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" +Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" + +# Set values required in priority: CmdLine, ConfigFile, Default +# First, get the location of the virtual environment, it might not be +# VenvExecDir if specified on the command line. +if ($VenvDir) { + Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" +} +else { + Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." + $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") + Write-Verbose "VenvDir=$VenvDir" +} + +# Next, read the `pyvenv.cfg` file to determine any required value such +# as `prompt`. +$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir + +# Next, set the prompt from the command line, or the config file, or +# just use the name of the virtual environment folder. +if ($Prompt) { + Write-Verbose "Prompt specified as argument, using '$Prompt'" +} +else { + Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" + if ($pyvenvCfg -and $pyvenvCfg['prompt']) { + Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" + $Prompt = $pyvenvCfg['prompt']; + } + else { + Write-Verbose " Setting prompt based on parent's directory's name. 
(Is the directory name passed to venv module when creating the virutal environment)" + Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" + $Prompt = Split-Path -Path $venvDir -Leaf + } +} + +Write-Verbose "Prompt = '$Prompt'" +Write-Verbose "VenvDir='$VenvDir'" + +# Deactivate any currently active virtual environment, but leave the +# deactivate function in place. +deactivate -nondestructive + +# Now set the environment variable VIRTUAL_ENV, used by many tools to determine +# that there is an activated venv. +$env:VIRTUAL_ENV = $VenvDir + +if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { + + Write-Verbose "Setting prompt to '$Prompt'" + + # Set the prompt to include the env name + # Make sure _OLD_VIRTUAL_PROMPT is global + function global:_OLD_VIRTUAL_PROMPT { "" } + Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT + New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt + + function global:prompt { + Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " + _OLD_VIRTUAL_PROMPT + } +} + +# Clear PYTHONHOME +if (Test-Path -Path Env:PYTHONHOME) { + Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME + Remove-Item -Path Env:PYTHONHOME +} + +# Add the venv to the PATH +Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH +$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" diff --git a/flexmeasures-env/bin/activate b/flexmeasures-env/bin/activate new file mode 100644 index 0000000000..24348a1f79 --- /dev/null +++ b/flexmeasures-env/bin/activate @@ -0,0 +1,76 @@ +# This file must be used with "source bin/activate" *from bash* +# you cannot run it directly + +deactivate () { + # reset old environment variables + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if [ -n 
"${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + + # This should detect bash and zsh, which have a hash command that must + # be called to get it to forget past commands. Without forgetting + # past commands the $PATH changes we made may not be respected + if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r + fi + + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + + unset VIRTUAL_ENV + if [ ! "${1:-}" = "nondestructive" ] ; then + # Self destruct! + unset -f deactivate + fi +} + +# unset irrelevant variables +deactivate nondestructive + +VIRTUAL_ENV=/home/vladi/Documents/flexmeasures/flexmeasures-env +export VIRTUAL_ENV + +_OLD_VIRTUAL_PATH="$PATH" +PATH="$VIRTUAL_ENV/"bin":$PATH" +export PATH + +# unset PYTHONHOME if set +# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) +# could use `if (set -u; : $PYTHONHOME) ;` in bash +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" + unset PYTHONHOME +fi + +if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then + _OLD_VIRTUAL_PS1="${PS1:-}" + if [ x'(flexmeasures-env) ' != x ] ; then + PS1='(flexmeasures-env) '"${PS1:-}" + else + if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then + # special case for Aspen magic directories + # see https://aspen.io/ + PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1" + else + PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1" + fi + fi + export PS1 +fi + +# This should detect bash and zsh, which have a hash command that must +# be called to get it to forget past commands. 
Without forgetting +# past commands the $PATH changes we made may not be respected +if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r +fi diff --git a/flexmeasures-env/bin/activate.csh b/flexmeasures-env/bin/activate.csh new file mode 100644 index 0000000000..51ffbec858 --- /dev/null +++ b/flexmeasures-env/bin/activate.csh @@ -0,0 +1,37 @@ +# This file must be used with "source bin/activate.csh" *from csh*. +# You cannot run it directly. +# Created by Davide Di Blasi . +# Ported to Python 3.3 venv by Andrew Svetlov + +alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate' + +# Unset irrelevant variables. +deactivate nondestructive + +setenv VIRTUAL_ENV /home/vladi/Documents/flexmeasures/flexmeasures-env + +set _OLD_VIRTUAL_PATH="$PATH" +setenv PATH "$VIRTUAL_ENV/"bin":$PATH" + + +set _OLD_VIRTUAL_PROMPT="$prompt" + +if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then + if (flexmeasures-env != "") then + set env_name = flexmeasures-env + else + if (`basename "VIRTUAL_ENV"` == "__") then + # special case for Aspen magic directories + # see https://aspen.io/ + set env_name = `basename \`dirname "$VIRTUAL_ENV"\`` + else + set env_name = `basename "$VIRTUAL_ENV"` + endif + endif + set prompt = "[$env_name] $prompt" + unset env_name +endif + +alias pydoc python -m pydoc + +rehash diff --git a/flexmeasures-env/bin/activate.fish b/flexmeasures-env/bin/activate.fish new file mode 100644 index 0000000000..5faaa2f89c --- /dev/null +++ b/flexmeasures-env/bin/activate.fish @@ -0,0 +1,75 @@ +# This file must be used with ". 
bin/activate.fish" *from fish* (http://fishshell.org) +# you cannot run it directly + +function deactivate -d "Exit virtualenv and return to normal shell environment" + # reset old environment variables + if test -n "$_OLD_VIRTUAL_PATH" + set -gx PATH $_OLD_VIRTUAL_PATH + set -e _OLD_VIRTUAL_PATH + end + if test -n "$_OLD_VIRTUAL_PYTHONHOME" + set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME + set -e _OLD_VIRTUAL_PYTHONHOME + end + + if test -n "$_OLD_FISH_PROMPT_OVERRIDE" + functions -e fish_prompt + set -e _OLD_FISH_PROMPT_OVERRIDE + functions -c _old_fish_prompt fish_prompt + functions -e _old_fish_prompt + end + + set -e VIRTUAL_ENV + if test "$argv[1]" != "nondestructive" + # Self destruct! + functions -e deactivate + end +end + +# unset irrelevant variables +deactivate nondestructive + +set -gx VIRTUAL_ENV /home/vladi/Documents/flexmeasures/flexmeasures-env + +set -gx _OLD_VIRTUAL_PATH $PATH +set -gx PATH "$VIRTUAL_ENV/"bin $PATH + +# unset PYTHONHOME if set +if set -q PYTHONHOME + set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME + set -e PYTHONHOME +end + +if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" + # fish uses a function instead of an env var to generate the prompt. + + # save the current fish_prompt function as the function _old_fish_prompt + functions -c fish_prompt _old_fish_prompt + + # with the original prompt function renamed, we can override with our own. + function fish_prompt + # Save the return status of the last command + set -l old_status $status + + # Prompt override? 
+ if test -n '(flexmeasures-env) ' + printf "%s%s" '(flexmeasures-env) ' (set_color normal) + else + # ...Otherwise, prepend env + set -l _checkbase (basename "$VIRTUAL_ENV") + if test $_checkbase = "__" + # special case for Aspen magic directories + # see https://aspen.io/ + printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal) + else + printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal) + end + end + + # Restore the return status of the previous command. + echo "exit $old_status" | . + _old_fish_prompt + end + + set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" +end diff --git a/flexmeasures-env/bin/cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage b/flexmeasures-env/bin/cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage new file mode 120000 index 0000000000..45bbe433cf --- /dev/null +++ b/flexmeasures-env/bin/cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage @@ -0,0 +1 @@ +/home/vladi/Downloads/cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage \ No newline at end of file diff --git a/flexmeasures-env/bin/python b/flexmeasures-env/bin/python new file mode 120000 index 0000000000..a5e6c3cbfb --- /dev/null +++ b/flexmeasures-env/bin/python @@ -0,0 +1 @@ +cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage \ No newline at end of file diff --git a/flexmeasures-env/bin/python3 b/flexmeasures-env/bin/python3 new file mode 120000 index 0000000000..a5e6c3cbfb --- /dev/null +++ b/flexmeasures-env/bin/python3 @@ -0,0 +1 @@ +cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage \ No newline at end of file diff --git a/flexmeasures-env/lib64 b/flexmeasures-env/lib64 new file mode 120000 index 0000000000..7951405f85 --- /dev/null +++ b/flexmeasures-env/lib64 @@ -0,0 +1 @@ +lib \ No newline at end of file diff --git a/flexmeasures-env/pyvenv.cfg b/flexmeasures-env/pyvenv.cfg new file mode 100644 index 0000000000..ffe410ba7f --- /dev/null +++ b/flexmeasures-env/pyvenv.cfg @@ -0,0 +1,3 
@@ +home = /home/vladi/Downloads +include-system-site-packages = false +version = 3.8.10 diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 707e4e425f..ac5132aa15 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -43,7 +43,7 @@ def create( # noqa C901 from flexmeasures.utils.config_utils import read_config, configure_logging from flexmeasures.utils.app_utils import set_secret_key, init_sentry from flexmeasures.utils.error_utils import add_basic_error_handlers - from flexmeasures.ws.s2_ws import S2FlaskWSServer + from flexmeasures.ws.s2_ws_sync import S2FlaskWSServerSync # Create app @@ -56,7 +56,7 @@ def create( # noqa C901 from flexmeasures.ws import sock sock.init_app(app) - s2_ws = S2FlaskWSServer(app=app, sock=sock) # noqa: F841 + s2_ws = S2FlaskWSServerSync(app=app, sock=sock) # noqa: F841 if env is not None: # overwrite app.config["FLEXMEASURES_ENV"] = env diff --git a/flexmeasures/ws/s2_ws.py b/flexmeasures/ws/s2_ws.py index c6cc5818c7..89e73abc93 100644 --- a/flexmeasures/ws/s2_ws.py +++ b/flexmeasures/ws/s2_ws.py @@ -145,7 +145,6 @@ def _ws_handler(self, ws: Sock) -> None: self.app.logger.info("Received connection from client") asyncio.run(self._handle_websocket_connection(ws)) except Exception as e: - # The websocket is likely closed, or another network error occurred. 
self.app.logger.error("Error in websocket handler: %s", e) async def _handle_websocket_connection(self, websocket: Sock) -> None: @@ -278,17 +277,24 @@ async def handle_handshake(self, _: "S2FlaskWSServer", message: S2Message, webso websocket=websocket, ) - async def handle_reception_status(self, _: "S2FlaskWSServer", message: S2Message, websocket: Sock) -> None: - """Handle reception status messages.""" - if not isinstance(message, ReceptionStatus): - return - self.app.logger.info("Received ReceptionStatus in handle_reception_status: %s", message.to_json()) - async def handle_handshake_response(self, _: "S2FlaskWSServer", message: S2Message, websocket: Sock) -> None: """Handle handshake response messages.""" if not isinstance(message, HandshakeResponse): return self.app.logger.debug("Received HandshakeResponse: %s", message.to_json()) + # Send ReceptionStatus (OK) for the HandshakeResponse message + await self.respond_with_reception_status( + subject_message_id=message.message_id, + status=ReceptionStatusValues.OK, + diagnostic_label="HandshakeResponse processed okay.", + websocket=websocket, + ) + + async def handle_reception_status(self, _: "S2FlaskWSServer", message: S2Message, websocket: Sock) -> None: + """Handle reception status messages.""" + if not isinstance(message, ReceptionStatus): + return + self.app.logger.info("Received ReceptionStatus in handle_reception_status: %s", message.to_json()) async def _send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: """Send a message and forget about it.""" diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py new file mode 100644 index 0000000000..1aa4d96dda --- /dev/null +++ b/flexmeasures/ws/s2_ws_sync.py @@ -0,0 +1,227 @@ +""" +Flask implementation of the S2 protocol WebSocket server (sync mode only). 
+""" + +import json +import logging +import traceback +import uuid +from typing import Any, Callable, Dict, Optional, Type + +from flask import Flask +from flask_sock import ConnectionClosed, Sock + +from s2python.common import ( + ControlType, + EnergyManagementRole, + Handshake, + HandshakeResponse, + ReceptionStatus, + ReceptionStatusValues, + SelectControlType, + ResourceManagerDetails +) +from s2python.message import S2Message +from s2python.s2_parser import S2Parser +from s2python.s2_validation_error import S2ValidationError + +# Set up logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("S2FlaskWSServerSync") + + +class MessageHandlersSync: + """Class to manage sync message handlers for different message types.""" + handlers: Dict[Type[S2Message], Callable] + + def __init__(self) -> None: + self.handlers = {} + + def handle_message( + self, + server: "S2FlaskWSServerSync", + msg: S2Message, + websocket: Sock, + ) -> None: + """Handle the S2 message using the registered handler.""" + handler = self.handlers.get(type(msg)) + if handler is not None: + try: + handler(server, msg, websocket) + except Exception: + message_id = getattr(msg, "message_id", "N/A") + logger.error( + "While processing message %s an unrecoverable error occurred.", + message_id, + ) + logger.error("Error: %s", traceback.format_exc()) + server.respond_with_reception_status( + subject_message_id=getattr( + msg, + "message_id", + uuid.UUID("00000000-0000-0000-0000-000000000000"), + ), + status=ReceptionStatusValues.PERMANENT_ERROR, + diagnostic_label=f"While processing message {message_id} an unrecoverable error occurred.", + websocket=websocket, + ) + raise + else: + logger.warning( + "Received a message of type %s but no handler is registered. 
Ignoring the message.", + type(msg), + ) + + def register_handler(self, msg_type: Type[S2Message], handler: Callable[..., Any]) -> None: + self.handlers[msg_type] = handler + + +class S2FlaskWSServerSync: + """Flask-based WebSocket server implementation for S2 protocol (sync mode only).""" + + def __init__( + self, + role: EnergyManagementRole = EnergyManagementRole.CEM, + ws_path: str = "/s2", + app: Optional[Flask] = None, + sock: Optional[Sock] = None, + ) -> None: + self.role = role + self.ws_path = ws_path + self.app = app if app else Flask(__name__) + self.sock = sock if sock else Sock(self.app) + self._handlers = MessageHandlersSync() + self.s2_parser = S2Parser() + self._connections: Dict[str, Sock] = {} + self._register_default_handlers() + self.sock.route(self.ws_path)(self._ws_handler) + + def _register_default_handlers(self) -> None: + self._handlers.register_handler(Handshake, self.handle_handshake) + self._handlers.register_handler(ReceptionStatus, self.handle_reception_status) + + def _ws_handler(self, ws: Sock) -> None: + try: + self.app.logger.info("Received connection from client") + self._handle_websocket_connection(ws) + except Exception as e: + self.app.logger.error("Error in websocket handler: %s", e) + + def _handle_websocket_connection(self, websocket: Sock) -> None: + client_id = str(uuid.uuid4()) + self.app.logger.info("Client %s connected (sync).", client_id) + self._connections[client_id] = websocket + try: + while True: + message = websocket.receive() + try: + s2_msg = self.s2_parser.parse_as_any_message(message) + self.app.logger.info("Received message in _handle_websocket_connection: %s", s2_msg.to_json()) + except json.JSONDecodeError: + self.respond_with_reception_status( + subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + status=ReceptionStatusValues.INVALID_DATA, + diagnostic_label="Not valid json.", + websocket=websocket, + ) + continue + try: + if not isinstance(s2_msg, ReceptionStatus): + + 
self.respond_with_reception_status( + subject_message_id=s2_msg.message_id, + status=ReceptionStatusValues.OK, + diagnostic_label="Message received.", + websocket=websocket, + ) + self._handlers.handle_message(self, s2_msg, websocket) + except json.JSONDecodeError: + self.respond_with_reception_status( + subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + status=ReceptionStatusValues.INVALID_DATA, + diagnostic_label="Not valid json.", + websocket=websocket, + ) + except S2ValidationError as e: + json_msg = json.loads(message) + message_id = json_msg.get("message_id") + if message_id: + self.respond_with_reception_status( + subject_message_id=message_id, + status=ReceptionStatusValues.INVALID_MESSAGE, + diagnostic_label=str(e), + websocket=websocket, + ) + else: + self.respond_with_reception_status( + subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + status=ReceptionStatusValues.INVALID_DATA, + diagnostic_label="Message appears valid json but could not find a message_id field.", + websocket=websocket, + ) + except Exception as e: + self.app.logger.error("Error processing message: %s", str(e)) + raise + except ConnectionClosed: + self.app.logger.info("Connection with client %s closed (sync)", client_id) + finally: + if client_id in self._connections: + del self._connections[client_id] + self.app.logger.info("Client %s disconnected (sync)", client_id) + + def respond_with_reception_status( + self, + subject_message_id: uuid.UUID, + status: ReceptionStatusValues, + diagnostic_label: str, + websocket: Sock, + ) -> None: + response = ReceptionStatus( + subject_message_id=subject_message_id, + status=status, + diagnostic_label=diagnostic_label, + ) + self.app.logger.info("Sending reception status %s for message %s (sync)", status, subject_message_id) + try: + websocket.send(response.to_json()) + except ConnectionClosed: + self.app.logger.warning("Connection closed while sending reception status (sync)") + + def 
_send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: + try: + websocket.send(s2_msg.to_json()) + except ConnectionClosed: + self.app.logger.warning("Connection closed while sending message (sync)") + + def handle_handshake(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + if not isinstance(message, Handshake): + return + self.app.logger.info("Received Handshake (sync): %s", message.to_json()) + + handshake_response = HandshakeResponse( + message_id=message.message_id, + selected_protocol_version="1.0.0", + ) + self._send_and_forget(handshake_response, websocket) + self.app.logger.info("HandshakeResponse sent (sync)") + # If client is RM, send control type selection + if hasattr(message, "role") and message.role == EnergyManagementRole.RM: + self.app.logger.info("Sending control type selection (sync)") + select_control_type = SelectControlType( + message_id=uuid.uuid4(), + control_type=ControlType.FILL_RATE_BASED_CONTROL, + ) + self._send_and_forget(select_control_type, websocket) + self.app.logger.info("SelectControlType sent (sync)") + + def handle_reception_status(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + if not isinstance(message, ReceptionStatus): + return + self.app.logger.info("Received ReceptionStatus (sync): %s", message.to_json()) + + def handle_ResourceManagerDetails(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + if not isinstance(message, ResourceManagerDetails): + return + self.app.logger.info("Received ResourceManagerDetails (sync): %s", message.to_json()) + + diff --git a/requirements/3.10/app.txt b/requirements/3.10/app.txt index ebb0f0fb74..d170dd5341 100644 --- a/requirements/3.10/app.txt +++ b/requirements/3.10/app.txt @@ -26,6 +26,8 @@ babel==2.17.0 # via py-moneyed bcrypt==4.0.1 # via -r requirements/app.in +binapy==0.8.0 + # via jwskate blinker==1.9.0 # via # flask @@ -37,7 +39,9 @@ certifi==2025.4.26 # requests # sentry-sdk 
cffi==1.17.1 - # via argon2-cffi-bindings + # via + # argon2-cffi-bindings + # cryptography charset-normalizer==3.4.2 # via requests click==8.1.8 @@ -46,12 +50,15 @@ click==8.1.8 # click-default-group # flask # rq + # s2-python click-default-group==1.2.4 # via -r requirements/app.in contourpy==1.3.2 # via matplotlib convertdate==2.4.0 # via workalendar +cryptography==45.0.4 + # via jwskate cycler==0.12.1 # via matplotlib dill==0.4.0 @@ -76,6 +83,7 @@ flask==3.1.1 # flask-migrate # flask-principal # flask-security-too + # flask-sock # flask-sqlalchemy # flask-sslify # flask-wtf @@ -101,6 +109,8 @@ flask-principal==0.4.0 # via flask-security-too flask-security-too==5.6.2 # via -r requirements/app.in +flask-sock==0.7.0 + # via -r requirements/app.in flask-sqlalchemy==3.1.1 # via # -r requirements/app.in @@ -119,6 +129,8 @@ fonttools==4.58.2 # via matplotlib greenlet==3.2.3 # via sqlalchemy +h11==0.16.0 + # via wsproto humanize==4.12.3 # via -r requirements/app.in idna==3.10 @@ -158,6 +170,8 @@ jsonschema==4.24.0 # via altair jsonschema-specifications==2025.4.1 # via jsonschema +jwskate==0.12.2 + # via s2-python kiwisolver==1.4.8 # via matplotlib lunardate==0.2.2 @@ -248,7 +262,9 @@ py-moneyed==3.0 pycparser==2.22 # via cffi pydantic==2.11.5 - # via -r requirements/app.in + # via + # -r requirements/app.in + # s2-python pydantic-core==2.33.2 # via pydantic pyluach==2.2.0 @@ -272,6 +288,7 @@ pytz==2025.2 # via # -r requirements/app.in # pandas + # s2-python # timely-beliefs # timetomodel pyyaml==6.0.2 @@ -293,6 +310,7 @@ referencing==0.36.2 requests==2.32.4 # via # requests-file + # s2-python # tldextract requests-file==2.1.0 # via tldextract @@ -306,6 +324,8 @@ rq==2.3.3 # rq-dashboard rq-dashboard==0.8.3.2 # via -r requirements/app.in +s2-python[ws] @ git+https://github.com/flexiblepower/s2-python.git@feat/flask_server + # via -r requirements/app.in scikit-base==0.12.3 # via sktime scikit-learn==1.6.1 @@ -322,6 +342,8 @@ scipy==1.15.3 # timetomodel 
sentry-sdk[flask]==2.29.1 # via -r requirements/app.in +simple-websocket==1.1.0 + # via flask-sock six==1.17.0 # via python-dateutil sktime==0.37.0 @@ -356,8 +378,10 @@ typing-extensions==4.14.0 # via # alembic # altair + # binapy # flexcache # flexparser + # jwskate # pint # py-moneyed # pydantic @@ -380,6 +404,8 @@ vl-convert-python==1.8.0 # via -r requirements/app.in webargs==8.7.0 # via -r requirements/app.in +websockets==13.1 + # via s2-python werkzeug==3.1.3 # via # -r requirements/app.in @@ -388,6 +414,8 @@ werkzeug==3.1.3 # flask-login workalendar==17.0.0 # via -r requirements/app.in +wsproto==1.2.0 + # via simple-websocket wtforms==3.2.1 # via # flask-security-too From ad9fec2e4e4a95058aa585450c10c0ef4c34b982 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Wed, 9 Jul 2025 00:34:05 +0200 Subject: [PATCH 016/171] Renamed the rm s2 example to make it a test --- s2_client_rm.py => flexmeasures/tests/test_s2_client_rm.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename s2_client_rm.py => flexmeasures/tests/test_s2_client_rm.py (100%) diff --git a/s2_client_rm.py b/flexmeasures/tests/test_s2_client_rm.py similarity index 100% rename from s2_client_rm.py rename to flexmeasures/tests/test_s2_client_rm.py From 1dbeab37e1d830e0f9b944ab1aff3ee3a5c7f730 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 9 Jul 2025 10:04:21 +0200 Subject: [PATCH 017/171] refactor: move test file to tests module within ws subpackage Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/tests/__init__.py | 0 .../{ => ws}/tests/test_s2_client_rm.py | 32 ++++++++++++++----- 2 files changed, 24 insertions(+), 8 deletions(-) create mode 100644 flexmeasures/ws/tests/__init__.py rename flexmeasures/{ => ws}/tests/test_s2_client_rm.py (87%) diff --git a/flexmeasures/ws/tests/__init__.py b/flexmeasures/ws/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/flexmeasures/tests/test_s2_client_rm.py b/flexmeasures/ws/tests/test_s2_client_rm.py similarity index 87% rename from flexmeasures/tests/test_s2_client_rm.py rename to flexmeasures/ws/tests/test_s2_client_rm.py index 0355557a5b..07596ac9ed 100644 --- a/flexmeasures/tests/test_s2_client_rm.py +++ b/flexmeasures/ws/tests/test_s2_client_rm.py @@ -45,9 +45,13 @@ class MyFRBCControlType(FRBCControlType): - def handle_instruction(self, conn: S2Connection, msg: S2Message, send_okay: Callable[[], None]) -> None: + def handle_instruction( + self, conn: S2Connection, msg: S2Message, send_okay: Callable[[], None] + ) -> None: if not isinstance(msg, FRBCInstruction): - raise RuntimeError(f"Expected an FRBCInstruction but received a message of type {type(msg)}.") + raise RuntimeError( + f"Expected an FRBCInstruction but received a message of type {type(msg)}." 
+ ) print(f"I have received the message {msg} from {conn}") def activate(self, conn: S2Connection) -> None: @@ -68,8 +72,12 @@ def activate(self, conn: S2Connection) -> None: id=operation_mode_id, elements=[ FRBCOperationModeElement( - fill_level_range=NumberRange(start_of_range=0.0, end_of_range=100.0), - fill_rate=NumberRange(start_of_range=-5.0, end_of_range=5.0), + fill_level_range=NumberRange( + start_of_range=0.0, end_of_range=100.0 + ), + fill_rate=NumberRange( + start_of_range=-5.0, end_of_range=5.0 + ), power_ranges=[ PowerRange( start_of_range=-200.0, @@ -89,7 +97,9 @@ def activate(self, conn: S2Connection) -> None: ) ], storage=FRBCStorageDescription( - fill_level_range=NumberRange(start_of_range=0.0, end_of_range=100.0), + fill_level_range=NumberRange( + start_of_range=0.0, end_of_range=100.0 + ), fill_level_label="%", diagnostic_label="Imaginary battery", provides_fill_level_target_profile=True, @@ -107,11 +117,15 @@ def activate(self, conn: S2Connection) -> None: elements=[ FRBCFillLevelTargetProfileElement( duration=Duration.from_milliseconds(30_000), - fill_level_range=NumberRange(start_of_range=20.0, end_of_range=30.0), + fill_level_range=NumberRange( + start_of_range=20.0, end_of_range=30.0 + ), ), FRBCFillLevelTargetProfileElement( duration=Duration.from_milliseconds(300_000), - fill_level_range=NumberRange(start_of_range=40.0, end_of_range=50.0), + fill_level_range=NumberRange( + start_of_range=40.0, end_of_range=50.0 + ), ), ], ) @@ -200,7 +214,9 @@ def deactivate(self, conn: S2Connection) -> None: resource_id=client.client_node_id, name="Some asset", instruction_processing_delay=Duration.from_milliseconds(20), - roles=[Role(role=RoleType.ENERGY_CONSUMER, commodity=Commodity.ELECTRICITY)], + roles=[ + Role(role=RoleType.ENERGY_CONSUMER, commodity=Commodity.ELECTRICITY) + ], currency=Currency.EUR, provides_forecast=False, provides_power_measurements=[CommodityQuantity.ELECTRIC_POWER_L1], From f06b11e432db2108f26760dc9294930e421fb73c Mon Sep 17 
00:00:00 2001 From: "F.N. Claessen" Date: Wed, 9 Jul 2025 10:07:29 +0200 Subject: [PATCH 018/171] refactor: set SERVER_URL in one place Signed-off-by: F.N. Claessen --- flexmeasures/ws/tests/test_s2_client_rm.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/tests/test_s2_client_rm.py b/flexmeasures/ws/tests/test_s2_client_rm.py index 07596ac9ed..e0c66a32f8 100644 --- a/flexmeasures/ws/tests/test_s2_client_rm.py +++ b/flexmeasures/ws/tests/test_s2_client_rm.py @@ -42,6 +42,7 @@ from s2python.message import S2Message logger = logging.getLogger("s2python") +SERVER_URL = "ws://127.0.0.1:5000" class MyFRBCControlType(FRBCControlType): @@ -182,7 +183,7 @@ def deactivate(self, conn: S2Connection) -> None: ) # Create a client to perform the pairing - pairing_endpoint = "ws://127.0.0.1:5000/s2" + pairing_endpoint = f"{SERVER_URL}/s2" pairing_token = "1234567890" client = S2DefaultClient( pairing_uri=pairing_endpoint, @@ -207,7 +208,7 @@ def deactivate(self, conn: S2Connection) -> None: # logger.info("Challenge solved successfully") s2_connection = S2Connection( - url="ws://127.0.0.1:5000/s2", # type: ignore + url=f"{SERVER_URL}/s2", # type: ignore role=EnergyManagementRole.RM, control_types=[MyFRBCControlType(), MyNoControlControlType()], asset_details=AssetDetails( From 18a02c885886e3c84a912e868f069d71b0cc69b4 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 13 Aug 2025 10:55:15 +0200 Subject: [PATCH 019/171] feat: add auth required to WS endpoint Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 58 +++++++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 16 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 1aa4d96dda..e678f05d95 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -9,6 +9,7 @@ from typing import Any, Callable, Dict, Optional, Type from flask import Flask +from flask_security import auth_required from flask_sock import ConnectionClosed, Sock from s2python.common import ( @@ -19,7 +20,7 @@ ReceptionStatus, ReceptionStatusValues, SelectControlType, - ResourceManagerDetails + ResourceManagerDetails, ) from s2python.message import S2Message from s2python.s2_parser import S2Parser @@ -32,6 +33,7 @@ class MessageHandlersSync: """Class to manage sync message handlers for different message types.""" + handlers: Dict[Type[S2Message], Callable] def __init__(self) -> None: @@ -72,7 +74,9 @@ def handle_message( type(msg), ) - def register_handler(self, msg_type: Type[S2Message], handler: Callable[..., Any]) -> None: + def register_handler( + self, msg_type: Type[S2Message], handler: Callable[..., Any] + ) -> None: self.handlers[msg_type] = handler @@ -107,6 +111,7 @@ def _ws_handler(self, ws: Sock) -> None: except Exception as e: self.app.logger.error("Error in websocket handler: %s", e) + @auth_required() def _handle_websocket_connection(self, websocket: Sock) -> None: client_id = str(uuid.uuid4()) self.app.logger.info("Client %s connected (sync).", client_id) @@ -116,10 +121,15 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: message = websocket.receive() try: s2_msg = self.s2_parser.parse_as_any_message(message) - self.app.logger.info("Received message in _handle_websocket_connection: %s", s2_msg.to_json()) + self.app.logger.info( + "Received message in _handle_websocket_connection: %s", + s2_msg.to_json(), + ) except json.JSONDecodeError: self.respond_with_reception_status( - 
subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + subject_message_id=uuid.UUID( + "00000000-0000-0000-0000-000000000000" + ), status=ReceptionStatusValues.INVALID_DATA, diagnostic_label="Not valid json.", websocket=websocket, @@ -127,7 +137,7 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: continue try: if not isinstance(s2_msg, ReceptionStatus): - + self.respond_with_reception_status( subject_message_id=s2_msg.message_id, status=ReceptionStatusValues.OK, @@ -137,7 +147,9 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: self._handlers.handle_message(self, s2_msg, websocket) except json.JSONDecodeError: self.respond_with_reception_status( - subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + subject_message_id=uuid.UUID( + "00000000-0000-0000-0000-000000000000" + ), status=ReceptionStatusValues.INVALID_DATA, diagnostic_label="Not valid json.", websocket=websocket, @@ -154,7 +166,9 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: ) else: self.respond_with_reception_status( - subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + subject_message_id=uuid.UUID( + "00000000-0000-0000-0000-000000000000" + ), status=ReceptionStatusValues.INVALID_DATA, diagnostic_label="Message appears valid json but could not find a message_id field.", websocket=websocket, @@ -181,11 +195,17 @@ def respond_with_reception_status( status=status, diagnostic_label=diagnostic_label, ) - self.app.logger.info("Sending reception status %s for message %s (sync)", status, subject_message_id) + self.app.logger.info( + "Sending reception status %s for message %s (sync)", + status, + subject_message_id, + ) try: websocket.send(response.to_json()) except ConnectionClosed: - self.app.logger.warning("Connection closed while sending reception status (sync)") + self.app.logger.warning( + "Connection closed while sending reception status (sync)" + ) def _send_and_forget(self, s2_msg: 
S2Message, websocket: Sock) -> None: try: @@ -193,11 +213,13 @@ def _send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: except ConnectionClosed: self.app.logger.warning("Connection closed while sending message (sync)") - def handle_handshake(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + def handle_handshake( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: if not isinstance(message, Handshake): return self.app.logger.info("Received Handshake (sync): %s", message.to_json()) - + handshake_response = HandshakeResponse( message_id=message.message_id, selected_protocol_version="1.0.0", @@ -214,14 +236,18 @@ def handle_handshake(self, _: "S2FlaskWSServerSync", message: S2Message, websock self._send_and_forget(select_control_type, websocket) self.app.logger.info("SelectControlType sent (sync)") - def handle_reception_status(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + def handle_reception_status( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: if not isinstance(message, ReceptionStatus): return self.app.logger.info("Received ReceptionStatus (sync): %s", message.to_json()) - def handle_ResourceManagerDetails(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + def handle_ResourceManagerDetails( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: if not isinstance(message, ResourceManagerDetails): return - self.app.logger.info("Received ResourceManagerDetails (sync): %s", message.to_json()) - - + self.app.logger.info( + "Received ResourceManagerDetails (sync): %s", message.to_json() + ) From 9cf4e904b9f48031d62968c130fa3b5048661d82 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 13 Aug 2025 11:39:40 +0200 Subject: [PATCH 020/171] fix: auth check Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index e678f05d95..cfe711cdea 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -8,8 +8,8 @@ import uuid from typing import Any, Callable, Dict, Optional, Type -from flask import Flask -from flask_security import auth_required +from flask import Flask, request, abort +from flask_security import current_user from flask_sock import ConnectionClosed, Sock from s2python.common import ( @@ -105,13 +105,18 @@ def _register_default_handlers(self) -> None: self._handlers.register_handler(ReceptionStatus, self.handle_reception_status) def _ws_handler(self, ws: Sock) -> None: + # Auth check + if not current_user.is_authenticated: + self.app.logger.warning( + "Unauthorized WS connection attempt from %s", request.remote_addr + ) + abort(401) try: self.app.logger.info("Received connection from client") self._handle_websocket_connection(ws) except Exception as e: self.app.logger.error("Error in websocket handler: %s", e) - @auth_required() def _handle_websocket_connection(self, websocket: Sock) -> None: client_id = str(uuid.uuid4()) self.app.logger.info("Client %s connected (sync).", client_id) From b6b37f6f1df7d1a1374ee142c74f572524a757e2 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 13 Aug 2025 12:04:30 +0200 Subject: [PATCH 021/171] fix: less noisy 401, by moving the auth check to before upgrading the connection Signed-off-by: F.N. 
Claessen --- flexmeasures/app.py | 42 +++++++++++++++++++++++++++++------ flexmeasures/ws/s2_ws_sync.py | 9 +------- 2 files changed, 36 insertions(+), 15 deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index ac5132aa15..5a2ef37798 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -10,7 +10,8 @@ from pathlib import Path from datetime import date -from flask import Flask, g, request +from flask import Flask, g, request, Response +from flask_security import current_user from flask.cli import load_dotenv from flask_mail import Mail from flask_sslify import SSLify @@ -71,7 +72,10 @@ def create( # noqa C901 if plugins: app.config["FLEXMEASURES_PLUGINS"] += plugins add_basic_error_handlers(app) - if app.config.get("FLEXMEASURES_ENV") not in ("development", "documentation") and not app.testing: + if ( + app.config.get("FLEXMEASURES_ENV") not in ("development", "documentation") + and not app.testing + ): init_sentry(app) app.mail = Mail(app) @@ -82,7 +86,9 @@ def create( # noqa C901 if app.testing: from fakeredis import FakeStrictRedis - redis_conn = FakeStrictRedis(host="redis", port="1234") # dummy connection details + redis_conn = FakeStrictRedis( + host="redis", port="1234" + ) # dummy connection details else: redis_conn = Redis( app.config["FLEXMEASURES_REDIS_URL"], @@ -138,7 +144,9 @@ def create( # noqa C901 schedulers = get_classes_module("flexmeasures.data.models", planning.Scheduler) app.data_generators = dict() - app.data_generators["reporter"] = copy(reporters) # use copy to avoid mutating app.reporters + app.data_generators["reporter"] = copy( + reporters + ) # use copy to avoid mutating app.reporters app.data_generators["scheduler"] = schedulers # add auth policy @@ -212,7 +220,9 @@ def teardown_request(exception=None): if app.config.get("FLEXMEASURES_PROFILE_REQUESTS", False): diff = time.time() - g.start if all([kw not in request.url for kw in ["/static", "favicon.ico"]]): - app.logger.info(f"[PROFILE] {str(round(diff, 
2)).rjust(6)} seconds to serve {request.url}.") + app.logger.info( + f"[PROFILE] {str(round(diff, 2)).rjust(6)} seconds to serve {request.url}." + ) if not hasattr(g, "profiler"): return app g.profiler.stop() @@ -222,9 +232,27 @@ def teardown_request(exception=None): endpoint = "unknown" today = date.today() profile_filename = f"pyinstrument_{endpoint}.html" - profile_output_path = Path("profile_reports", today.strftime("%Y-%m-%d")) + profile_output_path = Path( + "profile_reports", today.strftime("%Y-%m-%d") + ) profile_output_path.mkdir(parents=True, exist_ok=True) - with open(os.path.join(profile_output_path, profile_filename), "w+") as f: + with open( + os.path.join(profile_output_path, profile_filename), "w+" + ) as f: f.write(output_html) + @app.before_request + def ws_handshake_auth(): + # Check if this is the WS handshake route + is_ws_handshake = ( + request.path == s2_ws.ws_path + and request.headers.get("Upgrade", "").lower() == "websocket" + ) + if is_ws_handshake and not current_user.is_authenticated: + app.logger.info( + "Unauthorized WS handshake attempt from %s", request.remote_addr + ) + # Send clean 401 without stack trace noise + return Response("Unauthorized", status=401) + return app diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index cfe711cdea..8a387abde5 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -8,8 +8,7 @@ import uuid from typing import Any, Callable, Dict, Optional, Type -from flask import Flask, request, abort -from flask_security import current_user +from flask import Flask from flask_sock import ConnectionClosed, Sock from s2python.common import ( @@ -105,12 +104,6 @@ def _register_default_handlers(self) -> None: self._handlers.register_handler(ReceptionStatus, self.handle_reception_status) def _ws_handler(self, ws: Sock) -> None: - # Auth check - if not current_user.is_authenticated: - self.app.logger.warning( - "Unauthorized WS connection attempt from %s", 
request.remote_addr - ) - abort(401) try: self.app.logger.info("Received connection from client") self._handle_websocket_connection(ws) From 21dd9dec1ce444e1d1053c28d494a2ef3454d264 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 13 Aug 2025 14:32:20 +0200 Subject: [PATCH 022/171] feat: support bearer token set up in config settings Signed-off-by: F.N. Claessen --- flexmeasures/app.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 5a2ef37798..c3087d751e 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -242,17 +242,26 @@ def teardown_request(exception=None): f.write(output_html) @app.before_request - def ws_handshake_auth(): - # Check if this is the WS handshake route - is_ws_handshake = ( + def ws_connection_auth(): + # Check if this is the S2 WS connection route + is_ws_connection = ( request.path == s2_ws.ws_path and request.headers.get("Upgrade", "").lower() == "websocket" ) - if is_ws_handshake and not current_user.is_authenticated: - app.logger.info( - "Unauthorized WS handshake attempt from %s", request.remote_addr - ) - # Send clean 401 without stack trace noise - return Response("Unauthorized", status=401) + + if not is_ws_connection or current_user.is_authenticated: + return # Let other before_request hooks handle it + + auth_header = request.headers.get("Authorization", "") + if auth_header.startswith("Bearer "): + token = auth_header.removeprefix("Bearer ").strip() + if token == app.config.get("WEBSOCKET_BEARER_TOKEN", None): + return # Let other before_request hooks handle it + + app.logger.info( + "Unauthorized WS handshake attempt from %s", request.remote_addr + ) + # Send clean 401 without stack trace noise + return Response("Unauthorized", status=401) return app From 051a00f8b3c66473af8072cd66b73bb8fdcaa79c Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Wed, 13 Aug 2025 14:50:53 +0200 Subject: [PATCH 023/171] fix: register handle_ResourceManagerDetails Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 8a387abde5..db9d40fad9 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -102,6 +102,9 @@ def __init__( def _register_default_handlers(self) -> None: self._handlers.register_handler(Handshake, self.handle_handshake) self._handlers.register_handler(ReceptionStatus, self.handle_reception_status) + self._handlers.register_handler( + ResourceManagerDetails, self.handle_ResourceManagerDetails + ) def _ws_handler(self, ws: Sock) -> None: try: From a36d749e5c8cef2b45db4600faae1a369d086169 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 13 Aug 2025 15:18:37 +0200 Subject: [PATCH 024/171] dev: add scaffolding with todos Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index db9d40fad9..b3d11b62e7 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -252,3 +252,18 @@ def handle_ResourceManagerDetails( self.app.logger.info( "Received ResourceManagerDetails (sync): %s", message.to_json() ) + + # todo: we need to look up the asset that has as its attribute the resource-id matching the resource-id in the message + scheduler_class = self.app.data_generators["scheduler"]["S2Scheduler"] + from flexmeasures import Asset + from flexmeasures.data import db + asset = db.session.get(Asset, 1) # temporary workaround + + scheduler = scheduler_class(asset, return_multiple=True) + schedule = scheduler.compute() + for entry in schedule: + if isinstance(entry, Instruction): + websocket.send(entry) + elif isinstance(entry, dict) and "sensor" in entry: + # todo: save entry["data"] to 
sensor (see the code block starting in data/services/scheduling/make_schedule line 598) + pass From 7c65b842cea2eca3c9ee59458b455be98b4e48c3 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Mon, 25 Aug 2025 07:14:43 +0200 Subject: [PATCH 025/171] Full pipeline fails at db Signed-off-by: Vlad Iftime --- .python-version | 2 +- flexmeasures/ws/s2_ws_sync.py | 167 +++++++++++++++++++++++++++++++--- requirements/3.11/app.txt | 32 ++++++- 3 files changed, 184 insertions(+), 17 deletions(-) diff --git a/.python-version b/.python-version index 176e61af2e..3e72aa6986 100644 --- a/.python-version +++ b/.python-version @@ -1 +1 @@ -flex-env +3.11.10 diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index b3d11b62e7..5ddb49dfc9 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -21,6 +21,13 @@ SelectControlType, ResourceManagerDetails, ) +from s2python.frbc import ( + FRBCSystemDescription, + FRBCFillLevelTargetProfile, + FRBCStorageStatus, + FRBCActuatorStatus, + FRBCInstruction, +) from s2python.message import S2Message from s2python.s2_parser import S2Parser from s2python.s2_validation_error import S2ValidationError @@ -30,6 +37,26 @@ logger = logging.getLogger("S2FlaskWSServerSync") +class FRBCDeviceData: + """Class to store FRBC device data received from Resource Manager.""" + + def __init__(self): + self.system_description: Optional[FRBCSystemDescription] = None + self.fill_level_target_profile: Optional[FRBCFillLevelTargetProfile] = None + self.storage_status: Optional[FRBCStorageStatus] = None + self.actuator_status: Optional[FRBCActuatorStatus] = None + self.resource_id: Optional[str] = None + + def is_complete(self) -> bool: + """Check if we have received all necessary data to generate instructions.""" + return ( + self.system_description is not None + and self.fill_level_target_profile is not None + and self.storage_status is not None + and self.actuator_status is not None + ) + + class MessageHandlersSync: 
"""Class to manage sync message handlers for different message types.""" @@ -96,6 +123,8 @@ def __init__( self._handlers = MessageHandlersSync() self.s2_parser = S2Parser() self._connections: Dict[str, Sock] = {} + self._device_data: Dict[str, FRBCDeviceData] = {} # Store device data by resource_id + self._websocket_to_resource: Dict[Sock, str] = {} # Map websocket to resource_id self._register_default_handlers() self.sock.route(self.ws_path)(self._ws_handler) @@ -105,6 +134,11 @@ def _register_default_handlers(self) -> None: self._handlers.register_handler( ResourceManagerDetails, self.handle_ResourceManagerDetails ) + # Register FRBC message handlers + self._handlers.register_handler(FRBCSystemDescription, self.handle_frbc_system_description) + self._handlers.register_handler(FRBCFillLevelTargetProfile, self.handle_frbc_fill_level_target_profile) + self._handlers.register_handler(FRBCStorageStatus, self.handle_frbc_storage_status) + self._handlers.register_handler(FRBCActuatorStatus, self.handle_frbc_actuator_status) def _ws_handler(self, ws: Sock) -> None: try: @@ -182,6 +216,9 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: finally: if client_id in self._connections: del self._connections[client_id] + # Clean up websocket to resource mapping + if websocket in self._websocket_to_resource: + del self._websocket_to_resource[websocket] self.app.logger.info("Client %s disconnected (sync)", client_id) def respond_with_reception_status( @@ -253,17 +290,119 @@ def handle_ResourceManagerDetails( "Received ResourceManagerDetails (sync): %s", message.to_json() ) - # todo: we need to look up the asset that has as its attribute the resource-id matching the resource-id in the message - scheduler_class = self.app.data_generators["scheduler"]["S2Scheduler"] - from flexmeasures import Asset - from flexmeasures.data import db - asset = db.session.get(Asset, 1) # temporary workaround - - scheduler = scheduler_class(asset, return_multiple=True) - schedule = 
scheduler.compute() - for entry in schedule: - if isinstance(entry, Instruction): - websocket.send(entry) - elif isinstance(entry, dict) and "sensor" in entry: - # todo: save entry["data"] to sensor (see the code block starting in data/services/scheduling/make_schedule line 598) - pass + # Store the resource_id from ResourceManagerDetails for device identification + resource_id = str(message.resource_id) + self._websocket_to_resource[websocket] = resource_id + + if resource_id not in self._device_data: + self._device_data[resource_id] = FRBCDeviceData() + self._device_data[resource_id].resource_id = resource_id + + def handle_frbc_system_description( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: + if not isinstance(message, FRBCSystemDescription): + return + self.app.logger.info("Received FRBCSystemDescription: %s", message.to_json()) + + # Get resource_id from websocket mapping + resource_id = self._websocket_to_resource.get(websocket, "default_resource") + if resource_id not in self._device_data: + self._device_data[resource_id] = FRBCDeviceData() + + self._device_data[resource_id].system_description = message + self._check_and_generate_instructions(resource_id, websocket) + + def handle_frbc_fill_level_target_profile( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: + if not isinstance(message, FRBCFillLevelTargetProfile): + return + self.app.logger.info("Received FRBCFillLevelTargetProfile: %s", message.to_json()) + + resource_id = self._websocket_to_resource.get(websocket, "default_resource") + if resource_id not in self._device_data: + self._device_data[resource_id] = FRBCDeviceData() + + self._device_data[resource_id].fill_level_target_profile = message + self._check_and_generate_instructions(resource_id, websocket) + + def handle_frbc_storage_status( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: + if not isinstance(message, FRBCStorageStatus): + 
return + self.app.logger.info("Received FRBCStorageStatus: %s", message.to_json()) + + resource_id = self._websocket_to_resource.get(websocket, "default_resource") + if resource_id not in self._device_data: + self._device_data[resource_id] = FRBCDeviceData() + + self._device_data[resource_id].storage_status = message + self._check_and_generate_instructions(resource_id, websocket) + + def handle_frbc_actuator_status( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: + if not isinstance(message, FRBCActuatorStatus): + return + self.app.logger.info("Received FRBCActuatorStatus: %s", message.to_json()) + + resource_id = self._websocket_to_resource.get(websocket, "default_resource") + if resource_id not in self._device_data: + self._device_data[resource_id] = FRBCDeviceData() + + self._device_data[resource_id].actuator_status = message + self._check_and_generate_instructions(resource_id, websocket) + + def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> None: + """Check if we have all required data and generate instructions if so.""" + device_data = self._device_data.get(resource_id) + if device_data is None or not device_data.is_complete(): + self.app.logger.info(f"Waiting for more data from device {resource_id}") + return + + self.app.logger.info(f"All data received for device {resource_id}, generating instructions") + + try: + # Get scheduler and generate instructions + scheduler_class = self.app.data_generators["scheduler"]["S2Scheduler"] + from flexmeasures import Asset + from flexmeasures.data import db + + # Try to get asset from database, fallback to mock if DB unavailable + try: + asset = db.session.get(Asset, 1) + if asset is None: + self.app.logger.warning("No asset found with ID 1, using mock asset") + asset = self._create_mock_asset() + except Exception as db_error: + self.app.logger.warning(f"Database unavailable ({db_error}), using mock asset") + asset = self._create_mock_asset() + + # Create 
scheduler with device data + scheduler = scheduler_class(asset, return_multiple=True) + + # Pass the device data to the scheduler + scheduler.frbc_device_data = device_data + + schedule = scheduler.compute() + for entry in schedule: + if isinstance(entry, FRBCInstruction): + self._send_and_forget(entry, websocket) + elif isinstance(entry, dict) and "sensor" in entry: + # todo: save entry["data"] to sensor (see the code block starting in data/services/scheduling/make_schedule line 598) + pass + + except Exception as e: + self.app.logger.error(f"Error generating instructions for device {resource_id}: {e}") + # Continue processing other devices + + def _create_mock_asset(self): + """Create a mock asset for testing when database is unavailable.""" + class MockAsset: + def __init__(self): + self.id = 1 + self.name = "Mock Asset" + + return MockAsset() diff --git a/requirements/3.11/app.txt b/requirements/3.11/app.txt index cc19ad8506..381e7c2ade 100644 --- a/requirements/3.11/app.txt +++ b/requirements/3.11/app.txt @@ -24,6 +24,8 @@ babel==2.17.0 # via py-moneyed bcrypt==4.0.1 # via -r requirements/app.in +binapy==0.8.0 + # via jwskate blinker==1.9.0 # via # flask @@ -35,7 +37,9 @@ certifi==2025.4.26 # requests # sentry-sdk cffi==1.17.1 - # via argon2-cffi-bindings + # via + # argon2-cffi-bindings + # cryptography charset-normalizer==3.4.2 # via requests click==8.1.8 @@ -44,12 +48,15 @@ click==8.1.8 # click-default-group # flask # rq + # s2-python click-default-group==1.2.4 # via -r requirements/app.in contourpy==1.3.2 # via matplotlib convertdate==2.4.0 # via workalendar +cryptography==45.0.6 + # via jwskate cycler==0.12.1 # via matplotlib dill==0.4.0 @@ -74,6 +81,7 @@ flask==3.1.1 # flask-migrate # flask-principal # flask-security-too + # flask-sock # flask-sqlalchemy # flask-sslify # flask-wtf @@ -99,6 +107,8 @@ flask-principal==0.4.0 # via flask-security-too flask-security-too==5.6.2 # via -r requirements/app.in +flask-sock==0.7.0 + # via -r requirements/app.in 
flask-sqlalchemy==3.1.1 # via # -r requirements/app.in @@ -117,6 +127,8 @@ fonttools==4.58.2 # via matplotlib greenlet==3.2.3 # via sqlalchemy +h11==0.16.0 + # via wsproto humanize==4.12.3 # via -r requirements/app.in idna==3.10 @@ -156,6 +168,8 @@ jsonschema==4.24.0 # via altair jsonschema-specifications==2025.4.1 # via jsonschema +jwskate==0.12.2 + # via s2-python kiwisolver==1.4.8 # via matplotlib lunardate==0.2.2 @@ -246,7 +260,9 @@ py-moneyed==3.0 pycparser==2.22 # via cffi pydantic==2.11.5 - # via -r requirements/app.in + # via + # -r requirements/app.in + # s2-python pydantic-core==2.33.2 # via pydantic pyluach==2.2.0 @@ -270,6 +286,7 @@ pytz==2025.2 # via # -r requirements/app.in # pandas + # s2-python # timely-beliefs # timetomodel pyyaml==6.0.2 @@ -291,6 +308,7 @@ referencing==0.36.2 requests==2.32.4 # via # requests-file + # s2-python # tldextract requests-file==2.1.0 # via tldextract @@ -304,6 +322,8 @@ rq==2.3.3 # rq-dashboard rq-dashboard==0.8.3.2 # via -r requirements/app.in +s2-python[ws] @ git+https://github.com/flexiblepower/s2-python.git@feat/flask_server + # via -r requirements/app.in scikit-base==0.12.3 # via sktime scikit-learn==1.6.1 @@ -320,6 +340,8 @@ scipy==1.15.3 # timetomodel sentry-sdk[flask]==2.29.1 # via -r requirements/app.in +simple-websocket==1.1.0 + # via flask-sock six==1.17.0 # via python-dateutil sktime==0.37.0 @@ -352,8 +374,10 @@ typing-extensions==4.14.0 # via # alembic # altair + # binapy # flexcache # flexparser + # jwskate # pint # py-moneyed # pydantic @@ -376,6 +400,8 @@ vl-convert-python==1.8.0 # via -r requirements/app.in webargs==8.7.0 # via -r requirements/app.in +websockets==13.1 + # via s2-python werkzeug==3.1.3 # via # -r requirements/app.in @@ -384,6 +410,8 @@ werkzeug==3.1.3 # flask-login workalendar==17.0.0 # via -r requirements/app.in +wsproto==1.2.0 + # via simple-websocket wtforms==3.2.1 # via # flask-security-too From 101fdb45eede85d41be44b5c7c31c1050b75444e Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: 
Tue, 26 Aug 2025 14:38:27 +0200 Subject: [PATCH 026/171] Full pipeline: can now call the sceduler without errors on the server or client side Signed-off-by: Vlad Iftime --- .gitignore | 3 +- flexmeasures-env/bin/Activate.ps1 | 241 ------------------ flexmeasures-env/bin/activate | 76 ------ flexmeasures-env/bin/activate.csh | 37 --- flexmeasures-env/bin/activate.fish | 75 ------ ...5.14-build-250219jnihavxsz-x86_64.AppImage | 1 - flexmeasures-env/bin/python | 1 - flexmeasures-env/bin/python3 | 1 - flexmeasures-env/lib64 | 1 - flexmeasures-env/pyvenv.cfg | 3 - flexmeasures.db | Bin 0 -> 61440 bytes flexmeasures/ws/s2_ws_sync.py | 57 ++++- run-flexmeasures.sh | 9 + 13 files changed, 62 insertions(+), 443 deletions(-) delete mode 100644 flexmeasures-env/bin/Activate.ps1 delete mode 100644 flexmeasures-env/bin/activate delete mode 100644 flexmeasures-env/bin/activate.csh delete mode 100644 flexmeasures-env/bin/activate.fish delete mode 120000 flexmeasures-env/bin/cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage delete mode 120000 flexmeasures-env/bin/python delete mode 120000 flexmeasures-env/bin/python3 delete mode 120000 flexmeasures-env/lib64 delete mode 100644 flexmeasures-env/pyvenv.cfg create mode 100644 flexmeasures.db create mode 100755 run-flexmeasures.sh diff --git a/.gitignore b/.gitignore index 8121120351..e1dfd2c8b6 100644 --- a/.gitignore +++ b/.gitignore @@ -53,4 +53,5 @@ coverage.lcov venv* logs/ *.dump -iframe_figures/ \ No newline at end of file +iframe_figures/ +flexmeasures-env-311/ diff --git a/flexmeasures-env/bin/Activate.ps1 b/flexmeasures-env/bin/Activate.ps1 deleted file mode 100644 index 2fb3852c3c..0000000000 --- a/flexmeasures-env/bin/Activate.ps1 +++ /dev/null @@ -1,241 +0,0 @@ -<# -.Synopsis -Activate a Python virtual environment for the current PowerShell session. 
- -.Description -Pushes the python executable for a virtual environment to the front of the -$Env:PATH environment variable and sets the prompt to signify that you are -in a Python virtual environment. Makes use of the command line switches as -well as the `pyvenv.cfg` file values present in the virtual environment. - -.Parameter VenvDir -Path to the directory that contains the virtual environment to activate. The -default value for this is the parent of the directory that the Activate.ps1 -script is located within. - -.Parameter Prompt -The prompt prefix to display when this virtual environment is activated. By -default, this prompt is the name of the virtual environment folder (VenvDir) -surrounded by parentheses and followed by a single space (ie. '(.venv) '). - -.Example -Activate.ps1 -Activates the Python virtual environment that contains the Activate.ps1 script. - -.Example -Activate.ps1 -Verbose -Activates the Python virtual environment that contains the Activate.ps1 script, -and shows extra information about the activation as it executes. - -.Example -Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv -Activates the Python virtual environment located in the specified location. - -.Example -Activate.ps1 -Prompt "MyPython" -Activates the Python virtual environment that contains the Activate.ps1 script, -and prefixes the current prompt with the specified string (surrounded in -parentheses) while the virtual environment is active. - -.Notes -On Windows, it may be required to enable this Activate.ps1 script by setting the -execution policy for the user. 
You can do this by issuing the following PowerShell -command: - -PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser - -For more information on Execution Policies: -https://go.microsoft.com/fwlink/?LinkID=135170 - -#> -Param( - [Parameter(Mandatory = $false)] - [String] - $VenvDir, - [Parameter(Mandatory = $false)] - [String] - $Prompt -) - -<# Function declarations --------------------------------------------------- #> - -<# -.Synopsis -Remove all shell session elements added by the Activate script, including the -addition of the virtual environment's Python executable from the beginning of -the PATH variable. - -.Parameter NonDestructive -If present, do not remove this function from the global namespace for the -session. - -#> -function global:deactivate ([switch]$NonDestructive) { - # Revert to original values - - # The prior prompt: - if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { - Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt - Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT - } - - # The prior PYTHONHOME: - if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { - Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME - Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME - } - - # The prior PATH: - if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { - Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH - Remove-Item -Path Env:_OLD_VIRTUAL_PATH - } - - # Just remove the VIRTUAL_ENV altogether: - if (Test-Path -Path Env:VIRTUAL_ENV) { - Remove-Item -Path env:VIRTUAL_ENV - } - - # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: - if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { - Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force - } - - # Leave deactivate function in the global namespace if requested: - if (-not $NonDestructive) { - Remove-Item -Path function:deactivate - } -} - -<# -.Description -Get-PyVenvConfig parses the values from 
the pyvenv.cfg file located in the -given folder, and returns them in a map. - -For each line in the pyvenv.cfg file, if that line can be parsed into exactly -two strings separated by `=` (with any amount of whitespace surrounding the =) -then it is considered a `key = value` line. The left hand string is the key, -the right hand is the value. - -If the value starts with a `'` or a `"` then the first and last character is -stripped from the value before being captured. - -.Parameter ConfigDir -Path to the directory that contains the `pyvenv.cfg` file. -#> -function Get-PyVenvConfig( - [String] - $ConfigDir -) { - Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" - - # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). - $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue - - # An empty map will be returned if no config file is found. - $pyvenvConfig = @{ } - - if ($pyvenvConfigPath) { - - Write-Verbose "File exists, parse `key = value` lines" - $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath - - $pyvenvConfigContent | ForEach-Object { - $keyval = $PSItem -split "\s*=\s*", 2 - if ($keyval[0] -and $keyval[1]) { - $val = $keyval[1] - - # Remove extraneous quotations around a string value. 
- if ("'""".Contains($val.Substring(0, 1))) { - $val = $val.Substring(1, $val.Length - 2) - } - - $pyvenvConfig[$keyval[0]] = $val - Write-Verbose "Adding Key: '$($keyval[0])'='$val'" - } - } - } - return $pyvenvConfig -} - - -<# Begin Activate script --------------------------------------------------- #> - -# Determine the containing directory of this script -$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition -$VenvExecDir = Get-Item -Path $VenvExecPath - -Write-Verbose "Activation script is located in path: '$VenvExecPath'" -Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" -Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" - -# Set values required in priority: CmdLine, ConfigFile, Default -# First, get the location of the virtual environment, it might not be -# VenvExecDir if specified on the command line. -if ($VenvDir) { - Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" -} -else { - Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." - $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") - Write-Verbose "VenvDir=$VenvDir" -} - -# Next, read the `pyvenv.cfg` file to determine any required value such -# as `prompt`. -$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir - -# Next, set the prompt from the command line, or the config file, or -# just use the name of the virtual environment folder. -if ($Prompt) { - Write-Verbose "Prompt specified as argument, using '$Prompt'" -} -else { - Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" - if ($pyvenvCfg -and $pyvenvCfg['prompt']) { - Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" - $Prompt = $pyvenvCfg['prompt']; - } - else { - Write-Verbose " Setting prompt based on parent's directory's name. 
(Is the directory name passed to venv module when creating the virutal environment)" - Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" - $Prompt = Split-Path -Path $venvDir -Leaf - } -} - -Write-Verbose "Prompt = '$Prompt'" -Write-Verbose "VenvDir='$VenvDir'" - -# Deactivate any currently active virtual environment, but leave the -# deactivate function in place. -deactivate -nondestructive - -# Now set the environment variable VIRTUAL_ENV, used by many tools to determine -# that there is an activated venv. -$env:VIRTUAL_ENV = $VenvDir - -if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { - - Write-Verbose "Setting prompt to '$Prompt'" - - # Set the prompt to include the env name - # Make sure _OLD_VIRTUAL_PROMPT is global - function global:_OLD_VIRTUAL_PROMPT { "" } - Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT - New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt - - function global:prompt { - Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " - _OLD_VIRTUAL_PROMPT - } -} - -# Clear PYTHONHOME -if (Test-Path -Path Env:PYTHONHOME) { - Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME - Remove-Item -Path Env:PYTHONHOME -} - -# Add the venv to the PATH -Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH -$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" diff --git a/flexmeasures-env/bin/activate b/flexmeasures-env/bin/activate deleted file mode 100644 index 24348a1f79..0000000000 --- a/flexmeasures-env/bin/activate +++ /dev/null @@ -1,76 +0,0 @@ -# This file must be used with "source bin/activate" *from bash* -# you cannot run it directly - -deactivate () { - # reset old environment variables - if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then - PATH="${_OLD_VIRTUAL_PATH:-}" - export PATH - unset _OLD_VIRTUAL_PATH - fi - if [ -n 
"${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then - PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" - export PYTHONHOME - unset _OLD_VIRTUAL_PYTHONHOME - fi - - # This should detect bash and zsh, which have a hash command that must - # be called to get it to forget past commands. Without forgetting - # past commands the $PATH changes we made may not be respected - if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then - hash -r - fi - - if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then - PS1="${_OLD_VIRTUAL_PS1:-}" - export PS1 - unset _OLD_VIRTUAL_PS1 - fi - - unset VIRTUAL_ENV - if [ ! "${1:-}" = "nondestructive" ] ; then - # Self destruct! - unset -f deactivate - fi -} - -# unset irrelevant variables -deactivate nondestructive - -VIRTUAL_ENV=/home/vladi/Documents/flexmeasures/flexmeasures-env -export VIRTUAL_ENV - -_OLD_VIRTUAL_PATH="$PATH" -PATH="$VIRTUAL_ENV/"bin":$PATH" -export PATH - -# unset PYTHONHOME if set -# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) -# could use `if (set -u; : $PYTHONHOME) ;` in bash -if [ -n "${PYTHONHOME:-}" ] ; then - _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" - unset PYTHONHOME -fi - -if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then - _OLD_VIRTUAL_PS1="${PS1:-}" - if [ x'(flexmeasures-env) ' != x ] ; then - PS1='(flexmeasures-env) '"${PS1:-}" - else - if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then - # special case for Aspen magic directories - # see https://aspen.io/ - PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1" - else - PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1" - fi - fi - export PS1 -fi - -# This should detect bash and zsh, which have a hash command that must -# be called to get it to forget past commands. 
Without forgetting -# past commands the $PATH changes we made may not be respected -if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then - hash -r -fi diff --git a/flexmeasures-env/bin/activate.csh b/flexmeasures-env/bin/activate.csh deleted file mode 100644 index 51ffbec858..0000000000 --- a/flexmeasures-env/bin/activate.csh +++ /dev/null @@ -1,37 +0,0 @@ -# This file must be used with "source bin/activate.csh" *from csh*. -# You cannot run it directly. -# Created by Davide Di Blasi . -# Ported to Python 3.3 venv by Andrew Svetlov - -alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate' - -# Unset irrelevant variables. -deactivate nondestructive - -setenv VIRTUAL_ENV /home/vladi/Documents/flexmeasures/flexmeasures-env - -set _OLD_VIRTUAL_PATH="$PATH" -setenv PATH "$VIRTUAL_ENV/"bin":$PATH" - - -set _OLD_VIRTUAL_PROMPT="$prompt" - -if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then - if (flexmeasures-env != "") then - set env_name = flexmeasures-env - else - if (`basename "VIRTUAL_ENV"` == "__") then - # special case for Aspen magic directories - # see https://aspen.io/ - set env_name = `basename \`dirname "$VIRTUAL_ENV"\`` - else - set env_name = `basename "$VIRTUAL_ENV"` - endif - endif - set prompt = "[$env_name] $prompt" - unset env_name -endif - -alias pydoc python -m pydoc - -rehash diff --git a/flexmeasures-env/bin/activate.fish b/flexmeasures-env/bin/activate.fish deleted file mode 100644 index 5faaa2f89c..0000000000 --- a/flexmeasures-env/bin/activate.fish +++ /dev/null @@ -1,75 +0,0 @@ -# This file must be used with ". 
bin/activate.fish" *from fish* (http://fishshell.org) -# you cannot run it directly - -function deactivate -d "Exit virtualenv and return to normal shell environment" - # reset old environment variables - if test -n "$_OLD_VIRTUAL_PATH" - set -gx PATH $_OLD_VIRTUAL_PATH - set -e _OLD_VIRTUAL_PATH - end - if test -n "$_OLD_VIRTUAL_PYTHONHOME" - set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME - set -e _OLD_VIRTUAL_PYTHONHOME - end - - if test -n "$_OLD_FISH_PROMPT_OVERRIDE" - functions -e fish_prompt - set -e _OLD_FISH_PROMPT_OVERRIDE - functions -c _old_fish_prompt fish_prompt - functions -e _old_fish_prompt - end - - set -e VIRTUAL_ENV - if test "$argv[1]" != "nondestructive" - # Self destruct! - functions -e deactivate - end -end - -# unset irrelevant variables -deactivate nondestructive - -set -gx VIRTUAL_ENV /home/vladi/Documents/flexmeasures/flexmeasures-env - -set -gx _OLD_VIRTUAL_PATH $PATH -set -gx PATH "$VIRTUAL_ENV/"bin $PATH - -# unset PYTHONHOME if set -if set -q PYTHONHOME - set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME - set -e PYTHONHOME -end - -if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" - # fish uses a function instead of an env var to generate the prompt. - - # save the current fish_prompt function as the function _old_fish_prompt - functions -c fish_prompt _old_fish_prompt - - # with the original prompt function renamed, we can override with our own. - function fish_prompt - # Save the return status of the last command - set -l old_status $status - - # Prompt override? 
- if test -n '(flexmeasures-env) ' - printf "%s%s" '(flexmeasures-env) ' (set_color normal) - else - # ...Otherwise, prepend env - set -l _checkbase (basename "$VIRTUAL_ENV") - if test $_checkbase = "__" - # special case for Aspen magic directories - # see https://aspen.io/ - printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal) - else - printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal) - end - end - - # Restore the return status of the previous command. - echo "exit $old_status" | . - _old_fish_prompt - end - - set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" -end diff --git a/flexmeasures-env/bin/cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage b/flexmeasures-env/bin/cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage deleted file mode 120000 index 45bbe433cf..0000000000 --- a/flexmeasures-env/bin/cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage +++ /dev/null @@ -1 +0,0 @@ -/home/vladi/Downloads/cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage \ No newline at end of file diff --git a/flexmeasures-env/bin/python b/flexmeasures-env/bin/python deleted file mode 120000 index a5e6c3cbfb..0000000000 --- a/flexmeasures-env/bin/python +++ /dev/null @@ -1 +0,0 @@ -cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage \ No newline at end of file diff --git a/flexmeasures-env/bin/python3 b/flexmeasures-env/bin/python3 deleted file mode 120000 index a5e6c3cbfb..0000000000 --- a/flexmeasures-env/bin/python3 +++ /dev/null @@ -1 +0,0 @@ -cursor-0.45.14-build-250219jnihavxsz-x86_64.AppImage \ No newline at end of file diff --git a/flexmeasures-env/lib64 b/flexmeasures-env/lib64 deleted file mode 120000 index 7951405f85..0000000000 --- a/flexmeasures-env/lib64 +++ /dev/null @@ -1 +0,0 @@ -lib \ No newline at end of file diff --git a/flexmeasures-env/pyvenv.cfg b/flexmeasures-env/pyvenv.cfg deleted file mode 100644 index ffe410ba7f..0000000000 --- a/flexmeasures-env/pyvenv.cfg +++ 
/dev/null @@ -1,3 +0,0 @@ -home = /home/vladi/Downloads -include-system-site-packages = false -version = 3.8.10 diff --git a/flexmeasures.db b/flexmeasures.db new file mode 100644 index 0000000000000000000000000000000000000000..8a3522faaf758bcbc511de6b147671cbc2fde067 GIT binary patch literal 61440 zcmeI(U2ob}7{GDJ3j|8ScIC@Bnks>^O?hdxObXIvAHc8oC z8g=_f)4s{}33juKUG8Sm&N(K;HX-5K`lHg=K5st1=kcj6r^iPX$Fudb-k@W7`flQ* zL^7H9M%NRGL{@zB;(PsB5gSX_JK|?Dusv%toA`Y1&(-|DiM8e568Y8q@2juZ{>c56 z8)dh0)!ZkU^~`_iuUGySg?tb|009K%5x81dOKY2($q!FF>$Gj3p7+f`uWh^L(6t9{ zWH-NGH%bjdZVHI>e-C!%e*O*hvXoUL;JPr zaid-m9Ynh}`)}=${(ZgtqEvsSe`mbX3r?#jySbyycm(t2G|yx&2erCUKCH@K3gaq8 zy>1*Bb)&j(9N!E*&luy0sWB9oIW@+(%G4ODx*8)}y!tGc*4EdPzZ54yP{s1p)yC=+ z{J8l{a=T+WZT)4bzW;5hUf6m3SaHc#U8`eH74$9Fz32^EQw43y^~`qfjng&7KkLs$ z^c&?DhNN!}2Lro1qcG6S=}$V{-r7>95pH@*1%JS1Z`k#&6G{$gH9hCt)}PgC6{A#D zC(-MkIfIVfik@90F%;L_Y2?>dgr>}ylWO_s#Lx@sNHJU7UQKDndNSd3TlPEmr?$B9 zF|DE3Q~QBr*q#J%?ecY2h4^ZD5@L0oO|tXBRESmi9amK-kYD<;E!%AloWAGux)b&5 z;73SQ_j8l@wdIf@5qUs@Jh%_UZ?8bqX6~w=PHUQ${B=)(i2SoX_18)WLLIsTL6w`{ zsBfE5kI)hnEtJXAa@>B~8cj7+iDmVzrsIuFk@%ep{h(4SiS;b7rnN24@rEt?wxVA5 z&8)IO5d2f0XKC|mMy6^8_NP{zK$!|raD}@m(+jFl&Ufm(f;gd&2MeNWu-~=pG}J_m zk}Vc9Deb8?>$36N?x-Kx;>!J1w^h|lJC)X+Y$Pu;>gsde1znm}*K~h$&iu?$X_6kN zSGoG!_(7jhU9VMj|9oU->4ix@#ldn~+Y_UGJYzJmw0UCXy=k1fXS{KP+eRE072p57 znASEnk{<$>iki#KC`;29jjAE99H~E948%GSN@K@0o4u|(?AU|rbv0VvAM{$o=8SSF zb3KLex*Tdhk-N3`mY^D$u5G!!t|gYy(XF*EZ2RpT)kd~8n4{WMrmMwc`Z63R?e0=a zJKVTCX;lw?Ix6+$%VJX9RlU#ni(A|7oH|YOT-;KLmDvjKE`(zjE|dWrH#57#&Lobz zJK;D6XBF;2Eb-0oy;Gn+v35UQNNGo#Gbb47Y&&xDi{d6zKF=ps#W$J%Cn5guK>z^+ z5I_I{1Q0*~0R#|00D+hYtR)v7wss#q+G^Q5Us$JK$(Q^*|HmY_OalP~5I_I{1Q0*~ z0R#|00D-YUe(_-@`1~*5|BGMx=Ys$O2q1s}0tg_000IagfB*t97Lf1%dH#=afSDQs z2q1s}0tg_000IagfB*tQfaibG009ILKmY**5I_I{1Q0*~f%psX{2%`qGeZOrKmY** z5I_I{1Q0*~0R;H}|KtDy2q1s}0tg_000IagfB*vV7vTMW{A0`v5kLR|1Q0*~0R#|0 z009IL;Q5~%KmY**5I_I{1Q0*~0R#|0ApQdU{(t;q%nT7g009ILKmY**5I_I{1Q6i; zKRJK^0tg_000IagfB*srAb>#p1$h4-{}?kv1Q0*~0R#|0009ILKmY**c>hlhAb 
self.app.logger.warning(f"Database unavailable ({db_error}), using mock asset") asset = self._create_mock_asset() - # Create scheduler with device data - scheduler = scheduler_class(asset, return_multiple=True) - - # Pass the device data to the scheduler - scheduler.frbc_device_data = device_data + # Create and configure S2Scheduler directly with minimal setup + scheduler = self._create_s2_scheduler_with_frbc_data(device_data) schedule = scheduler.compute() for entry in schedule: @@ -400,9 +397,57 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> def _create_mock_asset(self): """Create a mock asset for testing when database is unavailable.""" + from datetime import datetime, timedelta + + # Create a simple mock object that has the minimum attributes expected by S2Scheduler class MockAsset: def __init__(self): self.id = 1 - self.name = "Mock Asset" + self.name = "Mock S2 Asset" + # Important: S2Scheduler.deserialize_config() accesses asset.attributes + self.attributes = { + "flex-model": {}, + "custom-scheduler": { + "module": "flexmeasures_s2.scheduler.schedulers", + "class": "S2Scheduler", + } + } + # Create minimal mock asset type to satisfy isinstance checks + self.generic_asset_type_id = 1 + self.generic_asset_type = type('MockAssetType', (), { + 'id': 1, + 'name': 'S2Device' + })() return MockAsset() + + + def _create_s2_scheduler_with_frbc_data(self, device_data): + """Create S2Scheduler directly with minimal setup, bypassing FlexMeasures validation.""" + from datetime import datetime, timedelta + + # Import S2Scheduler directly + scheduler_class = self.app.data_generators["scheduler"]["S2Scheduler"] + + # Create a minimal scheduler instance by bypassing the constructor validation + scheduler = scheduler_class.__new__(scheduler_class) + + # Set minimal required attributes + scheduler.sensor = None + scheduler.asset = None + scheduler.start = datetime.now() + scheduler.end = datetime.now() + timedelta(hours=1) + 
scheduler.resolution = timedelta(minutes=15) + scheduler.belief_time = datetime.now() + scheduler.round_to_decimals = 6 + scheduler.flex_model = {} + scheduler.flex_context = {} + scheduler.fallback_scheduler_class = None + scheduler.info = {"scheduler": "S2Scheduler"} + scheduler.config_deserialized = True # Skip config deserialization + scheduler.return_multiple = True + + # Set the FRBC device data + scheduler.frbc_device_data = device_data + + return scheduler diff --git a/run-flexmeasures.sh b/run-flexmeasures.sh new file mode 100755 index 0000000000..20e5fc480f --- /dev/null +++ b/run-flexmeasures.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# FlexMeasures wrapper script with database connection fix +# This script sets the correct database URL via environment variable to bypass config file parsing issues + +export SQLALCHEMY_DATABASE_URI="postgresql://flexmeasures-user:FMPass@localhost:5432/flexmeasures-db" + +# Run flexmeasures with any arguments passed to this script +flexmeasures "$@" From 208ea6a7481f3a05d42c29318012380392f5781f Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Tue, 26 Aug 2025 14:40:40 +0200 Subject: [PATCH 027/171] Full pipeline: can now call the sceduler without errors on the server or client side Signed-off-by: Vlad Iftime --- flexmeasures.db | Bin 61440 -> 0 bytes run-flexmeasures.sh | 9 --------- 2 files changed, 9 deletions(-) delete mode 100644 flexmeasures.db delete mode 100755 run-flexmeasures.sh diff --git a/flexmeasures.db b/flexmeasures.db deleted file mode 100644 index 8a3522faaf758bcbc511de6b147671cbc2fde067..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 61440 zcmeI(U2ob}7{GDJ3j|8ScIC@Bnks>^O?hdxObXIvAHc8oC z8g=_f)4s{}33juKUG8Sm&N(K;HX-5K`lHg=K5st1=kcj6r^iPX$Fudb-k@W7`flQ* zL^7H9M%NRGL{@zB;(PsB5gSX_JK|?Dusv%toA`Y1&(-|DiM8e568Y8q@2juZ{>c56 z8)dh0)!ZkU^~`_iuUGySg?tb|009K%5x81dOKY2($q!FF>$Gj3p7+f`uWh^L(6t9{ zWH-NGH%bjdZVHI>e-C!%e*O*hvXoUL;JPr 
zaid-m9Ynh}`)}=${(ZgtqEvsSe`mbX3r?#jySbyycm(t2G|yx&2erCUKCH@K3gaq8 zy>1*Bb)&j(9N!E*&luy0sWB9oIW@+(%G4ODx*8)}y!tGc*4EdPzZ54yP{s1p)yC=+ z{J8l{a=T+WZT)4bzW;5hUf6m3SaHc#U8`eH74$9Fz32^EQw43y^~`qfjng&7KkLs$ z^c&?DhNN!}2Lro1qcG6S=}$V{-r7>95pH@*1%JS1Z`k#&6G{$gH9hCt)}PgC6{A#D zC(-MkIfIVfik@90F%;L_Y2?>dgr>}ylWO_s#Lx@sNHJU7UQKDndNSd3TlPEmr?$B9 zF|DE3Q~QBr*q#J%?ecY2h4^ZD5@L0oO|tXBRESmi9amK-kYD<;E!%AloWAGux)b&5 z;73SQ_j8l@wdIf@5qUs@Jh%_UZ?8bqX6~w=PHUQ${B=)(i2SoX_18)WLLIsTL6w`{ zsBfE5kI)hnEtJXAa@>B~8cj7+iDmVzrsIuFk@%ep{h(4SiS;b7rnN24@rEt?wxVA5 z&8)IO5d2f0XKC|mMy6^8_NP{zK$!|raD}@m(+jFl&Ufm(f;gd&2MeNWu-~=pG}J_m zk}Vc9Deb8?>$36N?x-Kx;>!J1w^h|lJC)X+Y$Pu;>gsde1znm}*K~h$&iu?$X_6kN zSGoG!_(7jhU9VMj|9oU->4ix@#ldn~+Y_UGJYzJmw0UCXy=k1fXS{KP+eRE072p57 znASEnk{<$>iki#KC`;29jjAE99H~E948%GSN@K@0o4u|(?AU|rbv0VvAM{$o=8SSF zb3KLex*Tdhk-N3`mY^D$u5G!!t|gYy(XF*EZ2RpT)kd~8n4{WMrmMwc`Z63R?e0=a zJKVTCX;lw?Ix6+$%VJX9RlU#ni(A|7oH|YOT-;KLmDvjKE`(zjE|dWrH#57#&Lobz zJK;D6XBF;2Eb-0oy;Gn+v35UQNNGo#Gbb47Y&&xDi{d6zKF=ps#W$J%Cn5guK>z^+ z5I_I{1Q0*~0R#|00D+hYtR)v7wss#q+G^Q5Us$JK$(Q^*|HmY_OalP~5I_I{1Q0*~ z0R#|00D-YUe(_-@`1~*5|BGMx=Ys$O2q1s}0tg_000IagfB*t97Lf1%dH#=afSDQs z2q1s}0tg_000IagfB*tQfaibG009ILKmY**5I_I{1Q0*~f%psX{2%`qGeZOrKmY** z5I_I{1Q0*~0R;H}|KtDy2q1s}0tg_000IagfB*vV7vTMW{A0`v5kLR|1Q0*~0R#|0 z009IL;Q5~%KmY**5I_I{1Q0*~0R#|0ApQdU{(t;q%nT7g009ILKmY**5I_I{1Q6i; zKRJK^0tg_000IagfB*srAb>#p1$h4-{}?kv1Q0*~0R#|0009ILKmY**c>hlhAb Date: Tue, 26 Aug 2025 15:08:29 +0200 Subject: [PATCH 028/171] Full pipeline: fixed alignement issue Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws_sync.py | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 57a7237c1c..bd144848f9 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -432,13 +432,29 @@ def _create_s2_scheduler_with_frbc_data(self, device_data): # Create a minimal scheduler instance by bypassing the constructor validation scheduler 
= scheduler_class.__new__(scheduler_class) - # Set minimal required attributes + # Create properly aligned start time based on 15-minute intervals + # This ensures the timestamps align with the timestep duration + now = datetime.now().replace(tzinfo=datetime.now().astimezone().tzinfo) + resolution = timedelta(minutes=15) + + # Align start time to the nearest 15-minute boundary + minutes_offset = now.minute % 15 + seconds_offset = now.second + microseconds_offset = now.microsecond + + start_aligned = now.replace( + minute=now.minute - minutes_offset, + second=0, + microsecond=0 + ) + + # Set minimal required attributes with aligned timestamps scheduler.sensor = None scheduler.asset = None - scheduler.start = datetime.now() - scheduler.end = datetime.now() + timedelta(hours=1) - scheduler.resolution = timedelta(minutes=15) - scheduler.belief_time = datetime.now() + scheduler.start = start_aligned + scheduler.end = start_aligned + timedelta(hours=1) + scheduler.resolution = resolution + scheduler.belief_time = start_aligned scheduler.round_to_decimals = 6 scheduler.flex_model = {} scheduler.flex_context = {} From 75521577363f1fa3d11aba7725a47eb3ae12c031 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Tue, 26 Aug 2025 16:22:37 +0200 Subject: [PATCH 029/171] Full pipline: FRBCInstrucions are generated and sent to the client. 
Still need check on the actual results Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws.py | 2 +- flexmeasures/ws/s2_ws_sync.py | 56 +++++++++++++++++++++++++++++++++-- 2 files changed, 55 insertions(+), 3 deletions(-) diff --git a/flexmeasures/ws/s2_ws.py b/flexmeasures/ws/s2_ws.py index 89e73abc93..286ca6573c 100644 --- a/flexmeasures/ws/s2_ws.py +++ b/flexmeasures/ws/s2_ws.py @@ -21,7 +21,7 @@ ReceptionStatusValues, SelectControlType, ) -from s2python.communication.reception_status_awaiter import ReceptionStatusAwaiter +from s2python.reception_status_awaiter import ReceptionStatusAwaiter from s2python.message import S2Message from s2python.s2_parser import S2Parser from s2python.s2_validation_error import S2ValidationError diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index bd144848f9..8ceba5374e 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -380,8 +380,8 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> self.app.logger.warning(f"Database unavailable ({db_error}), using mock asset") asset = self._create_mock_asset() - # Create and configure S2Scheduler directly with minimal setup - scheduler = self._create_s2_scheduler_with_frbc_data(device_data) + # Create and configure S2Scheduler with the fixed logic from flexmeasures_s2 + scheduler = self._create_fixed_s2_scheduler_with_frbc_data(device_data) schedule = scheduler.compute() for entry in schedule: @@ -467,3 +467,55 @@ def _create_s2_scheduler_with_frbc_data(self, device_data): scheduler.frbc_device_data = device_data return scheduler + + def _create_fixed_s2_scheduler_with_frbc_data(self, device_data): + """Create S2Scheduler with the fixed logic from flexmeasures_s2 project.""" + from datetime import datetime, timedelta, timezone + + # Import the fixed S2Scheduler directly from flexmeasures_s2 + scheduler_class = self.app.data_generators["scheduler"]["S2Scheduler"] + + # Create a minimal scheduler instance 
by bypassing the constructor validation + scheduler = scheduler_class.__new__(scheduler_class) + + # Create properly aligned start time based on 15-minute intervals + # This ensures the timestamps align with the timestep duration + now = datetime.now().replace(tzinfo=timezone.utc) + resolution = timedelta(minutes=15) + + # Align start time to the nearest 15-minute boundary + minutes_offset = now.minute % 15 + start_aligned = now.replace( + minute=now.minute - minutes_offset, + second=0, + microsecond=0 + ) + + # Set minimal required attributes with aligned timestamps + scheduler.sensor = None + scheduler.asset = None + scheduler.start = start_aligned + scheduler.end = start_aligned + timedelta(hours=1) + scheduler.resolution = resolution + scheduler.belief_time = start_aligned + scheduler.round_to_decimals = 6 + scheduler.flex_model = {} + scheduler.flex_context = {} + scheduler.fallback_scheduler_class = None + scheduler.info = {"scheduler": "S2Scheduler"} + scheduler.config_deserialized = True # Skip config deserialization + scheduler.return_multiple = True + + # Convert device_data to the format expected by the fixed scheduler + class MockFRBCDeviceData: + def __init__(self, data): + self.resource_id = data.resource_id + self.system_description = data.system_description + self.fill_level_target_profile = data.fill_level_target_profile + self.storage_status = data.storage_status + self.actuator_status = data.actuator_status + + # Set the FRBC device data + scheduler.frbc_device_data = MockFRBCDeviceData(device_data) + + return scheduler From 04dcaf8b11cfabc8e4ff8b25136a6880b78fe225 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Mon, 1 Sep 2025 04:46:38 +0200 Subject: [PATCH 030/171] Full pipline: FRBCInstrucions are generated correctly but the scheduler does just one step of improvement Signed-off-by: Vlad Iftime --- flexmeasures/app.py | 44 ++++++++ flexmeasures/ws/s2_ws_sync.py | 189 +++++++--------------------------- 2 files changed, 82 insertions(+), 151 
deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index c3087d751e..fe552394a4 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -256,6 +256,50 @@ def ws_connection_auth(): if auth_header.startswith("Bearer "): token = auth_header.removeprefix("Bearer ").strip() if token == app.config.get("WEBSOCKET_BEARER_TOKEN", None): + # Initialize S2Scheduler for this WebSocket connection if not already done + if not hasattr(s2_ws, 's2_scheduler'): + from datetime import datetime, timedelta, timezone + + # Get S2Scheduler class from registered schedulers + scheduler_class = app.data_generators["scheduler"]["S2Scheduler"] + + # Create scheduler instance with minimal setup for WebSocket usage + scheduler = scheduler_class.__new__(scheduler_class) + + # Set basic time parameters + now = datetime.now(timezone.utc) + resolution = timedelta(minutes=5) # Match example_schedule_frbc.py resolution + + # Align to 5-minute boundary + minutes_offset = now.minute % 5 + start_aligned = now.replace( + minute=now.minute - minutes_offset, + second=0, + microsecond=0 + ) + + # Set required attributes for scheduler + scheduler.sensor = None + scheduler.asset = None + scheduler.start = start_aligned + scheduler.end = start_aligned + timedelta(hours=24) # 24-hour planning window + scheduler.resolution = resolution + scheduler.belief_time = start_aligned + scheduler.round_to_decimals = 6 + scheduler.flex_model = {} + scheduler.flex_context = {} + scheduler.fallback_scheduler_class = None + scheduler.info = {"scheduler": "S2Scheduler"} + scheduler.config_deserialized = True + scheduler.return_multiple = True + + # Initialize device states storage + scheduler.device_states = {} + + # Attach scheduler to WebSocket server + s2_ws.s2_scheduler = scheduler + app.logger.info("S2Scheduler initialized for WebSocket connections") + return # Let other before_request hooks handle it app.logger.info( diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 
8ceba5374e..da438210e6 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -216,9 +216,19 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: finally: if client_id in self._connections: del self._connections[client_id] - # Clean up websocket to resource mapping + # Clean up websocket to resource mapping and device states if websocket in self._websocket_to_resource: + resource_id = self._websocket_to_resource[websocket] del self._websocket_to_resource[websocket] + + # Clean up device data + if resource_id in self._device_data: + del self._device_data[resource_id] + + # Clean up device state from scheduler if available + if hasattr(self, 's2_scheduler') and self.s2_scheduler is not None: + self.s2_scheduler.remove_device_state(resource_id) + self.app.logger.info("Client %s disconnected (sync)", client_id) def respond_with_reception_status( @@ -365,157 +375,34 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> self.app.logger.info(f"All data received for device {resource_id}, generating instructions") try: - # Get scheduler and generate instructions - scheduler_class = self.app.data_generators["scheduler"]["S2Scheduler"] - from flexmeasures import Asset - from flexmeasures.data import db - - # Try to get asset from database, fallback to mock if DB unavailable - try: - asset = db.session.get(Asset, 1) - if asset is None: - self.app.logger.warning("No asset found with ID 1, using mock asset") - asset = self._create_mock_asset() - except Exception as db_error: - self.app.logger.warning(f"Database unavailable ({db_error}), using mock asset") - asset = self._create_mock_asset() - - # Create and configure S2Scheduler with the fixed logic from flexmeasures_s2 - scheduler = self._create_fixed_s2_scheduler_with_frbc_data(device_data) - - schedule = scheduler.compute() - for entry in schedule: - if isinstance(entry, FRBCInstruction): - self._send_and_forget(entry, websocket) - elif isinstance(entry, 
dict) and "sensor" in entry: - # todo: save entry["data"] to sensor (see the code block starting in data/services/scheduling/make_schedule line 598) - pass + # Use the S2Scheduler to create and store device state + if hasattr(self, 's2_scheduler') and self.s2_scheduler is not None: + # Create S2FrbcDeviceState from FRBC messages and store in scheduler + device_state = self.s2_scheduler.create_device_state_from_frbc_messages( + resource_id=resource_id, + system_description=device_data.system_description, + fill_level_target_profile=device_data.fill_level_target_profile, + storage_status=device_data.storage_status, + actuator_status=device_data.actuator_status + ) + + # Generate instructions using the scheduler + schedule_results = self.s2_scheduler.compute() + + # Send generated instructions + for result in schedule_results: + if isinstance(result, FRBCInstruction): + self._send_and_forget(result, websocket) + self.app.logger.info(f"Sent FRBC instruction: {result.to_json()}") + elif isinstance(result, dict) and "sensor" in result: + # TODO: save result["data"] to sensor if needed for FlexMeasures + pass + else: + # Scheduler not available - log warning and skip instruction generation + self.app.logger.warning(f"S2Scheduler not available for device {resource_id}, cannot generate instructions") except Exception as e: self.app.logger.error(f"Error generating instructions for device {resource_id}: {e}") + import traceback + self.app.logger.error(f"Traceback: {traceback.format_exc()}") # Continue processing other devices - - def _create_mock_asset(self): - """Create a mock asset for testing when database is unavailable.""" - from datetime import datetime, timedelta - - # Create a simple mock object that has the minimum attributes expected by S2Scheduler - class MockAsset: - def __init__(self): - self.id = 1 - self.name = "Mock S2 Asset" - # Important: S2Scheduler.deserialize_config() accesses asset.attributes - self.attributes = { - "flex-model": {}, - 
"custom-scheduler": { - "module": "flexmeasures_s2.scheduler.schedulers", - "class": "S2Scheduler", - } - } - # Create minimal mock asset type to satisfy isinstance checks - self.generic_asset_type_id = 1 - self.generic_asset_type = type('MockAssetType', (), { - 'id': 1, - 'name': 'S2Device' - })() - - return MockAsset() - - - def _create_s2_scheduler_with_frbc_data(self, device_data): - """Create S2Scheduler directly with minimal setup, bypassing FlexMeasures validation.""" - from datetime import datetime, timedelta - - # Import S2Scheduler directly - scheduler_class = self.app.data_generators["scheduler"]["S2Scheduler"] - - # Create a minimal scheduler instance by bypassing the constructor validation - scheduler = scheduler_class.__new__(scheduler_class) - - # Create properly aligned start time based on 15-minute intervals - # This ensures the timestamps align with the timestep duration - now = datetime.now().replace(tzinfo=datetime.now().astimezone().tzinfo) - resolution = timedelta(minutes=15) - - # Align start time to the nearest 15-minute boundary - minutes_offset = now.minute % 15 - seconds_offset = now.second - microseconds_offset = now.microsecond - - start_aligned = now.replace( - minute=now.minute - minutes_offset, - second=0, - microsecond=0 - ) - - # Set minimal required attributes with aligned timestamps - scheduler.sensor = None - scheduler.asset = None - scheduler.start = start_aligned - scheduler.end = start_aligned + timedelta(hours=1) - scheduler.resolution = resolution - scheduler.belief_time = start_aligned - scheduler.round_to_decimals = 6 - scheduler.flex_model = {} - scheduler.flex_context = {} - scheduler.fallback_scheduler_class = None - scheduler.info = {"scheduler": "S2Scheduler"} - scheduler.config_deserialized = True # Skip config deserialization - scheduler.return_multiple = True - - # Set the FRBC device data - scheduler.frbc_device_data = device_data - - return scheduler - - def _create_fixed_s2_scheduler_with_frbc_data(self, 
device_data): - """Create S2Scheduler with the fixed logic from flexmeasures_s2 project.""" - from datetime import datetime, timedelta, timezone - - # Import the fixed S2Scheduler directly from flexmeasures_s2 - scheduler_class = self.app.data_generators["scheduler"]["S2Scheduler"] - - # Create a minimal scheduler instance by bypassing the constructor validation - scheduler = scheduler_class.__new__(scheduler_class) - - # Create properly aligned start time based on 15-minute intervals - # This ensures the timestamps align with the timestep duration - now = datetime.now().replace(tzinfo=timezone.utc) - resolution = timedelta(minutes=15) - - # Align start time to the nearest 15-minute boundary - minutes_offset = now.minute % 15 - start_aligned = now.replace( - minute=now.minute - minutes_offset, - second=0, - microsecond=0 - ) - - # Set minimal required attributes with aligned timestamps - scheduler.sensor = None - scheduler.asset = None - scheduler.start = start_aligned - scheduler.end = start_aligned + timedelta(hours=1) - scheduler.resolution = resolution - scheduler.belief_time = start_aligned - scheduler.round_to_decimals = 6 - scheduler.flex_model = {} - scheduler.flex_context = {} - scheduler.fallback_scheduler_class = None - scheduler.info = {"scheduler": "S2Scheduler"} - scheduler.config_deserialized = True # Skip config deserialization - scheduler.return_multiple = True - - # Convert device_data to the format expected by the fixed scheduler - class MockFRBCDeviceData: - def __init__(self, data): - self.resource_id = data.resource_id - self.system_description = data.system_description - self.fill_level_target_profile = data.fill_level_target_profile - self.storage_status = data.storage_status - self.actuator_status = data.actuator_status - - # Set the FRBC device data - scheduler.frbc_device_data = MockFRBCDeviceData(device_data) - - return scheduler From 07fd98eb2c1d3497134e749f01be1ece9eb36274 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Wed, 3 Sep 
2025 13:12:45 +0200 Subject: [PATCH 031/171] Removed unnecessary s2python versioning Signed-off-by: Vlad Iftime --- requirements/app.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/app.in b/requirements/app.in index dcc99cc21a..d80428d2fc 100644 --- a/requirements/app.in +++ b/requirements/app.in @@ -68,4 +68,4 @@ werkzeug vl-convert-python Pillow>=10.0.1 # https://github.com/FlexMeasures/flexmeasures/security/dependabot/91 flask-sock -s2-python[ws] @ git+https://github.com/flexiblepower/s2-python.git@feat/flask_server \ No newline at end of file +s2-python[ws] From 4ef8b27294ac013d05983bc2e541ca464db5c6fd Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Wed, 3 Sep 2025 13:13:37 +0200 Subject: [PATCH 032/171] Removed unnecessary async version of ws message handling Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws.py | 309 --------------------------------------- 1 file changed, 309 deletions(-) delete mode 100644 flexmeasures/ws/s2_ws.py diff --git a/flexmeasures/ws/s2_ws.py b/flexmeasures/ws/s2_ws.py deleted file mode 100644 index 286ca6573c..0000000000 --- a/flexmeasures/ws/s2_ws.py +++ /dev/null @@ -1,309 +0,0 @@ -""" -Flask implementation of the S2 protocol WebSocket server. 
-""" - -import asyncio -import json -import logging -import traceback -import uuid -from typing import Any, Callable, Dict, Optional, Type - -from flask import Flask -from flask_sock import ConnectionClosed, Sock - -from s2python.common import ( - ControlType, - EnergyManagementRole, - Handshake, - HandshakeResponse, - ReceptionStatus, - ReceptionStatusValues, - SelectControlType, -) -from s2python.reception_status_awaiter import ReceptionStatusAwaiter -from s2python.message import S2Message -from s2python.s2_parser import S2Parser -from s2python.s2_validation_error import S2ValidationError -from flexmeasures.ws import sock - -# Set up logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger("S2FlaskWSServer") - - -class MessageHandlers: - """Class to manage message handlers for different message types.""" - - handlers: Dict[Type[S2Message], Callable] - - def __init__(self) -> None: - self.handlers = {} - - async def handle_message( - self, - server: "S2FlaskWSServer", - msg: S2Message, - websocket: Sock, - ) -> None: - """Handle the S2 message using the registered handler. 
- Args: - server: The server instance handling the message - msg: The S2 message to handle - websocket: The websocket connection to the client - """ - handler = self.handlers.get(type(msg)) - if handler is not None: - try: - if asyncio.iscoroutinefunction(handler): - await handler(server, msg, websocket) - else: - - def do_message() -> None: - handler(server, msg, websocket) - - eventloop = asyncio.get_event_loop() - await eventloop.run_in_executor(executor=None, func=do_message) - except Exception: - message_id = getattr(msg, "message_id", "N/A") - logger.error( - "While processing message %s an unrecoverable error occurred.", - message_id, - ) - logger.error("Error: %s", traceback.format_exc()) - await server.respond_with_reception_status( - subject_message_id=getattr( - msg, - "message_id", - uuid.UUID("00000000-0000-0000-0000-000000000000"), - ), - status=ReceptionStatusValues.PERMANENT_ERROR, - diagnostic_label=f"While processing message {message_id} an unrecoverable error occurred.", - websocket=websocket, - ) - raise - else: - logger.warning( - "Received a message of type %s but no handler is registered. Ignoring the message.", - type(msg), - ) - - def register_handler(self, msg_type: Type[S2Message], handler: Callable[..., Any]) -> None: - """Register a handler for a specific message type. - Args: - msg_type: The message type to handle - handler: The handler function - """ - self.handlers[msg_type] = handler - - -class S2FlaskWSServer: - """Flask-based WebSocket server implementation for S2 protocol.""" - - def __init__( - self, - role: EnergyManagementRole = EnergyManagementRole.CEM, - ws_path: str = "/s2", - app: Optional[Flask] = None, - sock: Optional[Sock] = None, - ) -> None: - """Initialize the WebSocket server. - Args: - app: The Flask app to use - sock: The Sock instance to use - role: The role of this server (CEM or RM) - ws_path: The path for the WebSocket endpoint. 
- """ - - self.role = role - self.ws_path = ws_path - - self.app = app if app else Flask(__name__) - self.sock = sock if sock else Sock(self.app) - - self._handlers = MessageHandlers() - self.s2_parser = S2Parser() - self._connections: Dict[str, Sock] = {} - self.reception_status_awaiter = ReceptionStatusAwaiter() - - self._register_default_handlers() - self.sock.route(self.ws_path)(self._ws_handler) - - def _register_default_handlers(self) -> None: - """Register default message handlers.""" - self._handlers.register_handler(Handshake, self.handle_handshake) - self._handlers.register_handler(HandshakeResponse, self.handle_handshake_response) - self._handlers.register_handler(ReceptionStatus, self.handle_reception_status) - - def _ws_handler(self, ws: Sock) -> None: - """ - Wrapper to run the async websocket handler from a synchronous context. - This is required for Flask's development server. An ASGI server would - be able to run the async handler directly. - """ - try: - self.app.logger.info("Received connection from client") - asyncio.run(self._handle_websocket_connection(ws)) - except Exception as e: - self.app.logger.error("Error in websocket handler: %s", e) - - async def _handle_websocket_connection(self, websocket: Sock) -> None: - """Handle incoming WebSocket connections.""" - client_id = str(uuid.uuid4()) - self.app.logger.info("Client %s connected.", client_id) - self._connections[client_id] = websocket - - try: - while True: - message = await websocket.receive() - try: - s2_msg = self.s2_parser.parse_as_any_message(message) - if isinstance(s2_msg, ReceptionStatus): - await self.reception_status_awaiter.receive_reception_status(s2_msg) - continue - except json.JSONDecodeError: - await self.respond_with_reception_status( - subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), - status=ReceptionStatusValues.INVALID_DATA, - diagnostic_label="Not valid json.", - websocket=websocket, - ) - continue - try: - await 
self._handlers.handle_message(self, s2_msg, websocket) - except json.JSONDecodeError: - await self.respond_with_reception_status( - subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), - status=ReceptionStatusValues.INVALID_DATA, - diagnostic_label="Not valid json.", - websocket=websocket, - ) - except S2ValidationError as e: - json_msg = json.loads(message) - message_id = json_msg.get("message_id") - if message_id: - await self.respond_with_reception_status( - subject_message_id=message_id, - status=ReceptionStatusValues.INVALID_MESSAGE, - diagnostic_label=str(e), - websocket=websocket, - ) - else: - await self.respond_with_reception_status( - subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), - status=ReceptionStatusValues.INVALID_DATA, - diagnostic_label="Message appears valid json but could not find a message_id field.", - websocket=websocket, - ) - except Exception as e: - self.app.logger.error("Error processing message: %s", str(e)) - raise - except ConnectionClosed: - self.app.logger.info("Connection with client %s closed", client_id) - finally: - if client_id in self._connections: - del self._connections[client_id] - self.app.logger.info("Client %s disconnected", client_id) - - async def respond_with_reception_status( - self, - subject_message_id: uuid.UUID, - status: ReceptionStatusValues, - diagnostic_label: str, - websocket: Sock, - ) -> None: - """Send a reception status response.""" - response = ReceptionStatus( - subject_message_id=subject_message_id, - status=status, - diagnostic_label=diagnostic_label, - ) - self.app.logger.info("Sending reception status %s for message %s", status, subject_message_id) - try: - await websocket.send(response.to_json()) - except ConnectionClosed: - self.app.logger.warning("Connection closed while sending reception status") - - async def send_msg_and_await_reception_status_async( - self, - s2_msg: S2Message, - websocket: Sock, - timeout_reception_status: float = 20.0, - 
raise_on_error: bool = True, - ) -> ReceptionStatus: - """Send a message and await a reception status.""" - await self._send_and_forget(s2_msg, websocket) - message_id = getattr(s2_msg, "message_id", uuid.UUID("00000000-0000-0000-0000-000000000000")) - try: - await asyncio.wait_for(websocket.receive(), timeout=timeout_reception_status) - # Assuming the response is the correct reception status - return ReceptionStatus( - subject_message_id=message_id, - status=ReceptionStatusValues.OK, - diagnostic_label="Reception status received.", - ) - except asyncio.TimeoutError: - if raise_on_error: - raise TimeoutError(f"Did not receive a reception status on time for {message_id}") - return ReceptionStatus( - subject_message_id=message_id, - status=ReceptionStatusValues.PERMANENT_ERROR, - diagnostic_label="Timeout waiting for reception status.", - ) - except ConnectionClosed: - return ReceptionStatus( - subject_message_id=message_id, - status=ReceptionStatusValues.OK, - diagnostic_label="Connection closed, assuming OK status.", - ) - - async def handle_handshake(self, _: "S2FlaskWSServer", message: S2Message, websocket: Sock) -> None: - """Handle handshake messages.""" - if not isinstance(message, Handshake): - return - self.app.logger.info("Received Handshake: %s", message.to_json()) - handshake_response = HandshakeResponse( - message_id=message.message_id, - selected_protocol_version=( - message.supported_protocol_versions[0] if message.supported_protocol_versions else "2.0.0" - ), # TODO: proper version negotiation - ) - await self._send_and_forget(handshake_response, websocket) - - await self.respond_with_reception_status( - subject_message_id=message.message_id, - status=ReceptionStatusValues.OK, - diagnostic_label="Handshake received", - websocket=websocket, - ) - - async def handle_handshake_response(self, _: "S2FlaskWSServer", message: S2Message, websocket: Sock) -> None: - """Handle handshake response messages.""" - if not isinstance(message, HandshakeResponse): - 
return - self.app.logger.debug("Received HandshakeResponse: %s", message.to_json()) - # Send ReceptionStatus (OK) for the HandshakeResponse message - await self.respond_with_reception_status( - subject_message_id=message.message_id, - status=ReceptionStatusValues.OK, - diagnostic_label="HandshakeResponse processed okay.", - websocket=websocket, - ) - - async def handle_reception_status(self, _: "S2FlaskWSServer", message: S2Message, websocket: Sock) -> None: - """Handle reception status messages.""" - if not isinstance(message, ReceptionStatus): - return - self.app.logger.info("Received ReceptionStatus in handle_reception_status: %s", message.to_json()) - - async def _send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: - """Send a message and forget about it.""" - try: - await websocket.send(s2_msg.to_json()) - except ConnectionClosed: - self.app.logger.warning("Connection closed while sending message") - - async def send_select_control_type(self, control_type: ControlType, websocket: Sock) -> None: - """Select the control type.""" - select_control_type = SelectControlType(message_id=uuid.uuid4(), control_type=control_type) - await self._send_and_forget(select_control_type, websocket) From 4e554b4c0efb426142b8a6239831a7649cf97458 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 4 Sep 2025 15:23:44 +0200 Subject: [PATCH 033/171] fix: do not crash in `finally` if s2_scheduler has no remove_device_state method Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 97 +++++++++++++++++++++++------------ 1 file changed, 63 insertions(+), 34 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index da438210e6..85190c3917 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -39,14 +39,14 @@ class FRBCDeviceData: """Class to store FRBC device data received from Resource Manager.""" - + def __init__(self): self.system_description: Optional[FRBCSystemDescription] = None self.fill_level_target_profile: Optional[FRBCFillLevelTargetProfile] = None self.storage_status: Optional[FRBCStorageStatus] = None self.actuator_status: Optional[FRBCActuatorStatus] = None self.resource_id: Optional[str] = None - + def is_complete(self) -> bool: """Check if we have received all necessary data to generate instructions.""" return ( @@ -123,8 +123,12 @@ def __init__( self._handlers = MessageHandlersSync() self.s2_parser = S2Parser() self._connections: Dict[str, Sock] = {} - self._device_data: Dict[str, FRBCDeviceData] = {} # Store device data by resource_id - self._websocket_to_resource: Dict[Sock, str] = {} # Map websocket to resource_id + self._device_data: Dict[str, FRBCDeviceData] = ( + {} + ) # Store device data by resource_id + self._websocket_to_resource: Dict[Sock, str] = ( + {} + ) # Map websocket to resource_id self._register_default_handlers() self.sock.route(self.ws_path)(self._ws_handler) @@ -135,10 +139,18 @@ def _register_default_handlers(self) -> None: ResourceManagerDetails, self.handle_ResourceManagerDetails ) # Register FRBC message handlers - self._handlers.register_handler(FRBCSystemDescription, self.handle_frbc_system_description) - self._handlers.register_handler(FRBCFillLevelTargetProfile, self.handle_frbc_fill_level_target_profile) - self._handlers.register_handler(FRBCStorageStatus, self.handle_frbc_storage_status) - self._handlers.register_handler(FRBCActuatorStatus, self.handle_frbc_actuator_status) + 
self._handlers.register_handler( + FRBCSystemDescription, self.handle_frbc_system_description + ) + self._handlers.register_handler( + FRBCFillLevelTargetProfile, self.handle_frbc_fill_level_target_profile + ) + self._handlers.register_handler( + FRBCStorageStatus, self.handle_frbc_storage_status + ) + self._handlers.register_handler( + FRBCActuatorStatus, self.handle_frbc_actuator_status + ) def _ws_handler(self, ws: Sock) -> None: try: @@ -220,15 +232,19 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: if websocket in self._websocket_to_resource: resource_id = self._websocket_to_resource[websocket] del self._websocket_to_resource[websocket] - + # Clean up device data if resource_id in self._device_data: del self._device_data[resource_id] - + # Clean up device state from scheduler if available - if hasattr(self, 's2_scheduler') and self.s2_scheduler is not None: + if ( + hasattr(self, "s2_scheduler") + and self.s2_scheduler is not None + and hasattr(self.s2_scheduler, "remove_device_state") + ): self.s2_scheduler.remove_device_state(resource_id) - + self.app.logger.info("Client %s disconnected (sync)", client_id) def respond_with_reception_status( @@ -303,7 +319,7 @@ def handle_ResourceManagerDetails( # Store the resource_id from ResourceManagerDetails for device identification resource_id = str(message.resource_id) self._websocket_to_resource[websocket] = resource_id - + if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() self._device_data[resource_id].resource_id = resource_id @@ -314,12 +330,12 @@ def handle_frbc_system_description( if not isinstance(message, FRBCSystemDescription): return self.app.logger.info("Received FRBCSystemDescription: %s", message.to_json()) - + # Get resource_id from websocket mapping resource_id = self._websocket_to_resource.get(websocket, "default_resource") if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() - + 
self._device_data[resource_id].system_description = message self._check_and_generate_instructions(resource_id, websocket) @@ -328,12 +344,14 @@ def handle_frbc_fill_level_target_profile( ) -> None: if not isinstance(message, FRBCFillLevelTargetProfile): return - self.app.logger.info("Received FRBCFillLevelTargetProfile: %s", message.to_json()) - + self.app.logger.info( + "Received FRBCFillLevelTargetProfile: %s", message.to_json() + ) + resource_id = self._websocket_to_resource.get(websocket, "default_resource") if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() - + self._device_data[resource_id].fill_level_target_profile = message self._check_and_generate_instructions(resource_id, websocket) @@ -343,11 +361,11 @@ def handle_frbc_storage_status( if not isinstance(message, FRBCStorageStatus): return self.app.logger.info("Received FRBCStorageStatus: %s", message.to_json()) - + resource_id = self._websocket_to_resource.get(websocket, "default_resource") if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() - + self._device_data[resource_id].storage_status = message self._check_and_generate_instructions(resource_id, websocket) @@ -357,52 +375,63 @@ def handle_frbc_actuator_status( if not isinstance(message, FRBCActuatorStatus): return self.app.logger.info("Received FRBCActuatorStatus: %s", message.to_json()) - + resource_id = self._websocket_to_resource.get(websocket, "default_resource") if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() - + self._device_data[resource_id].actuator_status = message self._check_and_generate_instructions(resource_id, websocket) - def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> None: + def _check_and_generate_instructions( + self, resource_id: str, websocket: Sock + ) -> None: """Check if we have all required data and generate instructions if so.""" device_data = 
self._device_data.get(resource_id) if device_data is None or not device_data.is_complete(): self.app.logger.info(f"Waiting for more data from device {resource_id}") return - - self.app.logger.info(f"All data received for device {resource_id}, generating instructions") - + + self.app.logger.info( + f"All data received for device {resource_id}, generating instructions" + ) + try: # Use the S2Scheduler to create and store device state - if hasattr(self, 's2_scheduler') and self.s2_scheduler is not None: + if hasattr(self, "s2_scheduler") and self.s2_scheduler is not None: # Create S2FrbcDeviceState from FRBC messages and store in scheduler device_state = self.s2_scheduler.create_device_state_from_frbc_messages( resource_id=resource_id, system_description=device_data.system_description, fill_level_target_profile=device_data.fill_level_target_profile, storage_status=device_data.storage_status, - actuator_status=device_data.actuator_status + actuator_status=device_data.actuator_status, ) - + # Generate instructions using the scheduler schedule_results = self.s2_scheduler.compute() - + # Send generated instructions for result in schedule_results: if isinstance(result, FRBCInstruction): self._send_and_forget(result, websocket) - self.app.logger.info(f"Sent FRBC instruction: {result.to_json()}") + self.app.logger.info( + f"Sent FRBC instruction: {result.to_json()}" + ) elif isinstance(result, dict) and "sensor" in result: # TODO: save result["data"] to sensor if needed for FlexMeasures pass else: # Scheduler not available - log warning and skip instruction generation - self.app.logger.warning(f"S2Scheduler not available for device {resource_id}, cannot generate instructions") - + self.app.logger.warning( + f"S2Scheduler not available for device {resource_id}, cannot generate instructions" + ) + except Exception as e: - self.app.logger.error(f"Error generating instructions for device {resource_id}: {e}") + self.app.logger.error( + f"Error generating instructions for device 
{resource_id}: {e}" + ) import traceback + self.app.logger.error(f"Traceback: {traceback.format_exc()}") # Continue processing other devices From 5bb90ffa7a4599fd55bd858c388bc8f9abeb8f08 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 12:04:10 +0200 Subject: [PATCH 034/171] dev: add debug statements explaining why the scheduling is not triggered (because the device state may not be complete yet) Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 85190c3917..77b825ee17 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -388,6 +388,18 @@ def _check_and_generate_instructions( ) -> None: """Check if we have all required data and generate instructions if so.""" device_data = self._device_data.get(resource_id) + self.app.logger.debug( + f"System description? {'Go flight!' if device_data.system_description is not None else 'hold on..'}" + ) + self.app.logger.debug( + f"Fill level target profile? {'Go flight!' if device_data.fill_level_target_profile is not None else 'hold on..'}" + ) + self.app.logger.debug( + f"Storage status? {'Go flight!' if device_data.storage_status is not None else 'hold on..'}" + ) + self.app.logger.debug( + f"Actuator status? {'Go flight!' if device_data.actuator_status is not None else 'hold on..'}" + ) if device_data is None or not device_data.is_complete(): self.app.logger.info(f"Waiting for more data from device {resource_id}") return From ff6f42fcb2e3c62c920f561b84f36ed13eeb70c0 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 12:05:54 +0200 Subject: [PATCH 035/171] fix: plural in method name Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 77b825ee17..3595300386 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -412,7 +412,7 @@ def _check_and_generate_instructions( # Use the S2Scheduler to create and store device state if hasattr(self, "s2_scheduler") and self.s2_scheduler is not None: # Create S2FrbcDeviceState from FRBC messages and store in scheduler - device_state = self.s2_scheduler.create_device_state_from_frbc_messages( + device_state = self.s2_scheduler.create_device_states_from_frbc_messages( resource_id=resource_id, system_description=device_data.system_description, fill_level_target_profile=device_data.fill_level_target_profile, From 3538acfb951a3a16d31860f01c055979e77681bb Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 12:08:35 +0200 Subject: [PATCH 036/171] fix: fix method name Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 3595300386..72e89f5efa 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -412,7 +412,7 @@ def _check_and_generate_instructions( # Use the S2Scheduler to create and store device state if hasattr(self, "s2_scheduler") and self.s2_scheduler is not None: # Create S2FrbcDeviceState from FRBC messages and store in scheduler - device_state = self.s2_scheduler.create_device_states_from_frbc_messages( + device_state = self.s2_scheduler.create_device_states_from_frbc_data( resource_id=resource_id, system_description=device_data.system_description, fill_level_target_profile=device_data.fill_level_target_profile, From afffdd9d2999225ba502bc1f3ab99e83949871f2 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Fri, 12 Sep 2025 12:17:25 +0200 Subject: [PATCH 037/171] dev: comment out a call Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 72e89f5efa..88ed42c6fb 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -412,13 +412,13 @@ def _check_and_generate_instructions( # Use the S2Scheduler to create and store device state if hasattr(self, "s2_scheduler") and self.s2_scheduler is not None: # Create S2FrbcDeviceState from FRBC messages and store in scheduler - device_state = self.s2_scheduler.create_device_states_from_frbc_data( - resource_id=resource_id, - system_description=device_data.system_description, - fill_level_target_profile=device_data.fill_level_target_profile, - storage_status=device_data.storage_status, - actuator_status=device_data.actuator_status, - ) + # self.device_state = self.s2_scheduler.create_device_states_from_frbc_data( + # resource_id=resource_id, + # system_description=device_data.system_description, + # fill_level_target_profile=device_data.fill_level_target_profile, + # storage_status=device_data.storage_status, + # actuator_status=device_data.actuator_status, + # ) # Generate instructions using the scheduler schedule_results = self.s2_scheduler.compute() From d9c078b745864a75099d48ac0aff44c059408785 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 12:21:14 +0200 Subject: [PATCH 038/171] feat: visual aides Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 88ed42c6fb..3ad3b2a371 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -389,16 +389,16 @@ def _check_and_generate_instructions( """Check if we have all required data and generate instructions if so.""" device_data = self._device_data.get(resource_id) self.app.logger.debug( - f"System description? {'Go flight!' if device_data.system_description is not None else 'hold on..'}" + f"System description? {'Go flight! โœ…' if device_data.system_description is not None else 'hold on.. โŒ'}" ) self.app.logger.debug( - f"Fill level target profile? {'Go flight!' if device_data.fill_level_target_profile is not None else 'hold on..'}" + f"Fill level target profile? {'Go flight! โœ…' if device_data.fill_level_target_profile is not None else 'hold on.. โŒ'}" ) self.app.logger.debug( - f"Storage status? {'Go flight!' if device_data.storage_status is not None else 'hold on..'}" + f"Storage status? {'Go flight! โœ…' if device_data.storage_status is not None else 'hold on.. โŒ'}" ) self.app.logger.debug( - f"Actuator status? {'Go flight!' if device_data.actuator_status is not None else 'hold on..'}" + f"Actuator status? {'Go flight! โœ…' if device_data.actuator_status is not None else 'hold on.. โŒ'}" ) if device_data is None or not device_data.is_complete(): self.app.logger.info(f"Waiting for more data from device {resource_id}") From 0a50d5e9a179932d68c1aa6b39d9d69d924d8a5f Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 14:39:41 +0200 Subject: [PATCH 039/171] style: less obtrusive visual aides Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 3ad3b2a371..d293701148 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -389,16 +389,16 @@ def _check_and_generate_instructions( """Check if we have all required data and generate instructions if so.""" device_data = self._device_data.get(resource_id) self.app.logger.debug( - f"System description? {'Go flight! โœ…' if device_data.system_description is not None else 'hold on.. โŒ'}" + f"System description? {'Go flight! โœ“' if device_data.system_description is not None else 'hold on.. โœ—'}" ) self.app.logger.debug( - f"Fill level target profile? {'Go flight! โœ…' if device_data.fill_level_target_profile is not None else 'hold on.. โŒ'}" + f"Fill level target profile? {'Go flight! โœ“' if device_data.fill_level_target_profile is not None else 'hold on.. โœ—'}" ) self.app.logger.debug( - f"Storage status? {'Go flight! โœ…' if device_data.storage_status is not None else 'hold on.. โŒ'}" + f"Storage status? {'Go flight! โœ“' if device_data.storage_status is not None else 'hold on.. โœ—'}" ) self.app.logger.debug( - f"Actuator status? {'Go flight! โœ…' if device_data.actuator_status is not None else 'hold on.. โŒ'}" + f"Actuator status? {'Go flight! โœ“' if device_data.actuator_status is not None else 'hold on.. โœ—'}" ) if device_data is None or not device_data.is_complete(): self.app.logger.info(f"Waiting for more data from device {resource_id}") From e5cc197dc5039cbf8d88061afb001ba645419a76 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 15:57:56 +0200 Subject: [PATCH 040/171] Revert "style: less obtrusive visual aides" This reverts commit 0a50d5e9a179932d68c1aa6b39d9d69d924d8a5f. 
--- flexmeasures/ws/s2_ws_sync.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index d293701148..3ad3b2a371 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -389,16 +389,16 @@ def _check_and_generate_instructions( """Check if we have all required data and generate instructions if so.""" device_data = self._device_data.get(resource_id) self.app.logger.debug( - f"System description? {'Go flight! โœ“' if device_data.system_description is not None else 'hold on.. โœ—'}" + f"System description? {'Go flight! โœ…' if device_data.system_description is not None else 'hold on.. โŒ'}" ) self.app.logger.debug( - f"Fill level target profile? {'Go flight! โœ“' if device_data.fill_level_target_profile is not None else 'hold on.. โœ—'}" + f"Fill level target profile? {'Go flight! โœ…' if device_data.fill_level_target_profile is not None else 'hold on.. โŒ'}" ) self.app.logger.debug( - f"Storage status? {'Go flight! โœ“' if device_data.storage_status is not None else 'hold on.. โœ—'}" + f"Storage status? {'Go flight! โœ…' if device_data.storage_status is not None else 'hold on.. โŒ'}" ) self.app.logger.debug( - f"Actuator status? {'Go flight! โœ“' if device_data.actuator_status is not None else 'hold on.. โœ—'}" + f"Actuator status? {'Go flight! โœ…' if device_data.actuator_status is not None else 'hold on.. โŒ'}" ) if device_data is None or not device_data.is_complete(): self.app.logger.info(f"Waiting for more data from device {resource_id}") From 14ea5fa61fabe860b78172d6cf06b63ba4895765 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 15:58:18 +0200 Subject: [PATCH 041/171] style: align visual aides Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 3ad3b2a371..2455809e75 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -389,16 +389,16 @@ def _check_and_generate_instructions( """Check if we have all required data and generate instructions if so.""" device_data = self._device_data.get(resource_id) self.app.logger.debug( - f"System description? {'Go flight! โœ…' if device_data.system_description is not None else 'hold on.. โŒ'}" + f"System description? {'โœ… Go flight!' if device_data.system_description is not None else 'โŒ Hold on..'}" ) self.app.logger.debug( - f"Fill level target profile? {'Go flight! โœ…' if device_data.fill_level_target_profile is not None else 'hold on.. โŒ'}" + f"Fill level target profile? {'โœ… Go flight!' if device_data.fill_level_target_profile is not None else 'โŒ Hold on..'}" ) self.app.logger.debug( - f"Storage status? {'Go flight! โœ…' if device_data.storage_status is not None else 'hold on.. โŒ'}" + f"Storage status? {'โœ… Go flight!' if device_data.storage_status is not None else 'โŒ Hold on..'}" ) self.app.logger.debug( - f"Actuator status? {'Go flight! โœ…' if device_data.actuator_status is not None else 'hold on.. โŒ'}" + f"Actuator status? {'โœ… Go flight!' if device_data.actuator_status is not None else 'โŒ Hold on..'}" ) if device_data is None or not device_data.is_complete(): self.app.logger.info(f"Waiting for more data from device {resource_id}") From 001a16ff466e9889fb2e93dc386fdad4b3ddaa65 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 16:02:14 +0200 Subject: [PATCH 042/171] dev: try setting s2_scheduler.device_data Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 2455809e75..d114350325 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -412,13 +412,14 @@ def _check_and_generate_instructions( # Use the S2Scheduler to create and store device state if hasattr(self, "s2_scheduler") and self.s2_scheduler is not None: # Create S2FrbcDeviceState from FRBC messages and store in scheduler - # self.device_state = self.s2_scheduler.create_device_states_from_frbc_data( - # resource_id=resource_id, - # system_description=device_data.system_description, - # fill_level_target_profile=device_data.fill_level_target_profile, - # storage_status=device_data.storage_status, - # actuator_status=device_data.actuator_status, - # ) + self.s2_scheduler.device_data = device_data + self.s2_scheduler.device_state = self.s2_scheduler.create_device_states_from_frbc_data( + # resource_id=resource_id, + # system_description=device_data.system_description, + # fill_level_target_profile=device_data.fill_level_target_profile, + # storage_status=device_data.storage_status, + # actuator_status=device_data.actuator_status, + ) # Generate instructions using the scheduler schedule_results = self.s2_scheduler.compute() From 4c79bacec2cdd7bcb75efe4b70a7f9eddc310fce Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 16:09:47 +0200 Subject: [PATCH 043/171] style: actual alignment Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index d114350325..6a7b04abb5 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -389,16 +389,24 @@ def _check_and_generate_instructions( """Check if we have all required data and generate instructions if so.""" device_data = self._device_data.get(resource_id) self.app.logger.debug( - f"System description? {'โœ… Go flight!' if device_data.system_description is not None else 'โŒ Hold on..'}" + "โœ… System description? Go flight!" + if device_data.system_description is not None + else "โŒ System description? Hold on.." ) self.app.logger.debug( - f"Fill level target profile? {'โœ… Go flight!' if device_data.fill_level_target_profile is not None else 'โŒ Hold on..'}" + "โœ… Fill level target profile? Go flight!" + if device_data.fill_level_target_profile is not None + else "โŒ Fill level target profile? Hold on.." ) self.app.logger.debug( - f"Storage status? {'โœ… Go flight!' if device_data.storage_status is not None else 'โŒ Hold on..'}" + "โœ… Storage status? Go flight!" + if device_data.storage_status is not None + else "โŒ Storage status? Hold on.." ) self.app.logger.debug( - f"Actuator status? {'โœ… Go flight!' if device_data.actuator_status is not None else 'โŒ Hold on..'}" + "โœ… Actuator status? Go flight!" + if device_data.actuator_status is not None + else "โŒ Actuator status? Hold on.." ) if device_data is None or not device_data.is_complete(): self.app.logger.info(f"Waiting for more data from device {resource_id}") From 4786dec553e438b0af2985d36b79e749573205d2 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 16:10:02 +0200 Subject: [PATCH 044/171] fix: set s2_scheduler.frbc_device_data instead Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 6a7b04abb5..2144cf2706 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -420,7 +420,7 @@ def _check_and_generate_instructions( # Use the S2Scheduler to create and store device state if hasattr(self, "s2_scheduler") and self.s2_scheduler is not None: # Create S2FrbcDeviceState from FRBC messages and store in scheduler - self.s2_scheduler.device_data = device_data + self.s2_scheduler.frbc_device_data = device_data self.s2_scheduler.device_state = self.s2_scheduler.create_device_states_from_frbc_data( # resource_id=resource_id, # system_description=device_data.system_description, From d20d683eb5e99df491763a5b4e905a8e706800cd Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 16:12:18 +0200 Subject: [PATCH 045/171] fix: mypy Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 2144cf2706..bcc26bb67c 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -390,22 +390,22 @@ def _check_and_generate_instructions( device_data = self._device_data.get(resource_id) self.app.logger.debug( "โœ… System description? Go flight!" - if device_data.system_description is not None + if getattr(device_data, "system_description", None) is not None else "โŒ System description? Hold on.." ) self.app.logger.debug( "โœ… Fill level target profile? Go flight!" - if device_data.fill_level_target_profile is not None + if getattr(device_data, "fill_level_target_profile", None) is not None else "โŒ Fill level target profile? Hold on.." ) self.app.logger.debug( "โœ… Storage status? Go flight!" 
- if device_data.storage_status is not None + if getattr(device_data, "storage_status", None) is not None else "โŒ Storage status? Hold on.." ) self.app.logger.debug( "โœ… Actuator status? Go flight!" - if device_data.actuator_status is not None + if getattr(device_data, "actuator_status", None) is not None else "โŒ Actuator status? Hold on.." ) if device_data is None or not device_data.is_complete(): From 8fa1069ff62f3dfe6f58772299b01ca3469f7df1 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 16:14:20 +0200 Subject: [PATCH 046/171] refactor: make debugging statements DRY Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index bcc26bb67c..8227328aa6 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -388,26 +388,17 @@ def _check_and_generate_instructions( ) -> None: """Check if we have all required data and generate instructions if so.""" device_data = self._device_data.get(resource_id) - self.app.logger.debug( - "โœ… System description? Go flight!" - if getattr(device_data, "system_description", None) is not None - else "โŒ System description? Hold on.." - ) - self.app.logger.debug( - "โœ… Fill level target profile? Go flight!" - if getattr(device_data, "fill_level_target_profile", None) is not None - else "โŒ Fill level target profile? Hold on.." - ) - self.app.logger.debug( - "โœ… Storage status? Go flight!" - if getattr(device_data, "storage_status", None) is not None - else "โŒ Storage status? Hold on.." - ) - self.app.logger.debug( - "โœ… Actuator status? Go flight!" - if getattr(device_data, "actuator_status", None) is not None - else "โŒ Actuator status? Hold on.." - ) + for attr in ( + "system_description", + "fill_level_target_profile", + "storage_status", + "actuator_status", + ): + self.app.logger.debug( + f"โœ… {attr}? 
Go flight!" + if getattr(device_data, attr, None) is not None + else f"โŒ {attr}? Hold on.." + ) if device_data is None or not device_data.is_complete(): self.app.logger.info(f"Waiting for more data from device {resource_id}") return From 3d429998b6818778ef6dc19434ce95268b1fb3bc Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 16:17:35 +0200 Subject: [PATCH 047/171] fix: HandshakeResponse should set new unique message ID, rather than copy the message ID from the handshake Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 8227328aa6..349a7e17f5 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -285,7 +285,7 @@ def handle_handshake( self.app.logger.info("Received Handshake (sync): %s", message.to_json()) handshake_response = HandshakeResponse( - message_id=message.message_id, + message_id=uuid.uuid4(), selected_protocol_version="1.0.0", ) self._send_and_forget(handshake_response, websocket) From c130755a7ca8b82559cb63c35d1b390b2d85615f Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 16:25:53 +0200 Subject: [PATCH 048/171] fix: select client-supported protocol version that is also supported by CEM Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 349a7e17f5..803891642f 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -31,6 +31,7 @@ from s2python.message import S2Message from s2python.s2_parser import S2Parser from s2python.s2_validation_error import S2ValidationError +from s2python.version import S2_VERSION # Set up logging logging.basicConfig(level=logging.INFO) @@ -284,9 +285,14 @@ def handle_handshake( return self.app.logger.info("Received Handshake (sync): %s", message.to_json()) + if S2_VERSION not in message.supported_protocol_versions: + raise NotImplementedError( + f"Server supported protocol {S2_VERSION} not supported by client. Client supports: {message.supported_protocol_versions}" + ) + handshake_response = HandshakeResponse( message_id=uuid.uuid4(), - selected_protocol_version="1.0.0", + selected_protocol_version=S2_VERSION, ) self._send_and_forget(handshake_response, websocket) self.app.logger.info("HandshakeResponse sent (sync)") From d0d309920fd82ed8d723c71473e53f937442a66c Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 16:31:19 +0200 Subject: [PATCH 049/171] dev: log tracebacks in debug level only Signed-off-by: F.N.
Claessen --- flexmeasures/ws/s2_ws_sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 803891642f..45eef9b296 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -83,7 +83,7 @@ def handle_message( "While processing message %s an unrecoverable error occurred.", message_id, ) - logger.error("Error: %s", traceback.format_exc()) + logger.debug("Error: %s", traceback.format_exc()) server.respond_with_reception_status( subject_message_id=getattr( msg, @@ -451,5 +451,5 @@ def _check_and_generate_instructions( ) import traceback - self.app.logger.error(f"Traceback: {traceback.format_exc()}") + self.app.logger.debug(f"Traceback: {traceback.format_exc()}") # Continue processing other devices From db259e04d469ff217634cd7757de7184082927e0 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 12 Sep 2025 16:43:18 +0200 Subject: [PATCH 050/171] dev: log sent messages, too Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 45eef9b296..9864c1066b 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -295,7 +295,7 @@ def handle_handshake( selected_protocol_version=S2_VERSION, ) self._send_and_forget(handshake_response, websocket) - self.app.logger.info("HandshakeResponse sent (sync)") + self.app.logger.info(f"HandshakeResponse sent (sync): {handshake_response}") # If client is RM, send control type selection if hasattr(message, "role") and message.role == EnergyManagementRole.RM: self.app.logger.info("Sending control type selection (sync)") @@ -304,7 +304,9 @@ def handle_handshake( control_type=ControlType.FILL_RATE_BASED_CONTROL, ) self._send_and_forget(select_control_type, websocket) - self.app.logger.info("SelectControlType sent (sync)") + self.app.logger.info( + f"SelectControlType sent (sync): {select_control_type}" + ) def handle_reception_status( self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock From 417df9da49765044f952a23ccf89bb5b0bf102be Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 17 Sep 2025 12:36:54 +0200 Subject: [PATCH 051/171] feat: better separate info and debug log statement Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 9864c1066b..56902d7d8c 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -97,8 +97,7 @@ def handle_message( raise else: logger.warning( - "Received a message of type %s but no handler is registered. 
Ignoring the message.", - type(msg), + f"Ignoring message of type {type(msg)}; no handler is registered", ) def register_handler( @@ -162,17 +161,15 @@ def _ws_handler(self, ws: Sock) -> None: def _handle_websocket_connection(self, websocket: Sock) -> None: client_id = str(uuid.uuid4()) - self.app.logger.info("Client %s connected (sync).", client_id) + self.app.logger.info(f"Client {client_id} connected (sync)") self._connections[client_id] = websocket try: while True: message = websocket.receive() try: s2_msg = self.s2_parser.parse_as_any_message(message) - self.app.logger.info( - "Received message in _handle_websocket_connection: %s", - s2_msg.to_json(), - ) + self.app.logger.info("Received message from client") + self.app.logger.debug(s2_msg.to_json()) except json.JSONDecodeError: self.respond_with_reception_status( subject_message_id=uuid.UUID( @@ -225,7 +222,7 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: self.app.logger.error("Error processing message: %s", str(e)) raise except ConnectionClosed: - self.app.logger.info("Connection with client %s closed (sync)", client_id) + self.app.logger.info(f"Connection with client {client_id} closed (sync)") finally: if client_id in self._connections: del self._connections[client_id] @@ -246,7 +243,7 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: ): self.s2_scheduler.remove_device_state(resource_id) - self.app.logger.info("Client %s disconnected (sync)", client_id) + self.app.logger.info(f"Client {client_id} disconnected (sync)") def respond_with_reception_status( self, @@ -261,9 +258,7 @@ def respond_with_reception_status( diagnostic_label=diagnostic_label, ) self.app.logger.info( - "Sending reception status %s for message %s (sync)", - status, - subject_message_id, + f"Sending reception status {status} for message {subject_message_id} (sync)", ) try: websocket.send(response.to_json()) @@ -283,7 +278,8 @@ def handle_handshake( ) -> None: if not isinstance(message, Handshake): 
return - self.app.logger.info("Received Handshake (sync): %s", message.to_json()) + self.app.logger.info("Received Handshake (sync)") + self.app.logger.debug(message.to_json()) if S2_VERSION not in message.supported_protocol_versions: raise NotImplementedError( @@ -295,25 +291,26 @@ def handle_handshake( selected_protocol_version=S2_VERSION, ) self._send_and_forget(handshake_response, websocket) - self.app.logger.info(f"HandshakeResponse sent (sync): {handshake_response}") + self.app.logger.info("HandshakeResponse sent (sync)") + self.app.logger.debug(handshake_response) # If client is RM, send control type selection if hasattr(message, "role") and message.role == EnergyManagementRole.RM: - self.app.logger.info("Sending control type selection (sync)") + self.app.logger.debug("Sending control type selection (sync)") select_control_type = SelectControlType( message_id=uuid.uuid4(), control_type=ControlType.FILL_RATE_BASED_CONTROL, ) self._send_and_forget(select_control_type, websocket) - self.app.logger.info( - f"SelectControlType sent (sync): {select_control_type}" - ) + self.app.logger.info("SelectControlType sent (sync)") + self.app.logger.debug(select_control_type) def handle_reception_status( self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock ) -> None: if not isinstance(message, ReceptionStatus): return - self.app.logger.info("Received ReceptionStatus (sync): %s", message.to_json()) + self.app.logger.info("Received ReceptionStatus (sync)") + self.app.logger.debug(message.to_json()) def handle_ResourceManagerDetails( self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock From 3ace626bf6b4307a8d4766d5a136755035b73ffd Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Mon, 22 Sep 2025 10:05:14 +0200 Subject: [PATCH 052/171] Added state tracking for scheduling calling Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws_sync.py | 64 +++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git 
a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 56902d7d8c..77a290fc5b 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -6,11 +6,14 @@ import logging import traceback import uuid +from datetime import datetime, timedelta, timezone from typing import Any, Callable, Dict, Optional, Type +import isodate from flask import Flask from flask_sock import ConnectionClosed, Sock +from flexmeasures.api.common.utils.validators import parse_duration from s2python.common import ( ControlType, EnergyManagementRole, @@ -58,6 +61,24 @@ def is_complete(self) -> bool: ) +class ConnectionState: + """Class to track the state of each WebSocket connection for rate limiting.""" + + def __init__(self): + self.last_compute_time: Optional[datetime] = None + self.resource_id: Optional[str] = None + + def can_compute(self, replanning_frequency: timedelta) -> bool: + """Check if enough time has passed since the last compute call.""" + if self.last_compute_time is None: + return True + return datetime.now(timezone.utc) - self.last_compute_time >= replanning_frequency + + def update_compute_time(self) -> None: + """Update the last compute time to now.""" + self.last_compute_time = datetime.now(timezone.utc) + + class MessageHandlersSync: """Class to manage sync message handlers for different message types.""" @@ -129,6 +150,9 @@ def __init__( self._websocket_to_resource: Dict[Sock, str] = ( {} ) # Map websocket to resource_id + self._connection_states: Dict[Sock, ConnectionState] = ( + {} + ) # Track connection state for rate limiting self._register_default_handlers() self.sock.route(self.ws_path)(self._ws_handler) @@ -163,6 +187,8 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: client_id = str(uuid.uuid4()) self.app.logger.info(f"Client {client_id} connected (sync)") self._connections[client_id] = websocket + # Initialize connection state for rate limiting + self._connection_states[websocket] = ConnectionState() try: 
while True: message = websocket.receive() @@ -243,6 +269,10 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: ): self.s2_scheduler.remove_device_state(resource_id) + # Clean up connection state + if websocket in self._connection_states: + del self._connection_states[websocket] + self.app.logger.info(f"Client {client_id} disconnected (sync)") def respond_with_reception_status( @@ -408,6 +438,37 @@ def _check_and_generate_instructions( self.app.logger.info(f"Waiting for more data from device {resource_id}") return + # Check rate limiting based on FLEXMEASURES_S2_REPLANNING_FREQUENCY + connection_state = self._connection_states.get(websocket) + if connection_state is None: + self.app.logger.warning(f"No connection state found for device {resource_id}") + return + + # Parse replanning frequency from config + replanning_freq_str = self.app.config.get("FLEXMEASURES_S2_REPLANNING_FREQUENCY", "PT5M") + try: + replanning_frequency = parse_duration(replanning_freq_str) + if replanning_frequency is None: + raise ValueError(f"Invalid duration format: {replanning_freq_str}") + if not isinstance(replanning_frequency, timedelta): + # Handle isodate.Duration objects by converting to timedelta + # For simplicity, assume it's a basic duration that can be converted + replanning_frequency = timedelta(seconds=replanning_frequency.total_seconds()) + except Exception as e: + self.app.logger.error(f"Error parsing FLEXMEASURES_S2_REPLANNING_FREQUENCY '{replanning_freq_str}': {e}") + replanning_frequency = timedelta(minutes=5) # Default to 5 minutes + + # Check if we can compute based on rate limiting + if not connection_state.can_compute(replanning_frequency): + time_since_last = datetime.now(timezone.utc) - connection_state.last_compute_time + remaining_time = replanning_frequency - time_since_last + self.app.logger.info( + f"Rate limiting: Cannot generate instructions for device {resource_id}. " + f"Last compute was {time_since_last.total_seconds():.1f}s ago. 
" + f"Need to wait {remaining_time.total_seconds():.1f}s more." + ) + return + self.app.logger.info( f"All data received for device {resource_id}, generating instructions" ) @@ -425,6 +486,9 @@ def _check_and_generate_instructions( # actuator_status=device_data.actuator_status, ) + # Update the compute time before calling the scheduler + connection_state.update_compute_time() + # Generate instructions using the scheduler schedule_results = self.s2_scheduler.compute() From 5778dabb387c25df8c47e88f534939a2110d2a0e Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Tue, 23 Sep 2025 10:54:28 +0200 Subject: [PATCH 053/171] Clean-up of instruction list before sending it Signed-off-by: Vlad Iftime --- flexmeasures/ws/__init__.py | 1 - flexmeasures/ws/s2_ws_sync.py | 46 +++++++++++++++++++++++++++++------ 2 files changed, 39 insertions(+), 8 deletions(-) diff --git a/flexmeasures/ws/__init__.py b/flexmeasures/ws/__init__.py index f565dea771..ab877f4798 100644 --- a/flexmeasures/ws/__init__.py +++ b/flexmeasures/ws/__init__.py @@ -1,7 +1,6 @@ import importlib import pkgutil from flask import Blueprint, current_app -from simple_websocket import Server from flask_security import auth_token_required from flask_sock import Sock diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 77a290fc5b..6c68ad1c7d 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -67,6 +67,7 @@ class ConnectionState: def __init__(self): self.last_compute_time: Optional[datetime] = None self.resource_id: Optional[str] = None + self.last_operation_mode: Optional[uuid.UUID] = None def can_compute(self, replanning_frequency: timedelta) -> bool: """Check if enough time has passed since the last compute call.""" @@ -303,6 +304,23 @@ def _send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: except ConnectionClosed: self.app.logger.warning("Connection closed while sending message (sync)") + def _filter_instructions_by_operation_mode(self, 
instructions: list, connection_state: ConnectionState) -> list: + """Filter instructions to only include those with different operation_mode than the previous instruction.""" + if not instructions: + return instructions + + filtered = [] + last_operation_mode = connection_state.last_operation_mode + + for instruction in instructions: + # Always include the first instruction if we haven't sent any before + # or if the operation mode is different from the last sent instruction + if last_operation_mode is None or instruction.operation_mode != last_operation_mode: + filtered.append(instruction) + last_operation_mode = instruction.operation_mode + + return filtered + def handle_handshake( self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock ) -> None: @@ -492,14 +510,28 @@ def _check_and_generate_instructions( # Generate instructions using the scheduler schedule_results = self.s2_scheduler.compute() - # Send generated instructions + # Filter and send generated instructions + frbc_instructions = [result for result in schedule_results if isinstance(result, FRBCInstruction)] + filtered_instructions = self._filter_instructions_by_operation_mode(frbc_instructions, connection_state) + + for instruction in filtered_instructions: + self._send_and_forget(instruction, websocket) + self.app.logger.info( + f"Sent FRBC instruction: {instruction.to_json()}" + ) + # Update the last operation mode for this connection + connection_state.last_operation_mode = instruction.operation_mode + + # Log filtering results + if len(frbc_instructions) > len(filtered_instructions): + self.app.logger.info( + f"Filtered instructions: {len(frbc_instructions)} -> {len(filtered_instructions)} " + f"(reduced by {len(frbc_instructions) - len(filtered_instructions)})" + ) + + # Process non-instruction results for result in schedule_results: - if isinstance(result, FRBCInstruction): - self._send_and_forget(result, websocket) - self.app.logger.info( - f"Sent FRBC instruction: 
{result.to_json()}" - ) - elif isinstance(result, dict) and "sensor" in result: + if isinstance(result, dict) and "sensor" in result: # TODO: save result["data"] to sensor if needed for FlexMeasures pass else: From b57eca43f33bd889114c98a7a374ce140df16bb0 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Tue, 23 Sep 2025 13:25:55 +0200 Subject: [PATCH 054/171] fix: Update WebSocket integration to use S2FlaskScheduler Update app.py and s2_ws_sync.py to use the new S2FlaskScheduler class instead of S2Scheduler for proper Flask application context integration. This fixes WebSocket scheduler initialization and ensures proper integration with Flask logging and configuration systems. --- flexmeasures/app.py | 8 ++++---- flexmeasures/ws/s2_ws_sync.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index fe552394a4..7a43a37538 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -260,8 +260,8 @@ def ws_connection_auth(): if not hasattr(s2_ws, 's2_scheduler'): from datetime import datetime, timedelta, timezone - # Get S2Scheduler class from registered schedulers - scheduler_class = app.data_generators["scheduler"]["S2Scheduler"] + # Get S2FlaskScheduler class from registered schedulers + scheduler_class = app.data_generators["scheduler"]["S2FlaskScheduler"] # Create scheduler instance with minimal setup for WebSocket usage scheduler = scheduler_class.__new__(scheduler_class) @@ -289,7 +289,7 @@ def ws_connection_auth(): scheduler.flex_model = {} scheduler.flex_context = {} scheduler.fallback_scheduler_class = None - scheduler.info = {"scheduler": "S2Scheduler"} + scheduler.info = {"scheduler": "S2FlaskScheduler"} scheduler.config_deserialized = True scheduler.return_multiple = True @@ -298,7 +298,7 @@ def ws_connection_auth(): # Attach scheduler to WebSocket server s2_ws.s2_scheduler = scheduler - app.logger.info("S2Scheduler initialized for WebSocket connections") + app.logger.info("S2FlaskScheduler 
initialized for WebSocket connections") return # Let other before_request hooks handle it diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 6c68ad1c7d..e32141c50d 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -492,7 +492,7 @@ def _check_and_generate_instructions( ) try: - # Use the S2Scheduler to create and store device state + # Use the S2FlaskScheduler to create and store device state if hasattr(self, "s2_scheduler") and self.s2_scheduler is not None: # Create S2FrbcDeviceState from FRBC messages and store in scheduler self.s2_scheduler.frbc_device_data = device_data @@ -537,7 +537,7 @@ def _check_and_generate_instructions( else: # Scheduler not available - log warning and skip instruction generation self.app.logger.warning( - f"S2Scheduler not available for device {resource_id}, cannot generate instructions" + f"S2FlaskScheduler not available for device {resource_id}, cannot generate instructions" ) except Exception as e: From bc3aa900962384945441436e9521846bf426c204 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 24 Sep 2025 09:58:29 +0200 Subject: [PATCH 055/171] docs: add more context to info log message Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index e32141c50d..9d7dd3ad1f 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -453,7 +453,9 @@ def _check_and_generate_instructions( else f"โŒ {attr}? Hold on.." 
) if device_data is None or not device_data.is_complete(): - self.app.logger.info(f"Waiting for more data from device {resource_id}") + self.app.logger.info( + f"Waiting for more data from device {resource_id} before running the S2FlaskScheduler" + ) return # Check rate limiting based on FLEXMEASURES_S2_REPLANNING_FREQUENCY From fb0fb61bfcac85c64652a2a7ef442a016a8b7a8c Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 24 Sep 2025 10:00:45 +0200 Subject: [PATCH 056/171] style: black and flake8 Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 58 ++++++++++++++++++++++++----------- 1 file changed, 40 insertions(+), 18 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 9d7dd3ad1f..4925974661 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -9,7 +9,6 @@ from datetime import datetime, timedelta, timezone from typing import Any, Callable, Dict, Optional, Type -import isodate from flask import Flask from flask_sock import ConnectionClosed, Sock @@ -73,7 +72,9 @@ def can_compute(self, replanning_frequency: timedelta) -> bool: """Check if enough time has passed since the last compute call.""" if self.last_compute_time is None: return True - return datetime.now(timezone.utc) - self.last_compute_time >= replanning_frequency + return ( + datetime.now(timezone.utc) - self.last_compute_time >= replanning_frequency + ) def update_compute_time(self) -> None: """Update the last compute time to now.""" @@ -304,21 +305,26 @@ def _send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: except ConnectionClosed: self.app.logger.warning("Connection closed while sending message (sync)") - def _filter_instructions_by_operation_mode(self, instructions: list, connection_state: ConnectionState) -> list: + def _filter_instructions_by_operation_mode( + self, instructions: list, connection_state: ConnectionState + ) -> list: """Filter instructions to only include those with different 
operation_mode than the previous instruction.""" if not instructions: return instructions - + filtered = [] last_operation_mode = connection_state.last_operation_mode - + for instruction in instructions: # Always include the first instruction if we haven't sent any before # or if the operation mode is different from the last sent instruction - if last_operation_mode is None or instruction.operation_mode != last_operation_mode: + if ( + last_operation_mode is None + or instruction.operation_mode != last_operation_mode + ): filtered.append(instruction) last_operation_mode = instruction.operation_mode - + return filtered def handle_handshake( @@ -436,7 +442,7 @@ def handle_frbc_actuator_status( self._device_data[resource_id].actuator_status = message self._check_and_generate_instructions(resource_id, websocket) - def _check_and_generate_instructions( + def _check_and_generate_instructions( # noqa: C901 self, resource_id: str, websocket: Sock ) -> None: """Check if we have all required data and generate instructions if so.""" @@ -461,11 +467,15 @@ def _check_and_generate_instructions( # Check rate limiting based on FLEXMEASURES_S2_REPLANNING_FREQUENCY connection_state = self._connection_states.get(websocket) if connection_state is None: - self.app.logger.warning(f"No connection state found for device {resource_id}") + self.app.logger.warning( + f"No connection state found for device {resource_id}" + ) return # Parse replanning frequency from config - replanning_freq_str = self.app.config.get("FLEXMEASURES_S2_REPLANNING_FREQUENCY", "PT5M") + replanning_freq_str = self.app.config.get( + "FLEXMEASURES_S2_REPLANNING_FREQUENCY", "PT5M" + ) try: replanning_frequency = parse_duration(replanning_freq_str) if replanning_frequency is None: @@ -473,14 +483,20 @@ def _check_and_generate_instructions( if not isinstance(replanning_frequency, timedelta): # Handle isodate.Duration objects by converting to timedelta # For simplicity, assume it's a basic duration that can be converted - 
replanning_frequency = timedelta(seconds=replanning_frequency.total_seconds()) + replanning_frequency = timedelta( + seconds=replanning_frequency.total_seconds() + ) except Exception as e: - self.app.logger.error(f"Error parsing FLEXMEASURES_S2_REPLANNING_FREQUENCY '{replanning_freq_str}': {e}") + self.app.logger.error( + f"Error parsing FLEXMEASURES_S2_REPLANNING_FREQUENCY '{replanning_freq_str}': {e}" + ) replanning_frequency = timedelta(minutes=5) # Default to 5 minutes # Check if we can compute based on rate limiting if not connection_state.can_compute(replanning_frequency): - time_since_last = datetime.now(timezone.utc) - connection_state.last_compute_time + time_since_last = ( + datetime.now(timezone.utc) - connection_state.last_compute_time + ) remaining_time = replanning_frequency - time_since_last self.app.logger.info( f"Rate limiting: Cannot generate instructions for device {resource_id}. " @@ -513,9 +529,15 @@ def _check_and_generate_instructions( schedule_results = self.s2_scheduler.compute() # Filter and send generated instructions - frbc_instructions = [result for result in schedule_results if isinstance(result, FRBCInstruction)] - filtered_instructions = self._filter_instructions_by_operation_mode(frbc_instructions, connection_state) - + frbc_instructions = [ + result + for result in schedule_results + if isinstance(result, FRBCInstruction) + ] + filtered_instructions = self._filter_instructions_by_operation_mode( + frbc_instructions, connection_state + ) + for instruction in filtered_instructions: self._send_and_forget(instruction, websocket) self.app.logger.info( @@ -523,14 +545,14 @@ def _check_and_generate_instructions( ) # Update the last operation mode for this connection connection_state.last_operation_mode = instruction.operation_mode - + # Log filtering results if len(frbc_instructions) > len(filtered_instructions): self.app.logger.info( f"Filtered instructions: {len(frbc_instructions)} -> {len(filtered_instructions)} " f"(reduced by 
{len(frbc_instructions) - len(filtered_instructions)})" ) - + # Process non-instruction results for result in schedule_results: if isinstance(result, dict) and "sensor" in result: From 53b7e03cbfbe21ec92c9c704644b64b6407017d1 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 24 Sep 2025 10:16:51 +0200 Subject: [PATCH 057/171] dev: don't (debug) log S2 messages that are too verbose Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 4925974661..413265f0af 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -196,8 +196,12 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: message = websocket.receive() try: s2_msg = self.s2_parser.parse_as_any_message(message) - self.app.logger.info("Received message from client") - self.app.logger.debug(s2_msg.to_json()) + self.app.logger.info(f"Received {s2_msg.message_type }message from client") + + # Don't log verbose messages + verbose_message_types = ["FRBC.UsageForecast"] + if s2_msg.message_type not in verbose_message_types: + self.app.logger.debug(s2_msg.to_json()) except json.JSONDecodeError: self.respond_with_reception_status( subject_message_id=uuid.UUID( From 04f2eafe2b2bdddb04266e644ed4b09cdd223638 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 24 Sep 2025 10:25:02 +0200 Subject: [PATCH 058/171] fix: typo Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 413265f0af..e52578db4a 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -196,7 +196,7 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: message = websocket.receive() try: s2_msg = self.s2_parser.parse_as_any_message(message) - self.app.logger.info(f"Received {s2_msg.message_type }message from client") + self.app.logger.info(f"Received {s2_msg.message_type} message from client") # Don't log verbose messages verbose_message_types = ["FRBC.UsageForecast"] From 0d1286066c0093b32b0cef6a07cf0dd9b3f2c411 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Wed, 24 Sep 2025 11:04:50 +0200 Subject: [PATCH 059/171] Wait for all actuator statuses to be sent before scheduling Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws_sync.py | 60 ++++++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 19 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index e52578db4a..78045ed043 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -47,17 +47,26 @@ def __init__(self): self.system_description: Optional[FRBCSystemDescription] = None self.fill_level_target_profile: Optional[FRBCFillLevelTargetProfile] = None self.storage_status: Optional[FRBCStorageStatus] = None - self.actuator_status: Optional[FRBCActuatorStatus] = None + self.actuator_statuses: Dict[str, FRBCActuatorStatus] = {} # Changed to dict by actuator_id self.resource_id: Optional[str] = None def is_complete(self) -> bool: """Check if we have received all necessary data to generate instructions.""" - return ( - self.system_description is not None - and self.fill_level_target_profile is not None - and self.storage_status is not None - and self.actuator_status is not None - ) + # Check basic required data + if ( + 
self.system_description is None + or self.fill_level_target_profile is None + or self.storage_status is None + ): + return False + + # Check that we have actuator status for ALL actuators in system description + if self.system_description.actuators: + required_actuator_ids = {str(actuator.id) for actuator in self.system_description.actuators} + received_actuator_ids = set(self.actuator_statuses.keys()) + return required_actuator_ids.issubset(received_actuator_ids) + + return True class ConnectionState: @@ -443,7 +452,8 @@ def handle_frbc_actuator_status( if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() - self._device_data[resource_id].actuator_status = message + # Store actuator status by actuator_id to support multiple actuators + self._device_data[resource_id].actuator_statuses[str(message.actuator_id)] = message self._check_and_generate_instructions(resource_id, websocket) def _check_and_generate_instructions( # noqa: C901 @@ -451,17 +461,29 @@ def _check_and_generate_instructions( # noqa: C901 ) -> None: """Check if we have all required data and generate instructions if so.""" device_data = self._device_data.get(resource_id) - for attr in ( - "system_description", - "fill_level_target_profile", - "storage_status", - "actuator_status", - ): - self.app.logger.debug( - f"โœ… {attr}? Go flight!" - if getattr(device_data, attr, None) is not None - else f"โŒ {attr}? Hold on.." - ) + if device_data: + # Debug log basic attributes + for attr in ( + "system_description", + "fill_level_target_profile", + "storage_status", + ): + self.app.logger.debug( + f"โœ… {attr}? Go flight!" + if getattr(device_data, attr, None) is not None + else f"โŒ {attr}? Hold on.." 
+ ) + + # Debug log actuator statuses + if device_data.system_description and device_data.system_description.actuators: + required_actuators = {str(a.id) for a in device_data.system_description.actuators} + received_actuators = set(device_data.actuator_statuses.keys()) + missing_actuators = required_actuators - received_actuators + + if missing_actuators: + self.app.logger.debug(f"โŒ actuator_status? Hold on.. Missing: {missing_actuators}") + else: + self.app.logger.debug(f"โœ… actuator_status? Go flight! All {len(required_actuators)} actuators received") if device_data is None or not device_data.is_complete(): self.app.logger.info( f"Waiting for more data from device {resource_id} before running the S2FlaskScheduler" From d0ec87960fdf5c7daa19260a05cc875943914e37 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Wed, 24 Sep 2025 11:17:45 +0200 Subject: [PATCH 060/171] Chore: cleaned logs from Reception status Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws_sync.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 78045ed043..ae2d372f51 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -376,7 +376,6 @@ def handle_reception_status( ) -> None: if not isinstance(message, ReceptionStatus): return - self.app.logger.info("Received ReceptionStatus (sync)") self.app.logger.debug(message.to_json()) def handle_ResourceManagerDetails( From 870b3d5060b676f55f95caf4250fbda729374b2b Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 24 Sep 2025 14:08:16 +0200 Subject: [PATCH 061/171] refactor: ensure_resource_is_registered Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index ae2d372f51..8886c6396e 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -404,8 +404,7 @@ def handle_frbc_system_description( # Get resource_id from websocket mapping resource_id = self._websocket_to_resource.get(websocket, "default_resource") - if resource_id not in self._device_data: - self._device_data[resource_id] = FRBCDeviceData() + self.ensure_resource_is_registered(resource_id=resource_id) self._device_data[resource_id].system_description = message self._check_and_generate_instructions(resource_id, websocket) @@ -420,8 +419,7 @@ def handle_frbc_fill_level_target_profile( ) resource_id = self._websocket_to_resource.get(websocket, "default_resource") - if resource_id not in self._device_data: - self._device_data[resource_id] = FRBCDeviceData() + self.ensure_resource_is_registered(resource_id=resource_id) self._device_data[resource_id].fill_level_target_profile = message self._check_and_generate_instructions(resource_id, websocket) @@ -434,8 +432,7 @@ def handle_frbc_storage_status( self.app.logger.info("Received FRBCStorageStatus: %s", message.to_json()) resource_id = self._websocket_to_resource.get(websocket, "default_resource") - if resource_id not in self._device_data: - self._device_data[resource_id] = FRBCDeviceData() + self.ensure_resource_is_registered(resource_id=resource_id) self._device_data[resource_id].storage_status = message self._check_and_generate_instructions(resource_id, websocket) @@ -448,13 +445,16 @@ def handle_frbc_actuator_status( self.app.logger.info("Received FRBCActuatorStatus: %s", message.to_json()) resource_id = self._websocket_to_resource.get(websocket, "default_resource") - if resource_id not in self._device_data: - self._device_data[resource_id] = FRBCDeviceData() + 
self.ensure_resource_is_registered(resource_id=resource_id) # Store actuator status by actuator_id to support multiple actuators self._device_data[resource_id].actuator_statuses[str(message.actuator_id)] = message self._check_and_generate_instructions(resource_id, websocket) + def ensure_resource_is_registered(self, resource_id: str): + if resource_id not in self._device_data: + self._device_data[resource_id] = FRBCDeviceData() + def _check_and_generate_instructions( # noqa: C901 self, resource_id: str, websocket: Sock ) -> None: From a11f9998df7b8939d92a660048cc6d66b6bafb8a Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 24 Sep 2025 14:08:54 +0200 Subject: [PATCH 062/171] style: black Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 8886c6396e..8ac3faeead 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -59,13 +59,13 @@ def is_complete(self) -> bool: or self.storage_status is None ): return False - + # Check that we have actuator status for ALL actuators in system description if self.system_description.actuators: required_actuator_ids = {str(actuator.id) for actuator in self.system_description.actuators} received_actuator_ids = set(self.actuator_statuses.keys()) return required_actuator_ids.issubset(received_actuator_ids) - + return True @@ -464,7 +464,7 @@ def _check_and_generate_instructions( # noqa: C901 # Debug log basic attributes for attr in ( "system_description", - "fill_level_target_profile", + "fill_level_target_profile", "storage_status", ): self.app.logger.debug( @@ -472,13 +472,13 @@ def _check_and_generate_instructions( # noqa: C901 if getattr(device_data, attr, None) is not None else f"โŒ {attr}? Hold on.." 
) - + # Debug log actuator statuses if device_data.system_description and device_data.system_description.actuators: required_actuators = {str(a.id) for a in device_data.system_description.actuators} received_actuators = set(device_data.actuator_statuses.keys()) missing_actuators = required_actuators - received_actuators - + if missing_actuators: self.app.logger.debug(f"โŒ actuator_status? Hold on.. Missing: {missing_actuators}") else: From 0a4384dae7efb9ae7ee3e11074def95edd64396c Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 29 Sep 2025 11:46:46 +0200 Subject: [PATCH 063/171] fix: unresolved attribute reference (typing warning) Signed-off-by: F.N. Claessen --- flexmeasures/app.py | 2 +- flexmeasures/ws/s2_ws_sync.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 7a43a37538..aa9bc29db2 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -257,7 +257,7 @@ def ws_connection_auth(): token = auth_header.removeprefix("Bearer ").strip() if token == app.config.get("WEBSOCKET_BEARER_TOKEN", None): # Initialize S2Scheduler for this WebSocket connection if not already done - if not hasattr(s2_ws, 's2_scheduler'): + if not hasattr(s2_ws, 's2_scheduler') or s2_ws.s2_scheduler is None: from datetime import datetime, timedelta, timezone # Get S2FlaskScheduler class from registered schedulers diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 8ac3faeead..cea5015a9f 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -166,6 +166,7 @@ def __init__( ) # Track connection state for rate limiting self._register_default_handlers() self.sock.route(self.ws_path)(self._ws_handler) + self.s2_scheduler = None def _register_default_handlers(self) -> None: self._handlers.register_handler(Handshake, self.handle_handshake) From 2392bf1f73be671bba15e50cb245225dd49388b1 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Mon, 29 Sep 2025 11:47:27 +0200 Subject: [PATCH 064/171] refactor: simplify condition Signed-off-by: F.N. Claessen --- flexmeasures/app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index aa9bc29db2..0156ae400e 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -257,7 +257,7 @@ def ws_connection_auth(): token = auth_header.removeprefix("Bearer ").strip() if token == app.config.get("WEBSOCKET_BEARER_TOKEN", None): # Initialize S2Scheduler for this WebSocket connection if not already done - if not hasattr(s2_ws, 's2_scheduler') or s2_ws.s2_scheduler is None: + if getattr(s2_ws, 's2_scheduler', None) is None: from datetime import datetime, timedelta, timezone # Get S2FlaskScheduler class from registered schedulers From 1de460cb3c317b0fe7b1ad5dfaf439b8aac7a3a5 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 29 Sep 2025 11:51:53 +0200 Subject: [PATCH 065/171] feat: switch from to WEBSOCKET_BEARER_TOKEN to FLEXMEASURES_S2_BEARERS (multiple bearer tokens) Signed-off-by: F.N. 
Claessen --- flexmeasures/app.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 0156ae400e..94517b0b07 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -21,6 +21,8 @@ from redis import Redis from rq import Queue +from flexmeasures import User +from flexmeasures.data import db from flexmeasures.data.services.job_cache import JobCache @@ -255,7 +257,15 @@ def ws_connection_auth(): auth_header = request.headers.get("Authorization", "") if auth_header.startswith("Bearer "): token = auth_header.removeprefix("Bearer ").strip() - if token == app.config.get("WEBSOCKET_BEARER_TOKEN", None): + user_id = app.config.get("FLEXMEASURES_S2_BEARERS", {}).get(token, None) + if user_id is not None: + + user = db.session.get(User, user_id) + + # Attach account to WebSocket server + s2_ws.account = user.account + app.logger.info("Account authorized for WebSocket connections") + # Initialize S2Scheduler for this WebSocket connection if not already done if getattr(s2_ws, 's2_scheduler', None) is None: from datetime import datetime, timedelta, timezone From 0055462b058c483bead3e92004da19acdb2d4441 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 29 Sep 2025 12:16:06 +0200 Subject: [PATCH 066/171] feat: create unique asset for each unique resource Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index cea5015a9f..9edd6f3131 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -12,7 +12,9 @@ from flask import Flask from flask_sock import ConnectionClosed, Sock +from flexmeasures import Asset from flexmeasures.api.common.utils.validators import parse_duration +from flexmeasures.data.services.utils import get_or_create_model from s2python.common import ( ControlType, EnergyManagementRole, @@ -453,6 +455,9 @@ def handle_frbc_actuator_status( self._check_and_generate_instructions(resource_id, websocket) def ensure_resource_is_registered(self, resource_id: str): + self._assets[resource_id] = get_or_create_model( + Asset, name=resource_id, account_id=self.account.id + ) if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() From d73a689a189d5bdb1fce42d327d65004b0f1a470 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 29 Sep 2025 12:16:40 +0200 Subject: [PATCH 067/171] fix: initialize properties with empty defaults Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 9edd6f3131..bd8fea2180 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -169,6 +169,8 @@ def __init__( self._register_default_handlers() self.sock.route(self.ws_path)(self._ws_handler) self.s2_scheduler = None + self.account = None + self._assets: Dict[str, Asset] = {} def _register_default_handlers(self) -> None: self._handlers.register_handler(Handshake, self.handle_handshake) From ecd6f202ba9d73d4e8d9c5e4bc92c6c663d0b8c6 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 1 Oct 2025 12:15:00 +0200 Subject: [PATCH 068/171] dev: debug using server log Signed-off-by: F.N. 
Claessen --- flexmeasures/api/common/routes.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/flexmeasures/api/common/routes.py b/flexmeasures/api/common/routes.py index 148ed7984b..38a14fb180 100644 --- a/flexmeasures/api/common/routes.py +++ b/flexmeasures/api/common/routes.py @@ -1,4 +1,6 @@ +from flask import current_app, stream_with_context, Response from flask_security import auth_token_required +from werkzeug.exceptions import NotFound from flexmeasures.auth.decorators import roles_required from flexmeasures.api.common import flexmeasures_api as flexmeasures_api_ops @@ -20,3 +22,20 @@ def get_task_run(): @roles_required("task-runner") def post_task_run(): return ops_impl.post_task_run() + + +@flexmeasures_api_ops.route("/logs") +def stream_logs(): + """Stream server logs for debugging.""" + if current_app.config.get("LOGGING_LEVEL") != "DEBUG": + raise NotFound + + def generate(): + with open("flexmeasures.log") as f: + f.seek(0, 2) # go to end of file + while True: + line = f.readline() + if line: + yield line + + return Response(stream_with_context(generate()), mimetype="text/plain") From 18582eb0742d0246c08776aa3cd954caacad0e76 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 1 Oct 2025 12:17:11 +0200 Subject: [PATCH 069/171] dev: add extra security Signed-off-by: F.N. Claessen --- flexmeasures/api/common/routes.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/flexmeasures/api/common/routes.py b/flexmeasures/api/common/routes.py index 38a14fb180..711a3c5d54 100644 --- a/flexmeasures/api/common/routes.py +++ b/flexmeasures/api/common/routes.py @@ -25,6 +25,8 @@ def post_task_run(): @flexmeasures_api_ops.route("/logs") +@auth_token_required +@roles_required("debugger") def stream_logs(): """Stream server logs for debugging.""" if current_app.config.get("LOGGING_LEVEL") != "DEBUG": From 6e7a196e339339c99450fb936284b99cbb238bd4 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Wed, 1 Oct 2025 12:25:19 +0200 Subject: [PATCH 070/171] dev: switch to login required Signed-off-by: F.N. Claessen --- flexmeasures/api/common/routes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flexmeasures/api/common/routes.py b/flexmeasures/api/common/routes.py index 711a3c5d54..e427c163e9 100644 --- a/flexmeasures/api/common/routes.py +++ b/flexmeasures/api/common/routes.py @@ -1,5 +1,5 @@ from flask import current_app, stream_with_context, Response -from flask_security import auth_token_required +from flask_security import auth_token_required, login_required from werkzeug.exceptions import NotFound from flexmeasures.auth.decorators import roles_required @@ -25,7 +25,7 @@ def post_task_run(): @flexmeasures_api_ops.route("/logs") -@auth_token_required +@login_required @roles_required("debugger") def stream_logs(): """Stream server logs for debugging.""" From 2378b334bc8ac5b439e2a8b0f9e96e050fe3f41c Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 1 Oct 2025 12:37:50 +0200 Subject: [PATCH 071/171] Revert "fix: initialize properties with empty defaults" This reverts commit d73a689a189d5bdb1fce42d327d65004b0f1a470. --- flexmeasures/ws/s2_ws_sync.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index bd8fea2180..9edd6f3131 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -169,8 +169,6 @@ def __init__( self._register_default_handlers() self.sock.route(self.ws_path)(self._ws_handler) self.s2_scheduler = None - self.account = None - self._assets: Dict[str, Asset] = {} def _register_default_handlers(self) -> None: self._handlers.register_handler(Handshake, self.handle_handshake) From 858c0c54be7a598ab2d024dce42900fa4bc2a9c9 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Wed, 1 Oct 2025 12:37:51 +0200 Subject: [PATCH 072/171] Revert "feat: create unique asset for each unique resource" This reverts commit 0055462b058c483bead3e92004da19acdb2d4441. --- flexmeasures/ws/s2_ws_sync.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 9edd6f3131..cea5015a9f 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -12,9 +12,7 @@ from flask import Flask from flask_sock import ConnectionClosed, Sock -from flexmeasures import Asset from flexmeasures.api.common.utils.validators import parse_duration -from flexmeasures.data.services.utils import get_or_create_model from s2python.common import ( ControlType, EnergyManagementRole, @@ -455,9 +453,6 @@ def handle_frbc_actuator_status( self._check_and_generate_instructions(resource_id, websocket) def ensure_resource_is_registered(self, resource_id: str): - self._assets[resource_id] = get_or_create_model( - Asset, name=resource_id, account_id=self.account.id - ) if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() From c45472d50566fab05762abea19d7a8b660b09131 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 1 Oct 2025 12:37:51 +0200 Subject: [PATCH 073/171] Revert "feat: switch from to WEBSOCKET_BEARER_TOKEN to FLEXMEASURES_S2_BEARERS (multiple bearer tokens)" This reverts commit 1de460cb3c317b0fe7b1ad5dfaf439b8aac7a3a5. 
--- flexmeasures/app.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 94517b0b07..0156ae400e 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -21,8 +21,6 @@ from redis import Redis from rq import Queue -from flexmeasures import User -from flexmeasures.data import db from flexmeasures.data.services.job_cache import JobCache @@ -257,15 +255,7 @@ def ws_connection_auth(): auth_header = request.headers.get("Authorization", "") if auth_header.startswith("Bearer "): token = auth_header.removeprefix("Bearer ").strip() - user_id = app.config.get("FLEXMEASURES_S2_BEARERS", {}).get(token, None) - if user_id is not None: - - user = db.session.get(User, user_id) - - # Attach account to WebSocket server - s2_ws.account = user.account - app.logger.info("Account authorized for WebSocket connections") - + if token == app.config.get("WEBSOCKET_BEARER_TOKEN", None): # Initialize S2Scheduler for this WebSocket connection if not already done if getattr(s2_ws, 's2_scheduler', None) is None: from datetime import datetime, timedelta, timezone From 87ebebb75a41fe872ed4c48147777af022060da5 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 1 Oct 2025 12:45:06 +0200 Subject: [PATCH 074/171] dev: yield occasionally Signed-off-by: F.N. 
Claessen --- flexmeasures/api/common/routes.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/flexmeasures/api/common/routes.py b/flexmeasures/api/common/routes.py index e427c163e9..3aa6d4bb0e 100644 --- a/flexmeasures/api/common/routes.py +++ b/flexmeasures/api/common/routes.py @@ -1,3 +1,5 @@ +import time + from flask import current_app, stream_with_context, Response from flask_security import auth_token_required, login_required from werkzeug.exceptions import NotFound @@ -39,5 +41,7 @@ def generate(): line = f.readline() if line: yield line + else: + time.sleep(0.5) return Response(stream_with_context(generate()), mimetype="text/plain") From bcc84048f858867257484cf4a5378ed6032794fe Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 1 Oct 2025 12:48:54 +0200 Subject: [PATCH 075/171] dev: switch away from live-streaming Signed-off-by: F.N. Claessen --- flexmeasures/api/common/routes.py | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/flexmeasures/api/common/routes.py b/flexmeasures/api/common/routes.py index 3aa6d4bb0e..3e641670f8 100644 --- a/flexmeasures/api/common/routes.py +++ b/flexmeasures/api/common/routes.py @@ -1,8 +1,8 @@ -import time +import os -from flask import current_app, stream_with_context, Response +from flask import current_app, Response from flask_security import auth_token_required, login_required -from werkzeug.exceptions import NotFound +from werkzeug.exceptions import NotFound, abort from flexmeasures.auth.decorators import roles_required from flexmeasures.api.common import flexmeasures_api as flexmeasures_api_ops @@ -34,14 +34,9 @@ def stream_logs(): if current_app.config.get("LOGGING_LEVEL") != "DEBUG": raise NotFound - def generate(): - with open("flexmeasures.log") as f: - f.seek(0, 2) # go to end of file - while True: - line = f.readline() - if line: - yield line - else: - time.sleep(0.5) - - return Response(stream_with_context(generate()), mimetype="text/plain") + log_file = 
"flexmeasures.log" + if not os.path.exists(log_file): + abort(404, "Log file not found") + with open(log_file, "r") as f: + lines = f.readlines()[-200:] # last 200 lines + return Response("".join(lines), mimetype="text/plain") From c663c32b6b14af104fd8656c0d904614c961d3ac Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 1 Oct 2025 12:50:51 +0200 Subject: [PATCH 076/171] dev: set dynamic line limit Signed-off-by: F.N. Claessen --- flexmeasures/api/common/routes.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flexmeasures/api/common/routes.py b/flexmeasures/api/common/routes.py index 3e641670f8..915dc3f014 100644 --- a/flexmeasures/api/common/routes.py +++ b/flexmeasures/api/common/routes.py @@ -1,6 +1,6 @@ import os -from flask import current_app, Response +from flask import current_app, request, Response from flask_security import auth_token_required, login_required from werkzeug.exceptions import NotFound, abort @@ -35,8 +35,9 @@ def stream_logs(): raise NotFound log_file = "flexmeasures.log" + n = int(request.args.get("tail", 200)) if not os.path.exists(log_file): abort(404, "Log file not found") with open(log_file, "r") as f: - lines = f.readlines()[-200:] # last 200 lines + lines = f.readlines()[-n:] # last n lines return Response("".join(lines), mimetype="text/plain") From 9ee53a609f8c4fa7a4fbabdd5d3e07cd525279d0 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 1 Oct 2025 13:20:34 +0200 Subject: [PATCH 077/171] dev: try dequeueing Signed-off-by: F.N. 
Claessen --- flexmeasures/api/common/routes.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flexmeasures/api/common/routes.py b/flexmeasures/api/common/routes.py index 915dc3f014..4be4143a83 100644 --- a/flexmeasures/api/common/routes.py +++ b/flexmeasures/api/common/routes.py @@ -1,3 +1,4 @@ +from collections import deque import os from flask import current_app, request, Response @@ -39,5 +40,5 @@ def stream_logs(): if not os.path.exists(log_file): abort(404, "Log file not found") with open(log_file, "r") as f: - lines = f.readlines()[-n:] # last n lines - return Response("".join(lines), mimetype="text/plain") + last_n_lines = deque(f, maxlen=n) + return Response("".join(last_n_lines), mimetype="text/plain") From 98c839d4502e8895c623a26e9647a61acd135589 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 1 Oct 2025 13:20:49 +0200 Subject: [PATCH 078/171] dev: update method name Signed-off-by: F.N. Claessen --- flexmeasures/api/common/routes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flexmeasures/api/common/routes.py b/flexmeasures/api/common/routes.py index 4be4143a83..5e36c7821b 100644 --- a/flexmeasures/api/common/routes.py +++ b/flexmeasures/api/common/routes.py @@ -30,8 +30,8 @@ def post_task_run(): @flexmeasures_api_ops.route("/logs") @login_required @roles_required("debugger") -def stream_logs(): - """Stream server logs for debugging.""" +def show_logs(): + """Show server logs for debugging.""" if current_app.config.get("LOGGING_LEVEL") != "DEBUG": raise NotFound From 9febee1cb5e8e95b825778f0683dac738943432c Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Thu, 9 Oct 2025 14:12:06 +0200 Subject: [PATCH 079/171] docs: changelog entry Signed-off-by: Vlad Iftime --- .vscode/settings.json | 40 +++++++++++++++++------------------ documentation/changelog.rst | 1 + flexmeasures/ws/s2_ws_sync.py | 3 ++- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/.vscode/settings.json 
b/.vscode/settings.json index 009d148db5..8649119566 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,23 +1,21 @@ { - "spellright.language": [ - "en_US" - ], - "spellright.documentTypes": [ - "markdown", - "latex", - "plaintext", - "restructuredtext", - "python" - ], - "python.linting.enabled": true, - "python.linting.pylintEnabled": false, - "python.linting.flake8Enabled": true, - "workbench.editor.wrapTabs": true, - "python.formatting.provider": "black", - "python.testing.pytestArgs": [ - "flexmeasures" - ], - "python.testing.unittestEnabled": false, - "python.testing.pytestEnabled": true, - "python.analysis.autoImportCompletions": true + "spellright.language": ["en_US"], + "spellright.documentTypes": [ + "markdown", + "latex", + "plaintext", + "restructuredtext", + "python" + ], + "python.linting.enabled": true, + "python.linting.pylintEnabled": false, + "python.linting.flake8Enabled": true, + "workbench.editor.wrapTabs": true, + "python.formatting.provider": "black", + "python.testing.pytestArgs": ["flexmeasures"], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "python.analysis.autoImportCompletions": true, + "stm32-for-vscode.openOCDPath": false, + "stm32-for-vscode.armToolchainPath": false } diff --git a/documentation/changelog.rst b/documentation/changelog.rst index 43e52b5a5a..0e4e27c455 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -18,6 +18,7 @@ New features * Add form to upload sensor data to the database [see `PR #1481 `_] * Allow editing users in the UI [see `PR #1502 `_] * Smarter toast notifications [see `PR #1530 `_] +* Sticky replay button for asset and sensor pages [see `PR #1739 `_] * Move various warnings to toast notifications [see `PR #1529 `_] * Document how to set a parent asset when creating an asset through the API, and show parent assets in ``flexmeasures show account`` [see `PR #1533 `_] * Add ``flexmeasures show assets`` CLI command for listing 
public assets and option ``--account `` to list assets owned by a specific account [see `PR #1536 `_] diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index cea5015a9f..3785ead471 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -7,7 +7,7 @@ import traceback import uuid from datetime import datetime, timedelta, timezone -from typing import Any, Callable, Dict, Optional, Type +from typing import Any, Callable, Dict, Optional, Type, List from flask import Flask from flask_sock import ConnectionClosed, Sock @@ -49,6 +49,7 @@ def __init__(self): self.storage_status: Optional[FRBCStorageStatus] = None self.actuator_statuses: Dict[str, FRBCActuatorStatus] = {} # Changed to dict by actuator_id self.resource_id: Optional[str] = None + self.instructions: Optional[List[FRBCInstruction]] = [] def is_complete(self) -> bool: """Check if we have received all necessary data to generate instructions.""" From 88aebb1c57e2b5711c67be095b265003b890582d Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Tue, 14 Oct 2025 12:10:05 +0200 Subject: [PATCH 080/171] Instrunction get filtered, then the previous Instructions are revoked and the new ones are sent Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws_sync.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 3785ead471..808ba6ee22 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -20,6 +20,8 @@ HandshakeResponse, ReceptionStatus, ReceptionStatusValues, + RevokableObjects, + RevokeObject, SelectControlType, ResourceManagerDetails, ) @@ -77,6 +79,7 @@ def __init__(self): self.last_compute_time: Optional[datetime] = None self.resource_id: Optional[str] = None self.last_operation_mode: Optional[uuid.UUID] = None + self.sent_instructions: List[FRBCInstruction] = [] # Store sent instructions for revocation def can_compute(self, 
replanning_frequency: timedelta) -> bool: """Check if enough time has passed since the last compute call.""" @@ -320,6 +323,31 @@ def _send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: except ConnectionClosed: self.app.logger.warning("Connection closed while sending message (sync)") + def _revoke_previous_instructions( + self, connection_state: ConnectionState, websocket: Sock + ) -> None: + """Revoke all previously sent instructions before sending new ones.""" + if not connection_state.sent_instructions: + return + + self.app.logger.info( + f"Revoking {len(connection_state.sent_instructions)} previous instructions" + ) + + for instruction in connection_state.sent_instructions: + revoke_msg = RevokeObject( + message_id=uuid.uuid4(), + object_type=RevokableObjects.FRBC_Instruction, + object_id=instruction.message_id, + ) + self._send_and_forget(revoke_msg, websocket) + self.app.logger.info( + f"Sent RevokeObject for instruction {instruction.message_id}" + ) + + # Clear the list of sent instructions after revoking + connection_state.sent_instructions.clear() + def _filter_instructions_by_operation_mode( self, instructions: list, connection_state: ConnectionState ) -> list: @@ -565,6 +593,10 @@ def _check_and_generate_instructions( # noqa: C901 frbc_instructions, connection_state ) + # Revoke previous instructions before sending new ones + self._revoke_previous_instructions(connection_state, websocket) + + # Send new instructions and store them for instruction in filtered_instructions: self._send_and_forget(instruction, websocket) self.app.logger.info( @@ -572,6 +604,9 @@ def _check_and_generate_instructions( # noqa: C901 ) # Update the last operation mode for this connection connection_state.last_operation_mode = instruction.operation_mode + + # Store the sent instructions for future revocation + connection_state.sent_instructions = filtered_instructions.copy() # Log filtering results if len(frbc_instructions) > len(filtered_instructions): From 
a77600f8e3782deb06b690a9492bec6e98c5320c Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 29 Sep 2025 12:16:06 +0200 Subject: [PATCH 081/171] feat: create unique asset for each unique resource Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 808ba6ee22..a0cee656e9 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -12,7 +12,9 @@ from flask import Flask from flask_sock import ConnectionClosed, Sock +from flexmeasures import Asset from flexmeasures.api.common.utils.validators import parse_duration +from flexmeasures.data.services.utils import get_or_create_model from s2python.common import ( ControlType, EnergyManagementRole, @@ -482,6 +484,9 @@ def handle_frbc_actuator_status( self._check_and_generate_instructions(resource_id, websocket) def ensure_resource_is_registered(self, resource_id: str): + self._assets[resource_id] = get_or_create_model( + Asset, name=resource_id, account_id=self.account.id + ) if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() From 3c48df6b324eda2a7eaa9e633d6ae16a6c03038e Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 12:17:17 +0200 Subject: [PATCH 082/171] dev: debug resource registration as assets Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index a0cee656e9..d4b312217f 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -484,9 +484,20 @@ def handle_frbc_actuator_status( self._check_and_generate_instructions(resource_id, websocket) def ensure_resource_is_registered(self, resource_id: str): - self._assets[resource_id] = get_or_create_model( - Asset, name=resource_id, account_id=self.account.id - ) + try: + self._assets[resource_id] = get_or_create_model( + Asset, name=resource_id, account_id=self.account.id + ) + except Exception as exc: + self.app.logger.warning(str(exc)) + if hasattr(self, "account"): + self.app.logger.debug(f"account: {self.account}") + if hasattr(self.account, "id"): + self.app.logger.debug(f"account ID: {self.account.id}") + else: + self.app.logger.debug("self.account has no ID") + else: + self.app.logger.debug("self has no account") if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() From c9895a62135dbd88538e80a18cd551d4eb627be0 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 12:18:11 +0200 Subject: [PATCH 083/171] style: black Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 39 ++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index d4b312217f..60ab69e4b8 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -51,7 +51,9 @@ def __init__(self): self.system_description: Optional[FRBCSystemDescription] = None self.fill_level_target_profile: Optional[FRBCFillLevelTargetProfile] = None self.storage_status: Optional[FRBCStorageStatus] = None - self.actuator_statuses: Dict[str, FRBCActuatorStatus] = {} # Changed to dict by actuator_id + self.actuator_statuses: Dict[str, FRBCActuatorStatus] = ( + {} + ) # Changed to dict by actuator_id self.resource_id: Optional[str] = None self.instructions: Optional[List[FRBCInstruction]] = [] @@ -67,7 +69,9 @@ def is_complete(self) -> bool: # Check that we have actuator status for ALL actuators in system description if self.system_description.actuators: - required_actuator_ids = {str(actuator.id) for actuator in self.system_description.actuators} + required_actuator_ids = { + str(actuator.id) for actuator in self.system_description.actuators + } received_actuator_ids = set(self.actuator_statuses.keys()) return required_actuator_ids.issubset(received_actuator_ids) @@ -81,7 +85,9 @@ def __init__(self): self.last_compute_time: Optional[datetime] = None self.resource_id: Optional[str] = None self.last_operation_mode: Optional[uuid.UUID] = None - self.sent_instructions: List[FRBCInstruction] = [] # Store sent instructions for revocation + self.sent_instructions: List[FRBCInstruction] = ( + [] + ) # Store sent instructions for revocation def can_compute(self, replanning_frequency: timedelta) -> bool: """Check if enough time has passed since the last compute call.""" @@ -212,7 +218,9 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: message = websocket.receive() try: s2_msg = 
self.s2_parser.parse_as_any_message(message) - self.app.logger.info(f"Received {s2_msg.message_type} message from client") + self.app.logger.info( + f"Received {s2_msg.message_type} message from client" + ) # Don't log verbose messages verbose_message_types = ["FRBC.UsageForecast"] @@ -480,7 +488,9 @@ def handle_frbc_actuator_status( self.ensure_resource_is_registered(resource_id=resource_id) # Store actuator status by actuator_id to support multiple actuators - self._device_data[resource_id].actuator_statuses[str(message.actuator_id)] = message + self._device_data[resource_id].actuator_statuses[ + str(message.actuator_id) + ] = message self._check_and_generate_instructions(resource_id, websocket) def ensure_resource_is_registered(self, resource_id: str): @@ -520,15 +530,24 @@ def _check_and_generate_instructions( # noqa: C901 ) # Debug log actuator statuses - if device_data.system_description and device_data.system_description.actuators: - required_actuators = {str(a.id) for a in device_data.system_description.actuators} + if ( + device_data.system_description + and device_data.system_description.actuators + ): + required_actuators = { + str(a.id) for a in device_data.system_description.actuators + } received_actuators = set(device_data.actuator_statuses.keys()) missing_actuators = required_actuators - received_actuators if missing_actuators: - self.app.logger.debug(f"โŒ actuator_status? Hold on.. Missing: {missing_actuators}") + self.app.logger.debug( + f"โŒ actuator_status? Hold on.. Missing: {missing_actuators}" + ) else: - self.app.logger.debug(f"โœ… actuator_status? Go flight! All {len(required_actuators)} actuators received") + self.app.logger.debug( + f"โœ… actuator_status? Go flight! 
All {len(required_actuators)} actuators received" + ) if device_data is None or not device_data.is_complete(): self.app.logger.info( f"Waiting for more data from device {resource_id} before running the S2FlaskScheduler" @@ -620,7 +639,7 @@ def _check_and_generate_instructions( # noqa: C901 ) # Update the last operation mode for this connection connection_state.last_operation_mode = instruction.operation_mode - + # Store the sent instructions for future revocation connection_state.sent_instructions = filtered_instructions.copy() From 70ed4734f0ab0e00a028a9304622c9a8e2f894ca Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 29 Sep 2025 11:51:53 +0200 Subject: [PATCH 084/171] feat: switch from to WEBSOCKET_BEARER_TOKEN to FLEXMEASURES_S2_BEARERS (multiple bearer tokens) Signed-off-by: F.N. Claessen --- flexmeasures/app.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 0156ae400e..94517b0b07 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -21,6 +21,8 @@ from redis import Redis from rq import Queue +from flexmeasures import User +from flexmeasures.data import db from flexmeasures.data.services.job_cache import JobCache @@ -255,7 +257,15 @@ def ws_connection_auth(): auth_header = request.headers.get("Authorization", "") if auth_header.startswith("Bearer "): token = auth_header.removeprefix("Bearer ").strip() - if token == app.config.get("WEBSOCKET_BEARER_TOKEN", None): + user_id = app.config.get("FLEXMEASURES_S2_BEARERS", {}).get(token, None) + if user_id is not None: + + user = db.session.get(User, user_id) + + # Attach account to WebSocket server + s2_ws.account = user.account + app.logger.info("Account authorized for WebSocket connections") + # Initialize S2Scheduler for this WebSocket connection if not already done if getattr(s2_ws, 's2_scheduler', None) is None: from datetime import datetime, timedelta, timezone From 84fff16a64da303c064cbaa3afcb8f2c875a96d9 Mon Sep 
17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 29 Sep 2025 12:16:40 +0200 Subject: [PATCH 085/171] fix: initialize properties with empty defaults Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 60ab69e4b8..8029dc09d9 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -179,6 +179,8 @@ def __init__( self._register_default_handlers() self.sock.route(self.ws_path)(self._ws_handler) self.s2_scheduler = None + self.account = None + self._assets: Dict[str, Asset] = {} def _register_default_handlers(self) -> None: self._handlers.register_handler(Handshake, self.handle_handshake) From e2d6b8b83284597c66ba59f4cdf33867376499a4 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 12:21:43 +0200 Subject: [PATCH 086/171] dev: debug FLEXMEASURES_S2_BEARERS Signed-off-by: F.N. Claessen --- flexmeasures/app.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 94517b0b07..a3f57f5e0e 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -257,14 +257,23 @@ def ws_connection_auth(): auth_header = request.headers.get("Authorization", "") if auth_header.startswith("Bearer "): token = auth_header.removeprefix("Bearer ").strip() - user_id = app.config.get("FLEXMEASURES_S2_BEARERS", {}).get(token, None) - if user_id is not None: - - user = db.session.get(User, user_id) - - # Attach account to WebSocket server - s2_ws.account = user.account - app.logger.info("Account authorized for WebSocket connections") + try: + user_id = app.config.get("FLEXMEASURES_S2_BEARERS", {}).get(token, None) + user_can_use_ws = user_id is not None + except Exception as exc: + app.logger.warning(str(exc)) + user_can_use_ws = token == app.config.get( + "WEBSOCKET_BEARER_TOKEN", None + ) + if user_can_use_ws: + + try: + user = 
db.session.get(User, user_id) + # Attach account to WebSocket server + s2_ws.account = user.account + app.logger.info("Account authorized for WebSocket connections") + except: + app.logger.warning("Failed to fetch User") # Initialize S2Scheduler for this WebSocket connection if not already done if getattr(s2_ws, 's2_scheduler', None) is None: From 337b00fd3b07c489e7515dd3feebe6d157547faa Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 12:28:22 +0200 Subject: [PATCH 087/171] fix: creating an Asset requires an AssetType Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 8029dc09d9..bcb207d090 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -12,7 +12,7 @@ from flask import Flask from flask_sock import ConnectionClosed, Sock -from flexmeasures import Asset +from flexmeasures import Asset, AssetType from flexmeasures.api.common.utils.validators import parse_duration from flexmeasures.data.services.utils import get_or_create_model from s2python.common import ( @@ -497,8 +497,12 @@ def handle_frbc_actuator_status( def ensure_resource_is_registered(self, resource_id: str): try: + asset_type = get_or_create_model(AssetType, name="S2 Resource") self._assets[resource_id] = get_or_create_model( - Asset, name=resource_id, account_id=self.account.id + model_class=Asset, + name=resource_id, + account_id=self.account.id, + generic_asset_type=asset_type, ) except Exception as exc: self.app.logger.warning(str(exc)) From 9eecc40a007b43e59953f5fdeb698f16977cf6f0 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 13:53:14 +0200 Subject: [PATCH 088/171] feat: warn if resource could not be saved as an asset, but still continue Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index bcb207d090..dcb270c9b6 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -505,15 +505,7 @@ def ensure_resource_is_registered(self, resource_id: str): generic_asset_type=asset_type, ) except Exception as exc: - self.app.logger.warning(str(exc)) - if hasattr(self, "account"): - self.app.logger.debug(f"account: {self.account}") - if hasattr(self.account, "id"): - self.app.logger.debug(f"account ID: {self.account.id}") - else: - self.app.logger.debug("self.account has no ID") - else: - self.app.logger.debug("self has no account") + self.app.logger.warning(f"Resource could not be saved as an asset: {str(exc)}") if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() From 7e465a3a207a2b89e6821aef2384fb5c1533137f Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 13:55:08 +0200 Subject: [PATCH 089/171] feat: finish transaction Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index dcb270c9b6..6591e3c847 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -13,6 +13,7 @@ from flask_sock import ConnectionClosed, Sock from flexmeasures import Asset, AssetType +from flexmeasures.data import db from flexmeasures.api.common.utils.validators import parse_duration from flexmeasures.data.services.utils import get_or_create_model from s2python.common import ( @@ -248,6 +249,9 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: websocket=websocket, ) self._handlers.handle_message(self, s2_msg, websocket) + + # Finalize transaction + db.session.commit() except json.JSONDecodeError: self.respond_with_reception_status( subject_message_id=uuid.UUID( From ec09b6a3d85fc2078e3c51a352da35649731acaf Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 14:00:26 +0200 Subject: [PATCH 090/171] fix: remove CLI specific log statement in services/utils.py Signed-off-by: F.N. Claessen --- flexmeasures/data/services/utils.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/flexmeasures/data/services/utils.py b/flexmeasures/data/services/utils.py index 689190f370..dcee416923 100644 --- a/flexmeasures/data/services/utils.py +++ b/flexmeasures/data/services/utils.py @@ -7,7 +7,6 @@ from copy import deepcopy import inspect -import click from sqlalchemy import JSON, String, cast, literal from flask import current_app from rq import Queue @@ -130,7 +129,6 @@ def get_or_create_model( model = model_class(**init_kwargs) db.session.add(model) db.session.flush() # assign ID - click.echo(f"Created {repr(model)}") return model From 19365f6ec0c451d523229eeb61bbd7646825f1c7 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 14:05:04 +0200 Subject: [PATCH 091/171] feat: register actuators Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 6591e3c847..be5cda902d 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -453,6 +453,8 @@ def handle_frbc_system_description( self.ensure_resource_is_registered(resource_id=resource_id) self._device_data[resource_id].system_description = message + for actuator in message.actuators: + self.ensure_actuator_is_registered(actuator_id=actuator.id) self._check_and_generate_instructions(resource_id, websocket) def handle_frbc_fill_level_target_profile( @@ -509,10 +511,26 @@ def ensure_resource_is_registered(self, resource_id: str): generic_asset_type=asset_type, ) except Exception as exc: - self.app.logger.warning(f"Resource could not be saved as an asset: {str(exc)}") + self.app.logger.warning( + f"Resource could not be saved as an asset: {str(exc)}" + ) if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() + def ensure_actuator_is_registered(self, actuator_id: str): + try: + asset_type = get_or_create_model(AssetType, name="S2 Actuator") + self._assets[actuator_id] = get_or_create_model( + model_class=Asset, + name=actuator_id, + account_id=self.account.id, + generic_asset_type=asset_type, + ) + except Exception as exc: + self.app.logger.warning( + f"Actuator could not be saved as an asset: {str(exc)}" + ) + def _check_and_generate_instructions( # noqa: C901 self, resource_id: str, websocket: Sock ) -> None: From 9394356923897d4372c2c188ffb80a9cc8da3752 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 14:06:43 +0200 Subject: [PATCH 092/171] feat: register actuator as child asset of resource Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index be5cda902d..b282817163 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -454,7 +454,9 @@ def handle_frbc_system_description( self._device_data[resource_id].system_description = message for actuator in message.actuators: - self.ensure_actuator_is_registered(actuator_id=actuator.id) + self.ensure_actuator_is_registered( + actuator_id=actuator.id, resource_id=resource_id + ) self._check_and_generate_instructions(resource_id, websocket) def handle_frbc_fill_level_target_profile( @@ -517,7 +519,7 @@ def ensure_resource_is_registered(self, resource_id: str): if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() - def ensure_actuator_is_registered(self, actuator_id: str): + def ensure_actuator_is_registered(self, actuator_id: str, resource_id: str): try: asset_type = get_or_create_model(AssetType, name="S2 Actuator") self._assets[actuator_id] = get_or_create_model( @@ -525,6 +527,7 @@ def ensure_actuator_is_registered(self, actuator_id: str): name=actuator_id, account_id=self.account.id, generic_asset_type=asset_type, + parent_asset=self._assets[resource_id], ) except Exception as exc: self.app.logger.warning( From 50dbe68939e33900598a92d07de6ccb777fb19ae Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 14:09:39 +0200 Subject: [PATCH 093/171] fix: UUID to str Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index b282817163..ffc257d8cd 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -455,7 +455,7 @@ def handle_frbc_system_description( self._device_data[resource_id].system_description = message for actuator in message.actuators: self.ensure_actuator_is_registered( - actuator_id=actuator.id, resource_id=resource_id + actuator_id=str(actuator.id), resource_id=resource_id ) self._check_and_generate_instructions(resource_id, websocket) From 85c9a6225d8ef4e4314027dd03d66f2d50d61089 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 15:32:54 +0200 Subject: [PATCH 094/171] feat: save fill level updates Signed-off-by: F.N. Claessen --- flexmeasures/app.py | 41 ++++++++++++++++++++--------------- flexmeasures/ws/s2_ws_sync.py | 39 +++++++++++++++++++++++++++++++-- 2 files changed, 61 insertions(+), 19 deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index a3f57f5e0e..61b9b06e5d 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -271,37 +271,42 @@ def ws_connection_auth(): user = db.session.get(User, user_id) # Attach account to WebSocket server s2_ws.account = user.account + s2_ws.user = user app.logger.info("Account authorized for WebSocket connections") except: app.logger.warning("Failed to fetch User") # Initialize S2Scheduler for this WebSocket connection if not already done - if getattr(s2_ws, 's2_scheduler', None) is None: + if getattr(s2_ws, "s2_scheduler", None) is None: from datetime import datetime, timedelta, timezone - - # Get S2FlaskScheduler class from registered schedulers - scheduler_class = app.data_generators["scheduler"]["S2FlaskScheduler"] - + + # Get S2FlaskScheduler class from registered schedulers + scheduler_class = app.data_generators["scheduler"][ + "S2FlaskScheduler" + ] + # Create scheduler instance 
with minimal setup for WebSocket usage scheduler = scheduler_class.__new__(scheduler_class) - + # Set basic time parameters now = datetime.now(timezone.utc) - resolution = timedelta(minutes=5) # Match example_schedule_frbc.py resolution - + resolution = timedelta( + minutes=5 + ) # Match example_schedule_frbc.py resolution + # Align to 5-minute boundary minutes_offset = now.minute % 5 start_aligned = now.replace( - minute=now.minute - minutes_offset, - second=0, - microsecond=0 + minute=now.minute - minutes_offset, second=0, microsecond=0 ) - + # Set required attributes for scheduler scheduler.sensor = None scheduler.asset = None scheduler.start = start_aligned - scheduler.end = start_aligned + timedelta(hours=24) # 24-hour planning window + scheduler.end = start_aligned + timedelta( + hours=24 + ) # 24-hour planning window scheduler.resolution = resolution scheduler.belief_time = start_aligned scheduler.round_to_decimals = 6 @@ -311,14 +316,16 @@ def ws_connection_auth(): scheduler.info = {"scheduler": "S2FlaskScheduler"} scheduler.config_deserialized = True scheduler.return_multiple = True - + # Initialize device states storage scheduler.device_states = {} - + # Attach scheduler to WebSocket server s2_ws.s2_scheduler = scheduler - app.logger.info("S2FlaskScheduler initialized for WebSocket connections") - + app.logger.info( + "S2FlaskScheduler initialized for WebSocket connections" + ) + return # Let other before_request hooks handle it app.logger.info( diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index ffc257d8cd..30cfbc0907 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -12,10 +12,13 @@ from flask import Flask from flask_sock import ConnectionClosed, Sock -from flexmeasures import Asset, AssetType +from flexmeasures import Account, Asset, AssetType, Sensor, User from flexmeasures.data import db +from flexmeasures.data.models.time_series import TimedBelief +from flexmeasures.data.utils import 
save_to_db from flexmeasures.api.common.utils.validators import parse_duration from flexmeasures.data.services.utils import get_or_create_model +from flexmeasures.utils.time_utils import server_now from s2python.common import ( ControlType, EnergyManagementRole, @@ -39,6 +42,7 @@ from s2python.s2_parser import S2Parser from s2python.s2_validation_error import S2ValidationError from s2python.version import S2_VERSION +from timely_beliefs import BeliefsDataFrame # Set up logging logging.basicConfig(level=logging.INFO) @@ -180,7 +184,8 @@ def __init__( self._register_default_handlers() self.sock.route(self.ws_path)(self._ws_handler) self.s2_scheduler = None - self.account = None + self.account: Account | None = None + self.user: User | None = None self._assets: Dict[str, Asset] = {} def _register_default_handlers(self) -> None: @@ -485,6 +490,10 @@ def handle_frbc_storage_status( self.ensure_resource_is_registered(resource_id=resource_id) self._device_data[resource_id].storage_status = message + self.save_fill_level( + fill_level=message.present_fill_level, + resource_id=resource_id, + ) self._check_and_generate_instructions(resource_id, websocket) def handle_frbc_actuator_status( @@ -534,6 +543,32 @@ def ensure_actuator_is_registered(self, actuator_id: str, resource_id: str): f"Actuator could not be saved as an asset: {str(exc)}" ) + def save_fill_level(self, resource_id: str, fill_level: float): + try: + asset = self._assets[resource_id] + sensor = get_or_create_model( + model_class=Sensor, + name="fill level", + unit="", + event_resolution=timedelta(0), + asset=asset, + ) + belief = TimedBelief( + sensor=sensor, + source=self.user.data_source, + event_start=server_now(), + event_value=fill_level, + belief_horizon=timedelta(0), + cumulative_probability=0.5, + ) + bdf = BeliefsDataFrame(beliefs=[belief]) + save_to_db(bdf) + + except Exception as exc: + self.app.logger.warning( + f"Fill level could not be saved as sensor data: {str(exc)}" + ) + def 
_check_and_generate_instructions( # noqa: C901 self, resource_id: str, websocket: Sock ) -> None: From f41edd85b41637e7353636198770dbd74b5e9626 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 15:34:31 +0200 Subject: [PATCH 095/171] feat: catch any errors while committing and proceed to operate without persisting data Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 30cfbc0907..21ce3ccb8c 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -256,7 +256,12 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: self._handlers.handle_message(self, s2_msg, websocket) # Finalize transaction - db.session.commit() + try: + db.session.commit() + except Exception as exc: + self.app.logger.warning( + f"Session could not be committed to database: {str(exc)}" + ) except json.JSONDecodeError: self.respond_with_reception_status( subject_message_id=uuid.UUID( From c3649b5e57cca98a75e07a408d41568d3308387c Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 15:43:56 +0200 Subject: [PATCH 096/171] feat: rate limit saving fill levels Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 21ce3ccb8c..75596b6305 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -188,6 +188,21 @@ def __init__( self.user: User | None = None self._assets: Dict[str, Asset] = {} + self._minimum_measurement_period: timedelta = timedelta(minutes=5) + self._timers: dict[str, datetime] = dict() + + def _is_timer_due(self, name: str): + now = datetime.now() + due_time = self._timers.get(name, now - self._minimum_measurement_period) + if due_time <= now: + self._timers[name] = now + self._minimum_measurement_period + return True + else: + self.app.logger.debug( + f"Timer for {name} is not due until {self._timers[name]}" + ) + return False + def _register_default_handlers(self) -> None: self._handlers.register_handler(Handshake, self.handle_handshake) self._handlers.register_handler(ReceptionStatus, self.handle_reception_status) @@ -549,6 +564,8 @@ def ensure_actuator_is_registered(self, actuator_id: str, resource_id: str): ) def save_fill_level(self, resource_id: str, fill_level: float): + if not self._is_timer_due(f"fill level"): + return try: asset = self._assets[resource_id] sensor = get_or_create_model( From db7ad02db0efc40e4098787a527da3b1aee2eb0a Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 15:47:57 +0200 Subject: [PATCH 097/171] fix: rate limit per resource Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 75596b6305..be1f08919f 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -564,7 +564,7 @@ def ensure_actuator_is_registered(self, actuator_id: str, resource_id: str): ) def save_fill_level(self, resource_id: str, fill_level: float): - if not self._is_timer_due(f"fill level"): + if not self._is_timer_due(f"fill level for {resource_id}"): return try: asset = self._assets[resource_id] From 8d5d9cb7a595c45366c171e4ddd8ac119120694a Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 16:01:48 +0200 Subject: [PATCH 098/171] refactor: move timer logic to decorator Signed-off-by: F.N. Claessen --- flexmeasures/utils/coding_utils.py | 37 ++++++++++++++++++++++++++++++ flexmeasures/ws/s2_ws_sync.py | 4 ++-- 2 files changed, 39 insertions(+), 2 deletions(-) diff --git a/flexmeasures/utils/coding_utils.py b/flexmeasures/utils/coding_utils.py index f10a6195d7..1dbaf26841 100644 --- a/flexmeasures/utils/coding_utils.py +++ b/flexmeasures/utils/coding_utils.py @@ -177,3 +177,40 @@ def find_classes_modules(module, superclass, skiptest=True): def get_classes_module(module, superclass, skiptest=True) -> dict: return dict(find_classes_modules(module, superclass, skiptest=skiptest)) + + +def only_if_timer_due(kwarg_name: str | None = None): + """ + Decorator that runs a method only if its timer is due. 
+ + Timer name is derived from: + "" + " for " (if kwarg_name is provided) + + :param kwarg_name: name of the kwarg to include in the timer name (optional) + """ + + def decorator(func): + sig = inspect.signature(func) # get function signature + + @functools.wraps(func) + def wrapper(self, *args, **kwargs): + timer_name = func.__name__ # start with the function name + + if kwarg_name: + bound_args = sig.bind(self, *args, **kwargs) + bound_args.apply_defaults() + if kwarg_name in bound_args.arguments: + kwarg_value = bound_args.arguments[kwarg_name] + timer_name += f" for {kwarg_value}" + else: + raise ValueError( + f"Keyword '{kwarg_name}' not found in function call" + ) + + if not self._is_timer_due(timer_name): + return # skip if timer not due + return func(self, *args, **kwargs) + + return wrapper + + return decorator diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index be1f08919f..d704739f77 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -18,6 +18,7 @@ from flexmeasures.data.utils import save_to_db from flexmeasures.api.common.utils.validators import parse_duration from flexmeasures.data.services.utils import get_or_create_model +from flexmeasures.utils.coding_utils import only_if_timer_due from flexmeasures.utils.time_utils import server_now from s2python.common import ( ControlType, @@ -563,9 +564,8 @@ def ensure_actuator_is_registered(self, actuator_id: str, resource_id: str): f"Actuator could not be saved as an asset: {str(exc)}" ) + @only_if_timer_due("resource_id") def save_fill_level(self, resource_id: str, fill_level: float): - if not self._is_timer_due(f"fill level for {resource_id}"): - return try: asset = self._assets[resource_id] sensor = get_or_create_model( From 5e99ff05182686ef3d17ef47b0495851a73a552b Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 16:02:39 +0200 Subject: [PATCH 099/171] fix: create sensor Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index d704739f77..6846a34c89 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -573,7 +573,7 @@ def save_fill_level(self, resource_id: str, fill_level: float): name="fill level", unit="", event_resolution=timedelta(0), - asset=asset, + generic_asset=asset, ) belief = TimedBelief( sensor=sensor, From 2e580f5eb96c901adda222b0919d0298a2fa2f89 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 16:06:41 +0200 Subject: [PATCH 100/171] feat: still attempt to create sensor Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 6846a34c89..e25ec82f21 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -575,6 +575,9 @@ def save_fill_level(self, resource_id: str, fill_level: float): event_resolution=timedelta(0), generic_asset=asset, ) + except Exception as exc: + self.app.logger.warning(f"Fill level sensor could not be saved: {str(exc)}") + try: belief = TimedBelief( sensor=sensor, source=self.user.data_source, From af154fa01c3be22df962c6ff53d528f5148491be Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 16:08:08 +0200 Subject: [PATCH 101/171] fix: create data source for user Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index e25ec82f21..edade55a92 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -18,6 +18,7 @@ from flexmeasures.data.utils import save_to_db from flexmeasures.api.common.utils.validators import parse_duration from flexmeasures.data.services.utils import get_or_create_model +from flexmeasures.data.services.data_sources import get_or_create_source from flexmeasures.utils.coding_utils import only_if_timer_due from flexmeasures.utils.time_utils import server_now from s2python.common import ( @@ -578,9 +579,10 @@ def save_fill_level(self, resource_id: str, fill_level: float): except Exception as exc: self.app.logger.warning(f"Fill level sensor could not be saved: {str(exc)}") try: + data_source = get_or_create_source(self.user) belief = TimedBelief( sensor=sensor, - source=self.user.data_source, + source=data_source, event_start=server_now(), event_value=fill_level, belief_horizon=timedelta(0), From 5d9935fd2d0d7b150b88362d5ed7c54f336194c4 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 16:17:09 +0200 Subject: [PATCH 102/171] refactor: towards reusing class method Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index edade55a92..6b4960fe2c 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -20,6 +20,7 @@ from flexmeasures.data.services.utils import get_or_create_model from flexmeasures.data.services.data_sources import get_or_create_source from flexmeasures.utils.coding_utils import only_if_timer_due +from flexmeasures.utils.flexmeasures_inflection import capitalize from flexmeasures.utils.time_utils import server_now from s2python.common import ( ControlType, @@ -567,17 +568,20 @@ def ensure_actuator_is_registered(self, actuator_id: str, resource_id: str): @only_if_timer_due("resource_id") def save_fill_level(self, resource_id: str, fill_level: float): + sensor_name = "fill level" try: asset = self._assets[resource_id] sensor = get_or_create_model( model_class=Sensor, - name="fill level", + name=sensor_name, unit="", event_resolution=timedelta(0), generic_asset=asset, ) except Exception as exc: - self.app.logger.warning(f"Fill level sensor could not be saved: {str(exc)}") + self.app.logger.warning( + f"{capitalize(sensor_name)} sensor could not be saved: {str(exc)}" + ) try: data_source = get_or_create_source(self.user) belief = TimedBelief( @@ -593,7 +597,7 @@ def save_fill_level(self, resource_id: str, fill_level: float): except Exception as exc: self.app.logger.warning( - f"Fill level could not be saved as sensor data: {str(exc)}" + f"{capitalize(sensor_name)} could not be saved as sensor data: {str(exc)}" ) def _check_and_generate_instructions( # noqa: C901 From e553867ac65e567427d4b439f1a71af988c51146 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 16:18:31 +0200 Subject: [PATCH 103/171] feat: accept multiple kwargs for separating timers Signed-off-by: F.N. 
Claessen --- flexmeasures/utils/coding_utils.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/flexmeasures/utils/coding_utils.py b/flexmeasures/utils/coding_utils.py index 1dbaf26841..99e3e40695 100644 --- a/flexmeasures/utils/coding_utils.py +++ b/flexmeasures/utils/coding_utils.py @@ -179,14 +179,14 @@ def get_classes_module(module, superclass, skiptest=True) -> dict: return dict(find_classes_modules(module, superclass, skiptest=skiptest)) -def only_if_timer_due(kwarg_name: str | None = None): +def only_if_timer_due(*kwarg_names): """ Decorator that runs a method only if its timer is due. Timer name is derived from: - "" + " for " (if kwarg_name is provided) + " for , , ..." - :param kwarg_name: name of the kwarg to include in the timer name (optional) + :param kwarg_names: names of kwargs to include in the timer name """ def decorator(func): @@ -196,16 +196,18 @@ def decorator(func): def wrapper(self, *args, **kwargs): timer_name = func.__name__ # start with the function name - if kwarg_name: + if kwarg_names: bound_args = sig.bind(self, *args, **kwargs) bound_args.apply_defaults() - if kwarg_name in bound_args.arguments: - kwarg_value = bound_args.arguments[kwarg_name] - timer_name += f" for {kwarg_value}" - else: - raise ValueError( - f"Keyword '{kwarg_name}' not found in function call" - ) + + values = [] + for name in kwarg_names: + if name in bound_args.arguments: + values.append(str(bound_args.arguments[name])) + else: + raise ValueError(f"Keyword '{name}' not found in function call") + + timer_name += " for " + ", ".join(values) if not self._is_timer_due(timer_name): return # skip if timer not due From 556147c9df2c7445bacd6f6f731c71e4fca40e5a Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 16:20:59 +0200 Subject: [PATCH 104/171] refactor: save_fill_level becomes save_event Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 6b4960fe2c..073006174b 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -513,8 +513,9 @@ def handle_frbc_storage_status( self.ensure_resource_is_registered(resource_id=resource_id) self._device_data[resource_id].storage_status = message - self.save_fill_level( - fill_level=message.present_fill_level, + self.save_event( + sensor_name="fill level", + event_value=message.present_fill_level, resource_id=resource_id, ) self._check_and_generate_instructions(resource_id, websocket) @@ -566,9 +567,8 @@ def ensure_actuator_is_registered(self, actuator_id: str, resource_id: str): f"Actuator could not be saved as an asset: {str(exc)}" ) - @only_if_timer_due("resource_id") - def save_fill_level(self, resource_id: str, fill_level: float): - sensor_name = "fill level" + @only_if_timer_due("sensor_name", "resource_id") + def save_event(self, sensor_name: str, resource_id: str, event_value: float): try: asset = self._assets[resource_id] sensor = get_or_create_model( @@ -588,7 +588,7 @@ def save_fill_level(self, resource_id: str, fill_level: float): sensor=sensor, source=data_source, event_start=server_now(), - event_value=fill_level, + event_value=event_value, belief_horizon=timedelta(0), cumulative_probability=0.5, ) From c0e59e04d5a7f45096ffab796f0fe5016a51577b Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 16:43:10 +0200 Subject: [PATCH 105/171] feat: timers due on the tick Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 073006174b..3e6017773f 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -4,6 +4,7 @@ import json import logging +import math import traceback import uuid from datetime import datetime, timedelta, timezone @@ -194,11 +195,26 @@ def __init__( self._minimum_measurement_period: timedelta = timedelta(minutes=5) self._timers: dict[str, datetime] = dict() - def _is_timer_due(self, name: str): + def _is_timer_due(self, name: str) -> bool: now = datetime.now() due_time = self._timers.get(name, now - self._minimum_measurement_period) if due_time <= now: - self._timers[name] = now + self._minimum_measurement_period + # Get total seconds of the period + period_seconds = self._minimum_measurement_period.total_seconds() + + # Seconds since start of the hour + seconds_since_hour = now.minute * 60 + now.second + now.microsecond / 1e6 + + # Ceil to next multiple of period_seconds + next_tick_seconds = ( + math.ceil(seconds_since_hour / period_seconds) * period_seconds + ) + + # Compute next due datetime + next_due = now.replace(minute=0, second=0, microsecond=0) + timedelta( + seconds=next_tick_seconds + ) + self._timers[name] = next_due return True else: self.app.logger.debug( From 2f28c1c73ebd9e265923f0644a2a7d82be82b2ed Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 16:53:57 +0200 Subject: [PATCH 106/171] feat: events recorded on the tick Signed-off-by: F.N. 
Claessen --- flexmeasures/utils/time_utils.py | 5 +++++ flexmeasures/ws/s2_ws_sync.py | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/flexmeasures/utils/time_utils.py b/flexmeasures/utils/time_utils.py index 623ebf7654..ca949a4ff2 100644 --- a/flexmeasures/utils/time_utils.py +++ b/flexmeasures/utils/time_utils.py @@ -21,6 +21,11 @@ def server_now() -> datetime: return datetime.now(get_timezone()) +def floored_server_now(resolution: timedelta) -> datetime: + """Return the current server time floored to the nearest multiple of `resolution`.""" + return datetime.min + ((server_now() - datetime.min) // resolution) * resolution + + def ensure_local_timezone( dt: pd.Timestamp | datetime, tz_name: str = "Europe/Amsterdam" ) -> pd.Timestamp | datetime: diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 3e6017773f..ca4bde9fcd 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -22,7 +22,7 @@ from flexmeasures.data.services.data_sources import get_or_create_source from flexmeasures.utils.coding_utils import only_if_timer_due from flexmeasures.utils.flexmeasures_inflection import capitalize -from flexmeasures.utils.time_utils import server_now +from flexmeasures.utils.time_utils import floored_server_now from s2python.common import ( ControlType, EnergyManagementRole, @@ -603,7 +603,7 @@ def save_event(self, sensor_name: str, resource_id: str, event_value: float): belief = TimedBelief( sensor=sensor, source=data_source, - event_start=server_now(), + event_start=floored_server_now(self._minimum_measurement_period), event_value=event_value, belief_horizon=timedelta(0), cumulative_probability=0.5, From 50186762e743ed3b0952c98885551e989a7e6223 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 22 Oct 2025 16:55:51 +0200 Subject: [PATCH 107/171] fix: truthfully report the time of recording Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index ca4bde9fcd..c53620fda2 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -22,7 +22,7 @@ from flexmeasures.data.services.data_sources import get_or_create_source from flexmeasures.utils.coding_utils import only_if_timer_due from flexmeasures.utils.flexmeasures_inflection import capitalize -from flexmeasures.utils.time_utils import floored_server_now +from flexmeasures.utils.time_utils import floored_server_now, server_now from s2python.common import ( ControlType, EnergyManagementRole, @@ -605,7 +605,7 @@ def save_event(self, sensor_name: str, resource_id: str, event_value: float): source=data_source, event_start=floored_server_now(self._minimum_measurement_period), event_value=event_value, - belief_horizon=timedelta(0), + belief_time=server_now(), cumulative_probability=0.5, ) bdf = BeliefsDataFrame(beliefs=[belief]) From 980f27aaaf740515706efe969f00cf2adb7714e8 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Thu, 23 Oct 2025 08:01:18 +0200 Subject: [PATCH 108/171] Improved logging Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws_sync.py | 292 +++++++++++++--------------------- 1 file changed, 111 insertions(+), 181 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 808ba6ee22..346ad4ced7 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -56,11 +56,7 @@ def __init__(self): def is_complete(self) -> bool: """Check if we have received all necessary data to generate instructions.""" # Check basic required data - if ( - self.system_description is None - or self.fill_level_target_profile is None - or self.storage_status is None - ): + if self.system_description is None or self.fill_level_target_profile is None or self.storage_status is None: return False # Check that we have actuator 
status for ALL actuators in system description @@ -85,9 +81,7 @@ def can_compute(self, replanning_frequency: timedelta) -> bool: """Check if enough time has passed since the last compute call.""" if self.last_compute_time is None: return True - return ( - datetime.now(timezone.utc) - self.last_compute_time >= replanning_frequency - ) + return datetime.now(timezone.utc) - self.last_compute_time >= replanning_frequency def update_compute_time(self) -> None: """Update the last compute time to now.""" @@ -136,9 +130,7 @@ def handle_message( f"Ignoring message of type {type(msg)}; no handler is registered", ) - def register_handler( - self, msg_type: Type[S2Message], handler: Callable[..., Any] - ) -> None: + def register_handler(self, msg_type: Type[S2Message], handler: Callable[..., Any]) -> None: self.handlers[msg_type] = handler @@ -159,15 +151,9 @@ def __init__( self._handlers = MessageHandlersSync() self.s2_parser = S2Parser() self._connections: Dict[str, Sock] = {} - self._device_data: Dict[str, FRBCDeviceData] = ( - {} - ) # Store device data by resource_id - self._websocket_to_resource: Dict[Sock, str] = ( - {} - ) # Map websocket to resource_id - self._connection_states: Dict[Sock, ConnectionState] = ( - {} - ) # Track connection state for rate limiting + self._device_data: Dict[str, FRBCDeviceData] = {} # Store device data by resource_id + self._websocket_to_resource: Dict[Sock, str] = {} # Map websocket to resource_id + self._connection_states: Dict[Sock, ConnectionState] = {} # Track connection state for rate limiting self._register_default_handlers() self.sock.route(self.ws_path)(self._ws_handler) self.s2_scheduler = None @@ -175,22 +161,12 @@ def __init__( def _register_default_handlers(self) -> None: self._handlers.register_handler(Handshake, self.handle_handshake) self._handlers.register_handler(ReceptionStatus, self.handle_reception_status) - self._handlers.register_handler( - ResourceManagerDetails, self.handle_ResourceManagerDetails - ) + 
self._handlers.register_handler(ResourceManagerDetails, self.handle_ResourceManagerDetails) # Register FRBC message handlers - self._handlers.register_handler( - FRBCSystemDescription, self.handle_frbc_system_description - ) - self._handlers.register_handler( - FRBCFillLevelTargetProfile, self.handle_frbc_fill_level_target_profile - ) - self._handlers.register_handler( - FRBCStorageStatus, self.handle_frbc_storage_status - ) - self._handlers.register_handler( - FRBCActuatorStatus, self.handle_frbc_actuator_status - ) + self._handlers.register_handler(FRBCSystemDescription, self.handle_frbc_system_description) + self._handlers.register_handler(FRBCFillLevelTargetProfile, self.handle_frbc_fill_level_target_profile) + self._handlers.register_handler(FRBCStorageStatus, self.handle_frbc_storage_status) + self._handlers.register_handler(FRBCActuatorStatus, self.handle_frbc_actuator_status) def _ws_handler(self, ws: Sock) -> None: try: @@ -201,34 +177,62 @@ def _ws_handler(self, ws: Sock) -> None: def _handle_websocket_connection(self, websocket: Sock) -> None: client_id = str(uuid.uuid4()) - self.app.logger.info(f"Client {client_id} connected (sync)") + self.app.logger.info(f"๐Ÿ”Œ Client {client_id} connected") self._connections[client_id] = websocket # Initialize connection state for rate limiting self._connection_states[websocket] = ConnectionState() try: while True: message = websocket.receive() + s2_msg = None try: s2_msg = self.s2_parser.parse_as_any_message(message) - self.app.logger.info(f"Received {s2_msg.message_type} message from client") + self.app.logger.info(f"๐Ÿ“จ Received {s2_msg.message_type} from client {client_id}") # Don't log verbose messages verbose_message_types = ["FRBC.UsageForecast"] if s2_msg.message_type not in verbose_message_types: self.app.logger.debug(s2_msg.to_json()) except json.JSONDecodeError: + self.app.logger.warning(f"โŒ Invalid JSON from client {client_id}") self.respond_with_reception_status( - subject_message_id=uuid.UUID( - 
"00000000-0000-0000-0000-000000000000" - ), + subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), status=ReceptionStatusValues.INVALID_DATA, diagnostic_label="Not valid json.", websocket=websocket, ) continue + except S2ValidationError as e: + self.app.logger.warning(f"โŒ S2 validation error from client {client_id}: {str(e)}") + try: + json_msg = json.loads(message) + message_id = json_msg.get("message_id") + if message_id: + self.respond_with_reception_status( + subject_message_id=message_id, + status=ReceptionStatusValues.INVALID_MESSAGE, + diagnostic_label=str(e), + websocket=websocket, + ) + else: + self.respond_with_reception_status( + subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + status=ReceptionStatusValues.INVALID_DATA, + diagnostic_label="Message appears valid json but could not find a message_id field.", + websocket=websocket, + ) + except json.JSONDecodeError: + self.respond_with_reception_status( + subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + status=ReceptionStatusValues.INVALID_DATA, + diagnostic_label="Not valid json.", + websocket=websocket, + ) + continue + + # Handle valid message try: if not isinstance(s2_msg, ReceptionStatus): - self.respond_with_reception_status( subject_message_id=s2_msg.message_id, status=ReceptionStatusValues.OK, @@ -236,39 +240,11 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: websocket=websocket, ) self._handlers.handle_message(self, s2_msg, websocket) - except json.JSONDecodeError: - self.respond_with_reception_status( - subject_message_id=uuid.UUID( - "00000000-0000-0000-0000-000000000000" - ), - status=ReceptionStatusValues.INVALID_DATA, - diagnostic_label="Not valid json.", - websocket=websocket, - ) - except S2ValidationError as e: - json_msg = json.loads(message) - message_id = json_msg.get("message_id") - if message_id: - self.respond_with_reception_status( - subject_message_id=message_id, - 
status=ReceptionStatusValues.INVALID_MESSAGE, - diagnostic_label=str(e), - websocket=websocket, - ) - else: - self.respond_with_reception_status( - subject_message_id=uuid.UUID( - "00000000-0000-0000-0000-000000000000" - ), - status=ReceptionStatusValues.INVALID_DATA, - diagnostic_label="Message appears valid json but could not find a message_id field.", - websocket=websocket, - ) except Exception as e: - self.app.logger.error("Error processing message: %s", str(e)) + self.app.logger.error(f"๐Ÿ’ฅ Error processing message from client {client_id}: {str(e)}") raise except ConnectionClosed: - self.app.logger.info(f"Connection with client {client_id} closed (sync)") + self.app.logger.info(f"๐Ÿ”Œ Client {client_id} disconnected") finally: if client_id in self._connections: del self._connections[client_id] @@ -276,10 +252,12 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: if websocket in self._websocket_to_resource: resource_id = self._websocket_to_resource[websocket] del self._websocket_to_resource[websocket] + self.app.logger.debug(f"๐Ÿงน Cleaned up resource mapping for {resource_id}") # Clean up device data if resource_id in self._device_data: del self._device_data[resource_id] + self.app.logger.debug(f"๐Ÿงน Cleaned up device data for {resource_id}") # Clean up device state from scheduler if available if ( @@ -288,12 +266,12 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: and hasattr(self.s2_scheduler, "remove_device_state") ): self.s2_scheduler.remove_device_state(resource_id) + self.app.logger.debug(f"๐Ÿงน Cleaned up scheduler state for {resource_id}") # Clean up connection state if websocket in self._connection_states: del self._connection_states[websocket] - - self.app.logger.info(f"Client {client_id} disconnected (sync)") + self.app.logger.debug(f"๐Ÿงน Cleaned up connection state for {client_id}") def respond_with_reception_status( self, @@ -307,32 +285,30 @@ def respond_with_reception_status( status=status, 
diagnostic_label=diagnostic_label, ) - self.app.logger.info( - f"Sending reception status {status} for message {subject_message_id} (sync)", + # Use emoji based on status + status_emoji = ( + "โœ…" + if status == ReceptionStatusValues.OK + else "โš ๏ธ" if status == ReceptionStatusValues.INVALID_MESSAGE else "โŒ" ) + self.app.logger.debug(f"{status_emoji} ReceptionStatus: {status.value} for message {subject_message_id}") try: websocket.send(response.to_json()) except ConnectionClosed: - self.app.logger.warning( - "Connection closed while sending reception status (sync)" - ) + self.app.logger.warning("๐Ÿ”Œ Connection closed while sending reception status") def _send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: try: websocket.send(s2_msg.to_json()) except ConnectionClosed: - self.app.logger.warning("Connection closed while sending message (sync)") + self.app.logger.warning("๐Ÿ”Œ Connection closed while sending message") - def _revoke_previous_instructions( - self, connection_state: ConnectionState, websocket: Sock - ) -> None: + def _revoke_previous_instructions(self, connection_state: ConnectionState, websocket: Sock) -> None: """Revoke all previously sent instructions before sending new ones.""" if not connection_state.sent_instructions: return - self.app.logger.info( - f"Revoking {len(connection_state.sent_instructions)} previous instructions" - ) + self.app.logger.info(f"๐Ÿ—‘๏ธ Revoking {len(connection_state.sent_instructions)} previous instructions") for instruction in connection_state.sent_instructions: revoke_msg = RevokeObject( @@ -341,16 +317,12 @@ def _revoke_previous_instructions( object_id=instruction.message_id, ) self._send_and_forget(revoke_msg, websocket) - self.app.logger.info( - f"Sent RevokeObject for instruction {instruction.message_id}" - ) + self.app.logger.debug(f"๐Ÿ—‘๏ธ Revoked instruction {instruction}") # Clear the list of sent instructions after revoking connection_state.sent_instructions.clear() - def 
_filter_instructions_by_operation_mode( - self, instructions: list, connection_state: ConnectionState - ) -> list: + def _filter_instructions_by_operation_mode(self, instructions: list, connection_state: ConnectionState) -> list: """Filter instructions to only include those with different operation_mode than the previous instruction.""" if not instructions: return instructions @@ -361,21 +333,16 @@ def _filter_instructions_by_operation_mode( for instruction in instructions: # Always include the first instruction if we haven't sent any before # or if the operation mode is different from the last sent instruction - if ( - last_operation_mode is None - or instruction.operation_mode != last_operation_mode - ): + if last_operation_mode is None or instruction.operation_mode != last_operation_mode: filtered.append(instruction) last_operation_mode = instruction.operation_mode return filtered - def handle_handshake( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: + def handle_handshake(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: if not isinstance(message, Handshake): return - self.app.logger.info("Received Handshake (sync)") + self.app.logger.info("๐Ÿค Received Handshake") self.app.logger.debug(message.to_json()) if S2_VERSION not in message.supported_protocol_versions: @@ -388,52 +355,44 @@ def handle_handshake( selected_protocol_version=S2_VERSION, ) self._send_and_forget(handshake_response, websocket) - self.app.logger.info("HandshakeResponse sent (sync)") + self.app.logger.info(f"๐Ÿค HandshakeResponse sent (protocol version: {S2_VERSION})") self.app.logger.debug(handshake_response) # If client is RM, send control type selection if hasattr(message, "role") and message.role == EnergyManagementRole.RM: - self.app.logger.debug("Sending control type selection (sync)") + self.app.logger.info("๐Ÿ“ค Sending control type selection (FRBC)") select_control_type = SelectControlType( message_id=uuid.uuid4(), 
control_type=ControlType.FILL_RATE_BASED_CONTROL, ) self._send_and_forget(select_control_type, websocket) - self.app.logger.info("SelectControlType sent (sync)") self.app.logger.debug(select_control_type) - def handle_reception_status( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: + def handle_reception_status(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: if not isinstance(message, ReceptionStatus): return self.app.logger.debug(message.to_json()) - def handle_ResourceManagerDetails( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: + def handle_ResourceManagerDetails(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: if not isinstance(message, ResourceManagerDetails): return - self.app.logger.info( - "Received ResourceManagerDetails (sync): %s", message.to_json() - ) + resource_id = str(message.resource_id) + self.app.logger.info(f"๐Ÿ“‹ Received ResourceManagerDetails for resource: {resource_id}") # Store the resource_id from ResourceManagerDetails for device identification - resource_id = str(message.resource_id) self._websocket_to_resource[websocket] = resource_id if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() self._device_data[resource_id].resource_id = resource_id - def handle_frbc_system_description( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: + def handle_frbc_system_description(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: if not isinstance(message, FRBCSystemDescription): return - self.app.logger.info("Received FRBCSystemDescription: %s", message.to_json()) + resource_id = self._websocket_to_resource.get(websocket, "default_resource") + self.app.logger.info(f"๐Ÿ”ง Received FRBCSystemDescription for {resource_id}") + self.app.logger.debug(message.to_json()) # Get resource_id from websocket mapping - resource_id = 
self._websocket_to_resource.get(websocket, "default_resource") self.ensure_resource_is_registered(resource_id=resource_id) self._device_data[resource_id].system_description = message @@ -444,37 +403,34 @@ def handle_frbc_fill_level_target_profile( ) -> None: if not isinstance(message, FRBCFillLevelTargetProfile): return - self.app.logger.info( - "Received FRBCFillLevelTargetProfile: %s", message.to_json() - ) - resource_id = self._websocket_to_resource.get(websocket, "default_resource") + self.app.logger.info(f"๐ŸŽฏ Received FRBCFillLevelTargetProfile for {resource_id}") + self.app.logger.debug(message.to_json()) + self.ensure_resource_is_registered(resource_id=resource_id) self._device_data[resource_id].fill_level_target_profile = message self._check_and_generate_instructions(resource_id, websocket) - def handle_frbc_storage_status( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: + def handle_frbc_storage_status(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: if not isinstance(message, FRBCStorageStatus): return - self.app.logger.info("Received FRBCStorageStatus: %s", message.to_json()) - resource_id = self._websocket_to_resource.get(websocket, "default_resource") + self.app.logger.info(f"๐Ÿ”‹ Received FRBCStorageStatus for {resource_id}") + self.app.logger.debug(message.to_json()) + self.ensure_resource_is_registered(resource_id=resource_id) self._device_data[resource_id].storage_status = message self._check_and_generate_instructions(resource_id, websocket) - def handle_frbc_actuator_status( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: + def handle_frbc_actuator_status(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: if not isinstance(message, FRBCActuatorStatus): return - self.app.logger.info("Received FRBCActuatorStatus: %s", message.to_json()) - resource_id = self._websocket_to_resource.get(websocket, "default_resource") + 
self.app.logger.info(f"โš™๏ธ Received FRBCActuatorStatus for {resource_id} (actuator: {message.actuator_id})") + self.app.logger.debug(message.to_json()) + self.ensure_resource_is_registered(resource_id=resource_id) # Store actuator status by actuator_id to support multiple actuators @@ -485,9 +441,7 @@ def ensure_resource_is_registered(self, resource_id: str): if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() - def _check_and_generate_instructions( # noqa: C901 - self, resource_id: str, websocket: Sock - ) -> None: + def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> None: # noqa: C901 """Check if we have all required data and generate instructions if so.""" device_data = self._device_data.get(resource_id) if device_data: @@ -498,9 +452,7 @@ def _check_and_generate_instructions( # noqa: C901 "storage_status", ): self.app.logger.debug( - f"โœ… {attr}? Go flight!" - if getattr(device_data, attr, None) is not None - else f"โŒ {attr}? Hold on.." + f" โœ… {attr}" if getattr(device_data, attr, None) is not None else f" โณ {attr} (waiting...)" ) # Debug log actuator statuses @@ -510,27 +462,21 @@ def _check_and_generate_instructions( # noqa: C901 missing_actuators = required_actuators - received_actuators if missing_actuators: - self.app.logger.debug(f"โŒ actuator_status? Hold on.. Missing: {missing_actuators}") + self.app.logger.debug(f" โณ actuator_status (waiting for: {missing_actuators})") else: - self.app.logger.debug(f"โœ… actuator_status? Go flight! 
All {len(required_actuators)} actuators received") + self.app.logger.debug(f" โœ… actuator_status (all {len(required_actuators)} received)") if device_data is None or not device_data.is_complete(): - self.app.logger.info( - f"Waiting for more data from device {resource_id} before running the S2FlaskScheduler" - ) + self.app.logger.debug(f"โณ Waiting for more data from {resource_id} before scheduling") return # Check rate limiting based on FLEXMEASURES_S2_REPLANNING_FREQUENCY connection_state = self._connection_states.get(websocket) if connection_state is None: - self.app.logger.warning( - f"No connection state found for device {resource_id}" - ) + self.app.logger.warning(f"โš ๏ธ No connection state found for {resource_id}") return # Parse replanning frequency from config - replanning_freq_str = self.app.config.get( - "FLEXMEASURES_S2_REPLANNING_FREQUENCY", "PT5M" - ) + replanning_freq_str = self.app.config.get("FLEXMEASURES_S2_REPLANNING_FREQUENCY", "PT5M") try: replanning_frequency = parse_duration(replanning_freq_str) if replanning_frequency is None: @@ -538,31 +484,22 @@ def _check_and_generate_instructions( # noqa: C901 if not isinstance(replanning_frequency, timedelta): # Handle isodate.Duration objects by converting to timedelta # For simplicity, assume it's a basic duration that can be converted - replanning_frequency = timedelta( - seconds=replanning_frequency.total_seconds() - ) + replanning_frequency = timedelta(seconds=replanning_frequency.total_seconds()) except Exception as e: - self.app.logger.error( - f"Error parsing FLEXMEASURES_S2_REPLANNING_FREQUENCY '{replanning_freq_str}': {e}" - ) + self.app.logger.error(f"โŒ Error parsing FLEXMEASURES_S2_REPLANNING_FREQUENCY '{replanning_freq_str}': {e}") replanning_frequency = timedelta(minutes=5) # Default to 5 minutes # Check if we can compute based on rate limiting if not connection_state.can_compute(replanning_frequency): - time_since_last = ( - datetime.now(timezone.utc) - 
connection_state.last_compute_time - ) + time_since_last = datetime.now(timezone.utc) - connection_state.last_compute_time remaining_time = replanning_frequency - time_since_last - self.app.logger.info( - f"Rate limiting: Cannot generate instructions for device {resource_id}. " - f"Last compute was {time_since_last.total_seconds():.1f}s ago. " - f"Need to wait {remaining_time.total_seconds():.1f}s more." + self.app.logger.debug( + f"โฑ๏ธ Rate limit: Last compute {time_since_last.total_seconds():.1f}s ago, " + f"wait {remaining_time.total_seconds():.1f}s more" ) return - self.app.logger.info( - f"All data received for device {resource_id}, generating instructions" - ) + self.app.logger.info(f"๐Ÿš€ All data received for {resource_id}, starting scheduler") try: # Use the S2FlaskScheduler to create and store device state @@ -584,14 +521,8 @@ def _check_and_generate_instructions( # noqa: C901 schedule_results = self.s2_scheduler.compute() # Filter and send generated instructions - frbc_instructions = [ - result - for result in schedule_results - if isinstance(result, FRBCInstruction) - ] - filtered_instructions = self._filter_instructions_by_operation_mode( - frbc_instructions, connection_state - ) + frbc_instructions = [result for result in schedule_results if isinstance(result, FRBCInstruction)] + filtered_instructions = self._filter_instructions_by_operation_mode(frbc_instructions, connection_state) # Revoke previous instructions before sending new ones self._revoke_previous_instructions(connection_state, websocket) @@ -600,20 +531,23 @@ def _check_and_generate_instructions( # noqa: C901 for instruction in filtered_instructions: self._send_and_forget(instruction, websocket) self.app.logger.info( - f"Sent FRBC instruction: {instruction.to_json()}" + f"๐Ÿ“ค Sent FRBC instruction (mode: {instruction.operation_mode}, factor: {instruction.operation_mode_factor})" ) + self.app.logger.debug(f" Full instruction: {instruction.to_json()}") # Update the last operation mode 
for this connection connection_state.last_operation_mode = instruction.operation_mode - + # Store the sent instructions for future revocation connection_state.sent_instructions = filtered_instructions.copy() # Log filtering results if len(frbc_instructions) > len(filtered_instructions): self.app.logger.info( - f"Filtered instructions: {len(frbc_instructions)} -> {len(filtered_instructions)} " + f"๐Ÿ”ฝ Filtered instructions: {len(frbc_instructions)} โ†’ {len(filtered_instructions)} " f"(reduced by {len(frbc_instructions) - len(filtered_instructions)})" ) + elif len(filtered_instructions) > 0: + self.app.logger.info(f"โœ… Sent {len(filtered_instructions)} instruction(s)") # Process non-instruction results for result in schedule_results: @@ -622,14 +556,10 @@ def _check_and_generate_instructions( # noqa: C901 pass else: # Scheduler not available - log warning and skip instruction generation - self.app.logger.warning( - f"S2FlaskScheduler not available for device {resource_id}, cannot generate instructions" - ) + self.app.logger.warning(f"โš ๏ธ S2FlaskScheduler not available for {resource_id}") except Exception as e: - self.app.logger.error( - f"Error generating instructions for device {resource_id}: {e}" - ) + self.app.logger.error(f"๐Ÿ’ฅ Error generating instructions for {resource_id}: {e}") import traceback self.app.logger.debug(f"Traceback: {traceback.format_exc()}") From 87c3ee9a5b94de83d0dba4d217cdfcf081bbd470 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Thu, 23 Oct 2025 08:03:28 +0200 Subject: [PATCH 109/171] Added InstructionStatusUpdate handler Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws_sync.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 346ad4ced7..5e10fa8ad1 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -31,6 +31,7 @@ FRBCStorageStatus, FRBCActuatorStatus, FRBCInstruction, + FRBCInstructionStatusUpdate, ) 
from s2python.message import S2Message from s2python.s2_parser import S2Parser @@ -167,6 +168,7 @@ def _register_default_handlers(self) -> None: self._handlers.register_handler(FRBCFillLevelTargetProfile, self.handle_frbc_fill_level_target_profile) self._handlers.register_handler(FRBCStorageStatus, self.handle_frbc_storage_status) self._handlers.register_handler(FRBCActuatorStatus, self.handle_frbc_actuator_status) + self._handlers.register_handler(FRBCInstructionStatusUpdate, self.handle_frbc_instruction_status_update) def _ws_handler(self, ws: Sock) -> None: try: @@ -437,6 +439,33 @@ def handle_frbc_actuator_status(self, _: "S2FlaskWSServerSync", message: S2Messa self._device_data[resource_id].actuator_statuses[str(message.actuator_id)] = message self._check_and_generate_instructions(resource_id, websocket) + def handle_frbc_instruction_status_update( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: + if not isinstance(message, FRBCInstructionStatusUpdate): + return + + # Map status types to emojis for better visibility + status_emojis = { + "NEW": "๐Ÿ†•", + "ACCEPTED": "โœ…", + "STARTED": "โ–ถ๏ธ", + "SUCCEEDED": "๐ŸŽ‰", + "ABORTED": "๐Ÿ›‘", + "REJECTED": "โŒ", + "REVOKED": "๐Ÿ—‘๏ธ", + } + + status_type = str(message.status_type) if hasattr(message, "status_type") else "UNKNOWN" + emoji = status_emojis.get(status_type, "๐Ÿ“Š") + + resource_id = self._websocket_to_resource.get(websocket, "unknown") + self.app.logger.info( + f"{emoji} Instruction Status Update from {resource_id}: " + f"instruction_id={message.instruction_id}, status={status_type}" + ) + self.app.logger.debug(message.to_json()) + def ensure_resource_is_registered(self, resource_id: str): if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() From 2b5bb0bff62c2ca777160b75c48ad07ea932af5d Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Thu, 23 Oct 2025 11:12:58 +0200 Subject: [PATCH 110/171] Fixed alignment issue and added db 
timeout Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 808ba6ee22..7f0c5bed90 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -580,7 +580,7 @@ def _check_and_generate_instructions( # noqa: C901 # Update the compute time before calling the scheduler connection_state.update_compute_time() - # Generate instructions using the scheduler + # Generate instructions using the scheduler (this may query the database for costs) schedule_results = self.s2_scheduler.compute() # Filter and send generated instructions From 1769e7847f575210de94171565624b3c0c610847 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Oct 2025 15:31:18 +0200 Subject: [PATCH 111/171] feat: save SystemDescription as asset attributes Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index c53620fda2..ff4d91e937 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -501,6 +501,7 @@ def handle_frbc_system_description( self.ensure_actuator_is_registered( actuator_id=str(actuator.id), resource_id=resource_id ) + self.save_attribute(resource_id, **message.to_dict()) self._check_and_generate_instructions(resource_id, websocket) def handle_frbc_fill_level_target_profile( @@ -583,6 +584,16 @@ def ensure_actuator_is_registered(self, actuator_id: str, resource_id: str): f"Actuator could not be saved as an asset: {str(exc)}" ) + def save_attribute(self, resource_id: str, **kwargs): + asset = self._assets[resource_id] + for k, v in kwargs.items(): + try: + asset.set_attribute(k, v) + except Exception as exc: + self.app.logger.warning( + f"Failed to save {k}: {v} as an asset attribute of {asset}: {str(exc)}" + ) + @only_if_timer_due("sensor_name", "resource_id") def 
save_event(self, sensor_name: str, resource_id: str, event_value: float): try: From 4a81e9f125dcd8b8280a37974fa1d87e9e7f98a3 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Oct 2025 15:34:12 +0200 Subject: [PATCH 112/171] refactor: save_event also accepts actuator ID Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index ff4d91e937..b8b1a62bb2 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -533,7 +533,7 @@ def handle_frbc_storage_status( self.save_event( sensor_name="fill level", event_value=message.present_fill_level, - resource_id=resource_id, + resource_or_actuator_id=resource_id, ) self._check_and_generate_instructions(resource_id, websocket) @@ -595,9 +595,9 @@ def save_attribute(self, resource_id: str, **kwargs): ) @only_if_timer_due("sensor_name", "resource_id") - def save_event(self, sensor_name: str, resource_id: str, event_value: float): + def save_event(self, sensor_name: str, resource_or_actuator_id: str, event_value: float): try: - asset = self._assets[resource_id] + asset = self._assets[resource_or_actuator_id] sensor = get_or_create_model( model_class=Sensor, name=sensor_name, From 81353fdea0e2b8e98999be5b0f9d1210746a7a6b Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Oct 2025 15:41:26 +0200 Subject: [PATCH 113/171] fix: update decorator Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index b8b1a62bb2..d2a1d66d0e 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -594,8 +594,10 @@ def save_attribute(self, resource_id: str, **kwargs): f"Failed to save {k}: {v} as an asset attribute of {asset}: {str(exc)}" ) - @only_if_timer_due("sensor_name", "resource_id") - def save_event(self, sensor_name: str, resource_or_actuator_id: str, event_value: float): + @only_if_timer_due("sensor_name", "resource_or_actuator_id") + def save_event( + self, sensor_name: str, resource_or_actuator_id: str, event_value: float + ): try: asset = self._assets[resource_or_actuator_id] sensor = get_or_create_model( From a6c76781b70c9ea32875696a485eb8be4137e924 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Oct 2025 15:52:43 +0200 Subject: [PATCH 114/171] fix: timezone aware reference Signed-off-by: F.N. Claessen --- flexmeasures/utils/time_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/flexmeasures/utils/time_utils.py b/flexmeasures/utils/time_utils.py index ca949a4ff2..32babae242 100644 --- a/flexmeasures/utils/time_utils.py +++ b/flexmeasures/utils/time_utils.py @@ -23,7 +23,8 @@ def server_now() -> datetime: def floored_server_now(resolution: timedelta) -> datetime: """Return the current server time floored to the nearest multiple of `resolution`.""" - return datetime.min + ((server_now() - datetime.min) // resolution) * resolution + reference = get_timezone().localize(datetime.min) + return reference + ((server_now() - reference) // resolution) * resolution def ensure_local_timezone( From a9556516c8aedfe99b1d4469e5fa72a7813a74bf Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Oct 2025 16:05:00 +0200 Subject: [PATCH 115/171] fix: timezone aware datetime.min out of range Signed-off-by: F.N. 
Claessen --- flexmeasures/utils/time_utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flexmeasures/utils/time_utils.py b/flexmeasures/utils/time_utils.py index 32babae242..b4dbdcd3c6 100644 --- a/flexmeasures/utils/time_utils.py +++ b/flexmeasures/utils/time_utils.py @@ -23,8 +23,9 @@ def server_now() -> datetime: def floored_server_now(resolution: timedelta) -> datetime: """Return the current server time floored to the nearest multiple of `resolution`.""" - reference = get_timezone().localize(datetime.min) - return reference + ((server_now() - reference) // resolution) * resolution + ref = pytz.utc.localize(datetime.min) + tz = get_timezone() + return (ref + ((server_now() - ref) // resolution) * resolution).astimezone(tz) def ensure_local_timezone( From 5a7441b0211c6233e5be2931f1434ebae0724ada Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Oct 2025 16:06:21 +0200 Subject: [PATCH 116/171] refactor: variable renaming Signed-off-by: F.N. Claessen --- flexmeasures/utils/time_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flexmeasures/utils/time_utils.py b/flexmeasures/utils/time_utils.py index b4dbdcd3c6..bdcf0cf06d 100644 --- a/flexmeasures/utils/time_utils.py +++ b/flexmeasures/utils/time_utils.py @@ -24,8 +24,8 @@ def server_now() -> datetime: def floored_server_now(resolution: timedelta) -> datetime: """Return the current server time floored to the nearest multiple of `resolution`.""" ref = pytz.utc.localize(datetime.min) - tz = get_timezone() - return (ref + ((server_now() - ref) // resolution) * resolution).astimezone(tz) + _tz = get_timezone() + return (ref + ((server_now() - ref) // resolution) * resolution).astimezone(_tz) def ensure_local_timezone( From e4dc7374668a9fc72fbe9359437f3fbe2a6e0bad Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Oct 2025 16:19:25 +0200 Subject: [PATCH 117/171] fix: set_attribute assumes attribute already exists Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index d2a1d66d0e..00ff58559c 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -588,7 +588,7 @@ def save_attribute(self, resource_id: str, **kwargs): asset = self._assets[resource_id] for k, v in kwargs.items(): try: - asset.set_attribute(k, v) + asset.attributes[k] = v except Exception as exc: self.app.logger.warning( f"Failed to save {k}: {v} as an asset attribute of {asset}: {str(exc)}" From 0f78fd7d3e724434ca72c1308b31761507a13066 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Oct 2025 16:24:21 +0200 Subject: [PATCH 118/171] fix: first convert to JSON then load as dict Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 00ff58559c..077df169e8 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -501,7 +501,7 @@ def handle_frbc_system_description( self.ensure_actuator_is_registered( actuator_id=str(actuator.id), resource_id=resource_id ) - self.save_attribute(resource_id, **message.to_dict()) + self.save_attribute(resource_id, **json.loads(message.to_json())) self._check_and_generate_instructions(resource_id, websocket) def handle_frbc_fill_level_target_profile( From 24fcf5804a983b39d3ac850dadf8ce359b647783 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Oct 2025 20:51:32 +0200 Subject: [PATCH 119/171] feat: save device schedules Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 66 ++++++++++++++++++++++++++--------- 1 file changed, 50 insertions(+), 16 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 077df169e8..4520ba8b57 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -10,6 +10,7 @@ from datetime import datetime, timedelta, timezone from typing import Any, Callable, Dict, Optional, Type, List +import pandas as pd from flask import Flask from flask_sock import ConnectionClosed, Sock @@ -23,6 +24,7 @@ from flexmeasures.utils.coding_utils import only_if_timer_due from flexmeasures.utils.flexmeasures_inflection import capitalize from flexmeasures.utils.time_utils import floored_server_now, server_now +from flexmeasures.utils.unit_utils import convert_units from s2python.common import ( ControlType, EnergyManagementRole, @@ -596,32 +598,53 @@ def save_attribute(self, resource_id: str, **kwargs): @only_if_timer_due("sensor_name", "resource_or_actuator_id") def save_event( - self, sensor_name: str, resource_or_actuator_id: str, event_value: float + self, + sensor_name: str, + resource_or_actuator_id: str, + event_value: float | pd.Series, + event_resolution: timedelta | None, + event_unit: str = "", + sensor_unit: str = "", ): + if event_resolution is None: + event_resolution = timedelta(0) try: asset = self._assets[resource_or_actuator_id] sensor = get_or_create_model( model_class=Sensor, name=sensor_name, - unit="", - event_resolution=timedelta(0), + unit=sensor_unit, + event_resolution=event_resolution, generic_asset=asset, ) except Exception as exc: self.app.logger.warning( f"{capitalize(sensor_name)} sensor could not be saved: {str(exc)}" ) + return try: data_source = get_or_create_source(self.user) - belief = TimedBelief( - sensor=sensor, - source=data_source, - event_start=floored_server_now(self._minimum_measurement_period), - event_value=event_value, - belief_time=server_now(), - cumulative_probability=0.5, - ) - 
bdf = BeliefsDataFrame(beliefs=[belief]) + if isinstance(event_value, float): + belief = TimedBelief( + sensor=sensor, + source=data_source, + event_start=floored_server_now(self._minimum_measurement_period), + event_value=convert_units(event_value, event_unit, sensor_unit), + belief_time=server_now(), + cumulative_probability=0.5, + ) + bdf = BeliefsDataFrame(beliefs=[belief]) + elif isinstance(event_value, pd.Series): + bdf = BeliefsDataFrame( + convert_units(event_value, event_unit, sensor_unit), + sensor=sensor, + source=data_source, + belief_time=server_now(), + cumulative_probability=0.5, + ) + else: + logger.error(f"Cannot save event values of type {type(event_value)}.") + return save_to_db(bdf) except Exception as exc: @@ -769,10 +792,21 @@ def _check_and_generate_instructions( # noqa: C901 ) # Process non-instruction results - for result in schedule_results: - if isinstance(result, dict) and "sensor" in result: - # TODO: save result["data"] to sensor if needed for FlexMeasures - pass + try: + for result in schedule_results: + if isinstance(result, dict) and "device" in result: + self.save_event( + sensor_name="power", + resource_or_actuator_id=self._assets[result["device"]], + event_value=result["data"], + event_resolution=self.s2_scheduler.resolution, + event_unit=result["unit"], + sensor_unit="W", + ) + except Exception as exc: + self.app.logger.warning( + f"Processing non-instruction results failed: {str(exc)}" + ) else: # Scheduler not available - log warning and skip instruction generation self.app.logger.warning( From b6651e6d05828db7efed09b79c98369f7981e414 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Oct 2025 20:55:05 +0200 Subject: [PATCH 120/171] fix: pass string device ID Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 4520ba8b57..818bce648f 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -797,7 +797,7 @@ def _check_and_generate_instructions( # noqa: C901 if isinstance(result, dict) and "device" in result: self.save_event( sensor_name="power", - resource_or_actuator_id=self._assets[result["device"]], + resource_or_actuator_id=str(result["device"]), event_value=result["data"], event_resolution=self.s2_scheduler.resolution, event_unit=result["unit"], From 643afea319a4a8f6a63459e61f7ef2b2500e913b Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 24 Oct 2025 15:11:46 +0200 Subject: [PATCH 121/171] fix: missing default value for kwarg Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 818bce648f..dc723a9e1f 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -602,7 +602,7 @@ def save_event( sensor_name: str, resource_or_actuator_id: str, event_value: float | pd.Series, - event_resolution: timedelta | None, + event_resolution: timedelta | None = None, event_unit: str = "", sensor_unit: str = "", ): From dcb3df2707ac54ebcaaf67d10cb83f45b3a6f637 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 24 Oct 2025 15:18:44 +0200 Subject: [PATCH 122/171] fix: convert energy to power units using resolution Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index dc723a9e1f..63e73b16b9 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -629,7 +629,9 @@ def save_event( sensor=sensor, source=data_source, event_start=floored_server_now(self._minimum_measurement_period), - event_value=convert_units(event_value, event_unit, sensor_unit), + event_value=convert_units( + event_value, event_unit, sensor_unit, event_resolution=self.s2_scheduler.resolution + ), belief_time=server_now(), cumulative_probability=0.5, ) From 65ddbdfaab58534547573ec45e6d8d282a3b334b Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 24 Oct 2025 16:58:30 +0200 Subject: [PATCH 123/171] fix: create data source early Signed-off-by: F.N. Claessen --- flexmeasures/app.py | 3 +++ flexmeasures/ws/s2_ws_sync.py | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 61b9b06e5d..cb80337c45 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -23,6 +23,7 @@ from flexmeasures import User from flexmeasures.data import db +from flexmeasures.data.services.data_sources import get_or_create_source from flexmeasures.data.services.job_cache import JobCache @@ -272,6 +273,8 @@ def ws_connection_auth(): # Attach account to WebSocket server s2_ws.account = user.account s2_ws.user = user + data_source = get_or_create_source(user) + s2_ws.data_source_id = data_source.id app.logger.info("Account authorized for WebSocket connections") except: app.logger.warning("Failed to fetch User") diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 63e73b16b9..4aacb1125d 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -14,13 +14,12 @@ from flask import Flask from flask_sock import ConnectionClosed, Sock -from flexmeasures import Account, Asset, 
AssetType, Sensor, User +from flexmeasures import Account, Asset, AssetType, Sensor, Source, User from flexmeasures.data import db from flexmeasures.data.models.time_series import TimedBelief from flexmeasures.data.utils import save_to_db from flexmeasures.api.common.utils.validators import parse_duration from flexmeasures.data.services.utils import get_or_create_model -from flexmeasures.data.services.data_sources import get_or_create_source from flexmeasures.utils.coding_utils import only_if_timer_due from flexmeasures.utils.flexmeasures_inflection import capitalize from flexmeasures.utils.time_utils import floored_server_now, server_now @@ -192,6 +191,7 @@ def __init__( self.s2_scheduler = None self.account: Account | None = None self.user: User | None = None + self.data_source_id: int | None = None self._assets: Dict[str, Asset] = {} self._minimum_measurement_period: timedelta = timedelta(minutes=5) @@ -623,7 +623,7 @@ def save_event( ) return try: - data_source = get_or_create_source(self.user) + data_source = db.session.get(Source, self.data_source_id) if isinstance(event_value, float): belief = TimedBelief( sensor=sensor, From 85ea25995590508646b836347bf5854d3d113a8d Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 24 Oct 2025 17:01:35 +0200 Subject: [PATCH 124/171] fix: one more unit conversion Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 4aacb1125d..08a998d36f 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -624,21 +624,22 @@ def save_event( return try: data_source = db.session.get(Source, self.data_source_id) + event_value = convert_units( + event_value, event_unit, sensor_unit, event_resolution=self.s2_scheduler.resolution + ) if isinstance(event_value, float): belief = TimedBelief( sensor=sensor, source=data_source, event_start=floored_server_now(self._minimum_measurement_period), - event_value=convert_units( - event_value, event_unit, sensor_unit, event_resolution=self.s2_scheduler.resolution - ), + event_value=event_value, belief_time=server_now(), cumulative_probability=0.5, ) bdf = BeliefsDataFrame(beliefs=[belief]) elif isinstance(event_value, pd.Series): bdf = BeliefsDataFrame( - convert_units(event_value, event_unit, sensor_unit), + event_value, sensor=sensor, source=data_source, belief_time=server_now(), From 99f53612b86b546b0b90b220f35ec673672d17a5 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sat, 25 Oct 2025 00:38:25 +0200 Subject: [PATCH 125/171] feat: localize sensor to FLEXMEASURES_TIMEZONE Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 08a998d36f..3c769804d0 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -615,6 +615,7 @@ def save_event( name=sensor_name, unit=sensor_unit, event_resolution=event_resolution, + timezone=self.app.config["FLEXMEASURES_TIMEZONE"], generic_asset=asset, ) except Exception as exc: From 61e5cbfb867b5cffd5f094dfaabe27c6536134c6 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Sat, 25 Oct 2025 00:43:31 +0200 Subject: [PATCH 126/171] dev: log saving of scheduling results Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 3c769804d0..9bf4e02d61 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -799,6 +799,7 @@ def _check_and_generate_instructions( # noqa: C901 try: for result in schedule_results: if isinstance(result, dict) and "device" in result: + self.app.logger.debug(f"Saving result: {result}") self.save_event( sensor_name="power", resource_or_actuator_id=str(result["device"]), From 44ce434190b5edb88d76e529a3f0091701f7c70b Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sat, 25 Oct 2025 01:17:00 +0200 Subject: [PATCH 127/171] feat: S2Scheduler gets distinct data source from user-sent data Signed-off-by: F.N. Claessen --- flexmeasures/app.py | 6 ++++++ flexmeasures/ws/s2_ws_sync.py | 9 +++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index cb80337c45..0e8aa81896 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -319,6 +319,12 @@ def ws_connection_auth(): scheduler.info = {"scheduler": "S2FlaskScheduler"} scheduler.config_deserialized = True scheduler.return_multiple = True + scheduler.data_source = get_or_create_source( + source="FlexMeasures", + source_type="scheduler", + model="S2Scheduler", + version="1", + ) # Initialize device states storage scheduler.device_states = {} diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 9bf4e02d61..1a393824a5 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -535,6 +535,7 @@ def handle_frbc_storage_status( self.save_event( sensor_name="fill level", event_value=message.present_fill_level, + data_source=db.session.get(Source, self.data_source_id), resource_or_actuator_id=resource_id, 
) self._check_and_generate_instructions(resource_id, websocket) @@ -602,6 +603,7 @@ def save_event( sensor_name: str, resource_or_actuator_id: str, event_value: float | pd.Series, + data_source: Source, event_resolution: timedelta | None = None, event_unit: str = "", sensor_unit: str = "", @@ -624,9 +626,11 @@ def save_event( ) return try: - data_source = db.session.get(Source, self.data_source_id) event_value = convert_units( - event_value, event_unit, sensor_unit, event_resolution=self.s2_scheduler.resolution + event_value, + event_unit, + sensor_unit, + event_resolution=self.s2_scheduler.resolution, ) if isinstance(event_value, float): belief = TimedBelief( @@ -804,6 +808,7 @@ def _check_and_generate_instructions( # noqa: C901 sensor_name="power", resource_or_actuator_id=str(result["device"]), event_value=result["data"], + data_source=self.s2_scheduler.data_source, event_resolution=self.s2_scheduler.resolution, event_unit=result["unit"], sensor_unit="W", From 5409e67282fe1ce95a6f98e4c928569da434b2c3 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Tue, 28 Oct 2025 21:14:41 +0100 Subject: [PATCH 128/171] Sending and tracking instruction status update Signed-off-by: Vlad Iftime --- flexmeasures/app.py | 9 ++--- flexmeasures/ws/s2_ws_sync.py | 65 ++++++++++++++++++++++++++++++++--- 2 files changed, 66 insertions(+), 8 deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 0e8aa81896..7ce13be13a 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -297,10 +297,11 @@ def ws_connection_auth(): minutes=5 ) # Match example_schedule_frbc.py resolution - # Align to 5-minute boundary - minutes_offset = now.minute % 5 - start_aligned = now.replace( - minute=now.minute - minutes_offset, second=0, microsecond=0 + # Start schedule 15 minutes from now, aligned to 5-minute boundary + future_time = now + timedelta(minutes=15) + minutes_offset = future_time.minute % 5 + start_aligned = future_time.replace( + minute=future_time.minute - minutes_offset, 
second=0, microsecond=0 ) # Set required attributes for scheduler diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 98dbe6eb84..f01fc87b8c 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -29,6 +29,8 @@ EnergyManagementRole, Handshake, HandshakeResponse, + InstructionStatus, + InstructionStatusUpdate, ReceptionStatus, ReceptionStatusValues, RevokableObjects, @@ -98,6 +100,9 @@ def __init__(self): self.sent_instructions: List[FRBCInstruction] = ( [] ) # Store sent instructions for revocation + self.instruction_statuses: Dict[uuid.UUID, InstructionStatus] = ( + {} + ) # Track status of each instruction by instruction_id def can_compute(self, replanning_frequency: timedelta) -> bool: """Check if enough time has passed since the last compute call.""" @@ -110,6 +115,10 @@ def can_compute(self, replanning_frequency: timedelta) -> bool: def update_compute_time(self) -> None: """Update the last compute time to now.""" self.last_compute_time = datetime.now(timezone.utc) + + def update_instruction_status(self, instruction_id: uuid.UUID, status: InstructionStatus) -> None: + """Update the status of an instruction.""" + self.instruction_statuses[instruction_id] = status class MessageHandlersSync: @@ -230,6 +239,9 @@ def _register_default_handlers(self) -> None: self._handlers.register_handler( ResourceManagerDetails, self.handle_ResourceManagerDetails ) + self._handlers.register_handler( + InstructionStatusUpdate, self.handle_instruction_status_update + ) # Register FRBC message handlers self._handlers.register_handler( FRBCSystemDescription, self.handle_frbc_system_description @@ -388,15 +400,30 @@ def _send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: def _revoke_previous_instructions( self, connection_state: ConnectionState, websocket: Sock ) -> None: - """Revoke all previously sent instructions before sending new ones.""" + """Revoke all previously sent instructions that are still ACCEPTED or 
NEW before sending new ones.""" if not connection_state.sent_instructions: return + # Filter instructions to only revoke those with ACCEPTED or NEW status + # Instructions with other statuses have already been removed from memory + instructions_to_revoke = [ + instr for instr in connection_state.sent_instructions + if connection_state.instruction_statuses.get( + instr.message_id, InstructionStatus.NEW + ) in (InstructionStatus.NEW, InstructionStatus.ACCEPTED) + ] + + if not instructions_to_revoke: + self.app.logger.info("No instructions to revoke (all have been processed or removed)") + connection_state.sent_instructions.clear() + return + self.app.logger.info( - f"Revoking {len(connection_state.sent_instructions)} previous instructions" + f"Revoking {len(instructions_to_revoke)} previous instructions " + f"(out of {len(connection_state.sent_instructions)} total)" ) - for instruction in connection_state.sent_instructions: + for instruction in instructions_to_revoke: revoke_msg = RevokeObject( message_id=uuid.uuid4(), object_type=RevokableObjects.FRBC_Instruction, @@ -404,7 +431,8 @@ def _revoke_previous_instructions( ) self._send_and_forget(revoke_msg, websocket) self.app.logger.info( - f"Sent RevokeObject for instruction {instruction.message_id}" + f"Sent RevokeObject for instruction {instruction.message_id} " + f"(status: {connection_state.instruction_statuses.get(instruction.message_id, InstructionStatus.NEW)})" ) # Clear the list of sent instructions after revoking @@ -487,6 +515,35 @@ def handle_ResourceManagerDetails( self._device_data[resource_id] = FRBCDeviceData() self._device_data[resource_id].resource_id = resource_id + def handle_instruction_status_update( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: + if not isinstance(message, InstructionStatusUpdate): + return + self.app.logger.info( + "Received InstructionStatusUpdate: instruction_id=%s, status=%s", + message.instruction_id, + message.status_type, + ) + + # Get 
the connection state and update instruction status + connection_state = self._connection_states.get(websocket) + if connection_state: + connection_state.update_instruction_status( + message.instruction_id, message.status_type + ) + + # If instruction is rejected, aborted, or revoked, remove it from sent_instructions + if message.status_type not in (InstructionStatus.NEW, InstructionStatus.ACCEPTED): + # Remove the instruction from sent_instructions list + connection_state.sent_instructions = [ + instr for instr in connection_state.sent_instructions + if instr.message_id != message.instruction_id + ] + self.app.logger.info( + f"Removed instruction {message.instruction_id} from memory due to status {message.status_type}" + ) + def handle_frbc_system_description( self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock ) -> None: From f5414c8f9cdf65e41ba74e2add8d3011f0d0874d Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Tue, 28 Oct 2025 22:19:28 +0100 Subject: [PATCH 129/171] Added improved logging Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws_sync.py | 290 +++++++++++++++++++++------------- 1 file changed, 180 insertions(+), 110 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index f01fc87b8c..e93576cc8c 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -258,14 +258,14 @@ def _register_default_handlers(self) -> None: def _ws_handler(self, ws: Sock) -> None: try: - self.app.logger.info("Received connection from client") + self.app.logger.info("๐Ÿ”Œ New WebSocket connection received") self._handle_websocket_connection(ws) except Exception as e: - self.app.logger.error("Error in websocket handler: %s", e) + self.app.logger.error("โŒ WebSocket handler error: %s", e) def _handle_websocket_connection(self, websocket: Sock) -> None: client_id = str(uuid.uuid4()) - self.app.logger.info(f"Client {client_id} connected (sync)") + self.app.logger.info(f"๐Ÿ”— Client connected: 
{client_id[:8]}...") self._connections[client_id] = websocket # Initialize connection state for rate limiting self._connection_states[websocket] = ConnectionState() @@ -274,12 +274,22 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: message = websocket.receive() try: s2_msg = self.s2_parser.parse_as_any_message(message) - self.app.logger.info( - f"Received {s2_msg.message_type} message from client" - ) - - # Don't log verbose messages - verbose_message_types = ["FRBC.UsageForecast"] + + # Log with appropriate emoji based on message type + msg_emoji = { + "Handshake": "๐Ÿค", + "FRBC.SystemDescription": "๐Ÿ“‹", + "FRBC.FillLevelTargetProfile": "๐ŸŽฏ", + "FRBC.StorageStatus": "๐Ÿ”‹", + "FRBC.ActuatorStatus": "โš™๏ธ", + "InstructionStatusUpdate": "๐Ÿ“Š", + "ResourceManagerDetails": "๐Ÿ“", + }.get(s2_msg.message_type, "๐Ÿ“ฅ") + + self.app.logger.info(f"{msg_emoji} {s2_msg.message_type}") + + # Don't log verbose message content + verbose_message_types = ["FRBC.UsageForecast", "FRBC.ActuatorStatus"] if s2_msg.message_type not in verbose_message_types: self.app.logger.debug(s2_msg.to_json()) except json.JSONDecodeError: @@ -294,7 +304,6 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: continue try: if not isinstance(s2_msg, ReceptionStatus): - self.respond_with_reception_status( subject_message_id=s2_msg.message_id, status=ReceptionStatusValues.OK, @@ -307,9 +316,7 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: try: db.session.commit() except Exception as exc: - self.app.logger.warning( - f"Session could not be committed to database: {str(exc)}" - ) + self.app.logger.warning(f"โš ๏ธ DB commit failed: {str(exc)}") except json.JSONDecodeError: self.respond_with_reception_status( subject_message_id=uuid.UUID( @@ -342,7 +349,7 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: self.app.logger.error("Error processing message: %s", str(e)) raise except ConnectionClosed: - 
self.app.logger.info(f"Connection with client {client_id} closed (sync)") + self.app.logger.info(f"๐Ÿ”Œ Connection closed: {client_id[:8]}...") finally: if client_id in self._connections: del self._connections[client_id] @@ -367,7 +374,7 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: if websocket in self._connection_states: del self._connection_states[websocket] - self.app.logger.info(f"Client {client_id} disconnected (sync)") + self.app.logger.info(f"๐Ÿšช Client disconnected: {client_id[:8]}...") def respond_with_reception_status( self, @@ -381,21 +388,18 @@ def respond_with_reception_status( status=status, diagnostic_label=diagnostic_label, ) - self.app.logger.info( - f"Sending reception status {status} for message {subject_message_id} (sync)", - ) + status_emoji = "โœ…" if status == ReceptionStatusValues.OK else "โŒ" + self.app.logger.debug(f"{status_emoji} ReceptionStatus: {status}") try: websocket.send(response.to_json()) except ConnectionClosed: - self.app.logger.warning( - "Connection closed while sending reception status (sync)" - ) + self.app.logger.warning("โš ๏ธ Connection closed during response") def _send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: try: websocket.send(s2_msg.to_json()) except ConnectionClosed: - self.app.logger.warning("Connection closed while sending message (sync)") + self.app.logger.warning("โš ๏ธ Connection closed during send") def _revoke_previous_instructions( self, connection_state: ConnectionState, websocket: Sock @@ -414,13 +418,12 @@ def _revoke_previous_instructions( ] if not instructions_to_revoke: - self.app.logger.info("No instructions to revoke (all have been processed or removed)") + self.app.logger.info("๐Ÿ”„ No instructions to revoke (all processed)") connection_state.sent_instructions.clear() return self.app.logger.info( - f"Revoking {len(instructions_to_revoke)} previous instructions " - f"(out of {len(connection_state.sent_instructions)} total)" + f"๐Ÿ—‘๏ธ Revoking 
{len(instructions_to_revoke)}/{len(connection_state.sent_instructions)} instructions" ) for instruction in instructions_to_revoke: @@ -430,10 +433,8 @@ def _revoke_previous_instructions( object_id=instruction.message_id, ) self._send_and_forget(revoke_msg, websocket) - self.app.logger.info( - f"Sent RevokeObject for instruction {instruction.message_id} " - f"(status: {connection_state.instruction_statuses.get(instruction.message_id, InstructionStatus.NEW)})" - ) + status = connection_state.instruction_statuses.get(instruction.message_id, InstructionStatus.NEW) + self.app.logger.debug(f" ๐Ÿšซ Revoked {str(instruction.message_id)[:8]}... ({status.value})") # Clear the list of sent instructions after revoking connection_state.sent_instructions.clear() @@ -447,6 +448,7 @@ def _filter_instructions_by_operation_mode( filtered = [] last_operation_mode = connection_state.last_operation_mode + skipped = 0 for instruction in instructions: # Always include the first instruction if we haven't sent any before @@ -457,7 +459,12 @@ def _filter_instructions_by_operation_mode( ): filtered.append(instruction) last_operation_mode = instruction.operation_mode + else: + skipped += 1 + if skipped > 0: + self.app.logger.info(f"๐Ÿ”ฝ Filtered: {len(instructions)} โ†’ {len(filtered)} instructions (skipped {skipped} duplicate modes)") + return filtered def handle_handshake( @@ -465,12 +472,11 @@ def handle_handshake( ) -> None: if not isinstance(message, Handshake): return - self.app.logger.info("Received Handshake (sync)") self.app.logger.debug(message.to_json()) if S2_VERSION not in message.supported_protocol_versions: raise NotImplementedError( - f"Server supported protocol {S2_VERSION} not supported by client. 
Client supports: message.supported_protocol_versions" + f"Server protocol {S2_VERSION} not supported by client" ) handshake_response = HandshakeResponse( @@ -478,18 +484,16 @@ def handle_handshake( selected_protocol_version=S2_VERSION, ) self._send_and_forget(handshake_response, websocket) - self.app.logger.info("HandshakeResponse sent (sync)") - self.app.logger.debug(handshake_response) + self.app.logger.info(f"๐Ÿค Handshake complete (protocol {S2_VERSION})") + # If client is RM, send control type selection if hasattr(message, "role") and message.role == EnergyManagementRole.RM: - self.app.logger.debug("Sending control type selection (sync)") select_control_type = SelectControlType( message_id=uuid.uuid4(), control_type=ControlType.FILL_RATE_BASED_CONTROL, ) self._send_and_forget(select_control_type, websocket) - self.app.logger.info("SelectControlType sent (sync)") - self.app.logger.debug(select_control_type) + self.app.logger.info("๐Ÿ“ค SelectControlType: FRBC") def handle_reception_status( self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock @@ -503,10 +507,7 @@ def handle_ResourceManagerDetails( ) -> None: if not isinstance(message, ResourceManagerDetails): return - self.app.logger.info( - "Received ResourceManagerDetails (sync): %s", message.to_json() - ) - + # Store the resource_id from ResourceManagerDetails for device identification resource_id = str(message.resource_id) self._websocket_to_resource[websocket] = resource_id @@ -514,18 +515,15 @@ def handle_ResourceManagerDetails( if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() self._device_data[resource_id].resource_id = resource_id + + self.app.logger.info(f"๐Ÿ“ RM registered: {resource_id[:8]}... 
({message.name})") def handle_instruction_status_update( self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock ) -> None: if not isinstance(message, InstructionStatusUpdate): return - self.app.logger.info( - "Received InstructionStatusUpdate: instruction_id=%s, status=%s", - message.instruction_id, - message.status_type, - ) - + # Get the connection state and update instruction status connection_state = self._connection_states.get(websocket) if connection_state: @@ -533,6 +531,22 @@ def handle_instruction_status_update( message.instruction_id, message.status_type ) + # Status emoji mapping + status_emoji = { + InstructionStatus.NEW: "๐Ÿ†•", + InstructionStatus.ACCEPTED: "โœ…", + InstructionStatus.REJECTED: "โŒ", + InstructionStatus.STARTED: "โ–ถ๏ธ", + InstructionStatus.SUCCEEDED: "๐ŸŽ‰", + InstructionStatus.ABORTED: "โ›”", + InstructionStatus.REVOKED: "๐Ÿšซ", + }.get(message.status_type, "๐Ÿ“Š") + + instr_id_full = str(message.instruction_id) + instr_id_short = instr_id_full[:8] + self.app.logger.info(f"{status_emoji} Instruction {instr_id_short}... โ†’ {message.status_type.value}") + self.app.logger.debug(f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}") + # If instruction is rejected, aborted, or revoked, remove it from sent_instructions if message.status_type not in (InstructionStatus.NEW, InstructionStatus.ACCEPTED): # Remove the instruction from sent_instructions list @@ -540,27 +554,37 @@ def handle_instruction_status_update( instr for instr in connection_state.sent_instructions if instr.message_id != message.instruction_id ] - self.app.logger.info( - f"Removed instruction {message.instruction_id} from memory due to status {message.status_type}" - ) + self.app.logger.debug(f" ๐Ÿ—‘๏ธ Removed {instr_id_short}... 
from memory") def handle_frbc_system_description( self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock ) -> None: if not isinstance(message, FRBCSystemDescription): return - self.app.logger.info("Received FRBCSystemDescription: %s", message.to_json()) # Get resource_id from websocket mapping resource_id = self._websocket_to_resource.get(websocket, "default_resource") self.ensure_resource_is_registered(resource_id=resource_id) self._device_data[resource_id].system_description = message + n_actuators = len(message.actuators) if message.actuators else 0 + + # Log details about actuators for actuator in message.actuators: + n_modes = len(actuator.operation_modes) if actuator.operation_modes else 0 + n_transitions = len(actuator.transitions) if actuator.transitions else 0 + n_timers = len(actuator.timers) if actuator.timers else 0 + self.app.logger.debug(f" โš™๏ธ Actuator {str(actuator.id)[:8]}...: {n_modes} modes, {n_transitions} transitions, {n_timers} timers") self.ensure_actuator_is_registered( actuator_id=str(actuator.id), resource_id=resource_id ) + + # Log storage details + if message.storage: + self.app.logger.debug(f" ๐Ÿ’พ Storage: {message.storage.fill_level_range.start_of_range}-{message.storage.fill_level_range.end_of_range} {message.storage.fill_level_label or '%'}") + self.save_attribute(resource_id, **json.loads(message.to_json())) + self.app.logger.info(f"๐Ÿ“‹ SystemDescription: {n_actuators} actuator(s)") self._check_and_generate_instructions(resource_id, websocket) def handle_frbc_fill_level_target_profile( @@ -568,14 +592,24 @@ def handle_frbc_fill_level_target_profile( ) -> None: if not isinstance(message, FRBCFillLevelTargetProfile): return - self.app.logger.info( - "Received FRBCFillLevelTargetProfile: %s", message.to_json() - ) resource_id = self._websocket_to_resource.get(websocket, "default_resource") self.ensure_resource_is_registered(resource_id=resource_id) self._device_data[resource_id].fill_level_target_profile = message + 
n_elements = len(message.elements) if message.elements else 0 + + # Log target profile details + if message.elements: + try: + # Duration objects have a value in milliseconds + total_duration_ms = sum(int(elem.duration) for elem in message.elements) + total_duration_min = total_duration_ms / 60000 + self.app.logger.debug(f" ๐ŸŽฏ Total duration: {total_duration_min:.0f} min, Start: {message.start_time.strftime('%H:%M:%S')}") + except Exception as e: + self.app.logger.debug(f" ๐ŸŽฏ Start: {message.start_time.strftime('%H:%M:%S')}") + + self.app.logger.info(f"๐ŸŽฏ TargetProfile: {n_elements} element(s)") self._check_and_generate_instructions(resource_id, websocket) def handle_frbc_storage_status( @@ -583,7 +617,6 @@ def handle_frbc_storage_status( ) -> None: if not isinstance(message, FRBCStorageStatus): return - self.app.logger.info("Received FRBCStorageStatus: %s", message.to_json()) resource_id = self._websocket_to_resource.get(websocket, "default_resource") self.ensure_resource_is_registered(resource_id=resource_id) @@ -595,6 +628,7 @@ def handle_frbc_storage_status( data_source=db.session.get(Source, self.data_source_id), resource_or_actuator_id=resource_id, ) + self.app.logger.info(f"๐Ÿ”‹ StorageStatus: {message.present_fill_level:.1f}%") self._check_and_generate_instructions(resource_id, websocket) def handle_frbc_actuator_status( @@ -602,7 +636,6 @@ def handle_frbc_actuator_status( ) -> None: if not isinstance(message, FRBCActuatorStatus): return - self.app.logger.info("Received FRBCActuatorStatus: %s", message.to_json()) resource_id = self._websocket_to_resource.get(websocket, "default_resource") self.ensure_resource_is_registered(resource_id=resource_id) @@ -611,6 +644,7 @@ def handle_frbc_actuator_status( self._device_data[resource_id].actuator_statuses[ str(message.actuator_id) ] = message + self.app.logger.debug(f"โš™๏ธ ActuatorStatus: factor={message.operation_mode_factor}") self._check_and_generate_instructions(resource_id, websocket) def 
ensure_resource_is_registered(self, resource_id: str): @@ -723,49 +757,68 @@ def _check_and_generate_instructions( # noqa: C901 """Check if we have all required data and generate instructions if so.""" device_data = self._device_data.get(resource_id) if device_data: - # Debug log basic attributes - for attr in ( - "system_description", - "fill_level_target_profile", - "storage_status", - ): - self.app.logger.debug( - f"โœ… {attr}? Go flight!" - if getattr(device_data, attr, None) is not None - else f"โŒ {attr}? Hold on.." - ) - - # Debug log actuator statuses - if ( - device_data.system_description - and device_data.system_description.actuators - ): - required_actuators = { - str(a.id) for a in device_data.system_description.actuators - } + # Build detailed status about what's missing + missing_items = [] + + if not device_data.system_description: + missing_items.append("โŒ SystemDescription") + else: + missing_items.append("โœ… SystemDescription") + + if not device_data.fill_level_target_profile: + missing_items.append("โŒ FillLevelTargetProfile") + else: + missing_items.append("โœ… FillLevelTargetProfile") + + if not device_data.storage_status: + missing_items.append("โŒ StorageStatus") + else: + missing_items.append("โœ… StorageStatus") + + # Check actuator statuses in detail + if device_data.system_description and device_data.system_description.actuators: + required_actuators = {str(a.id) for a in device_data.system_description.actuators} received_actuators = set(device_data.actuator_statuses.keys()) missing_actuators = required_actuators - received_actuators - + if missing_actuators: - self.app.logger.debug( - f"โŒ actuator_status? Hold on.. Missing: {missing_actuators}" - ) + missing_items.append(f"โŒ ActuatorStatus ({len(received_actuators)}/{len(required_actuators)} received)") + for missing_id in missing_actuators: + self.app.logger.debug(f" โณ Missing actuator status for: {missing_id}") else: - self.app.logger.debug( - f"โœ… actuator_status? 
Go flight! All {len(required_actuators)} actuators received" - ) + missing_items.append(f"โœ… ActuatorStatus (all {len(required_actuators)} received)") + else: + missing_items.append("โŒ ActuatorStatus (no actuators defined)") + + # Log the status + status_summary = " | ".join(missing_items) + self.app.logger.debug(f"๐Ÿ“Š Device readiness: {status_summary}") + if device_data is None or not device_data.is_complete(): - self.app.logger.info( - f"Waiting for more data from device {resource_id} before running the S2FlaskScheduler" - ) + # Log what's still missing + if device_data is None: + self.app.logger.info(f"โณ No device data yet for {resource_id[:8]}...") + else: + missing = [] + if not device_data.system_description: + missing.append("SystemDescription") + if not device_data.fill_level_target_profile: + missing.append("FillLevelTargetProfile") + if not device_data.storage_status: + missing.append("StorageStatus") + if device_data.system_description and device_data.system_description.actuators: + required = {str(a.id) for a in device_data.system_description.actuators} + received = set(device_data.actuator_statuses.keys()) + if required - received: + missing.append("ActuatorStatus") + + self.app.logger.info(f"โณ Waiting for: {', '.join(missing)}") return # Check rate limiting based on FLEXMEASURES_S2_REPLANNING_FREQUENCY connection_state = self._connection_states.get(websocket) if connection_state is None: - self.app.logger.warning( - f"No connection state found for device {resource_id}" - ) + self.app.logger.warning(f"โš ๏ธ No connection state for {resource_id[:8]}...") return # Parse replanning frequency from config @@ -794,16 +847,12 @@ def _check_and_generate_instructions( # noqa: C901 datetime.now(timezone.utc) - connection_state.last_compute_time ) remaining_time = replanning_frequency - time_since_last - self.app.logger.info( - f"Rate limiting: Cannot generate instructions for device {resource_id}. 
" - f"Last compute was {time_since_last.total_seconds():.1f}s ago. " - f"Need to wait {remaining_time.total_seconds():.1f}s more." + self.app.logger.debug( + f"โฑ๏ธ Rate limit: wait {remaining_time.total_seconds():.0f}s (last: {time_since_last.total_seconds():.0f}s ago)" ) return - self.app.logger.info( - f"All data received for device {resource_id}, generating instructions" - ) + self.app.logger.info(f"๐ŸŽฏ Generating instructions for {resource_id[:8]}...") try: # Use the S2FlaskScheduler to create and store device state @@ -837,30 +886,51 @@ def _check_and_generate_instructions( # noqa: C901 # Revoke previous instructions before sending new ones self._revoke_previous_instructions(connection_state, websocket) + # Log instruction summary before sending + if filtered_instructions: + self.app.logger.info(f"๐Ÿ“ค Sending {len(filtered_instructions)} instruction(s):") + # Send new instructions and store them - for instruction in filtered_instructions: + for idx, instruction in enumerate(filtered_instructions, 1): self._send_and_forget(instruction, websocket) - self.app.logger.info( - f"Sent FRBC instruction: {instruction.to_json()}" - ) + + # Full IDs + instr_id_full = str(instruction.message_id) + mode_id_full = str(instruction.operation_mode) + actuator_id_full = str(instruction.actuator_id) + + # Short IDs for compact display + instr_id_short = instr_id_full[:8] + mode_id_short = mode_id_full[:8] + actuator_short = actuator_id_full[:8] + + exec_time = instruction.execution_time.strftime("%H:%M:%S") if hasattr(instruction.execution_time, 'strftime') else str(instruction.execution_time) + factor = instruction.operation_mode_factor + + # Log with short IDs for readability + self.app.logger.info(f" {idx}. {instr_id_short}... | mode: {mode_id_short}... | factor: {factor:.2f} | actuator: {actuator_short}... 
| exec: {exec_time}") + + # Log full IDs at debug level + self.app.logger.debug(f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}") + self.app.logger.debug(f" ๐Ÿ”ง Full operation mode ID: {mode_id_full}") + self.app.logger.debug(f" โš™๏ธ Full actuator ID: {actuator_id_full}") + # Update the last operation mode for this connection connection_state.last_operation_mode = instruction.operation_mode # Store the sent instructions for future revocation connection_state.sent_instructions = filtered_instructions.copy() - # Log filtering results - if len(frbc_instructions) > len(filtered_instructions): - self.app.logger.info( - f"Filtered instructions: {len(frbc_instructions)} -> {len(filtered_instructions)} " - f"(reduced by {len(frbc_instructions) - len(filtered_instructions)})" - ) - # Process non-instruction results try: + energy_data_count = 0 for result in schedule_results: if isinstance(result, dict) and "device" in result: - self.app.logger.debug(f"Saving result: {result}") + energy_data_count += 1 + device_short = str(result["device"])[:8] + if isinstance(result.get("data"), pd.Series): + n_values = len(result["data"]) + self.app.logger.debug(f" ๐Ÿ’พ Saving {n_values} energy values for device {device_short}... ({result.get('unit', '?')})") self.save_event( sensor_name="power", resource_or_actuator_id=str(result["device"]), @@ -870,10 +940,10 @@ def _check_and_generate_instructions( # noqa: C901 event_unit=result["unit"], sensor_unit="W", ) + if energy_data_count > 0: + self.app.logger.info(f"๐Ÿ’พ Saved energy data for {energy_data_count} device(s)") except Exception as exc: - self.app.logger.warning( - f"Processing non-instruction results failed: {str(exc)}" - ) + self.app.logger.warning(f"โš ๏ธ Energy data save failed: {str(exc)}") else: # Scheduler not available - log warning and skip instruction generation self.app.logger.warning( From 8f5f02bc94844c13a6d458055df7fe9cb7f4864a Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Sat, 25 Oct 2025 01:25:13 +0200 Subject: [PATCH 130/171] docs: fix typos Signed-off-by: F.N. Claessen --- flexmeasures/data/models/data_sources.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flexmeasures/data/models/data_sources.py b/flexmeasures/data/models/data_sources.py index 22009d5026..aae922e09f 100644 --- a/flexmeasures/data/models/data_sources.py +++ b/flexmeasures/data/models/data_sources.py @@ -142,8 +142,8 @@ def get_data_source_info(cls: type) -> dict: @property def data_source(self) -> "DataSource": """DataSource property derived from the `source_info`: `source_type` (scheduler, forecaster or reporter), `model` (e.g AggregatorReporter) - and `attributes`. It looks for a data source in the database the marges the `source_info` and, in case of not finding any, it creates a new one. - This property gets created once and it's cached for the rest of the lifetime of the DataGenerator object. + and `attributes`. It looks for a data source in the database that matches the `source_info` and, in case of not finding any, it creates a new one. + This property gets created once, and it's cached for the rest of the lifetime of the DataGenerator object. """ from flexmeasures.data.services.data_sources import get_or_create_source From 7456359321d0565f25f9daa20e34170131d279b7 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 29 Oct 2025 10:39:27 +0100 Subject: [PATCH 131/171] feat: save scheduled fill level Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 70 +++++++++++++++++++---------------- 1 file changed, 39 insertions(+), 31 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index e93576cc8c..08d89e234f 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -115,7 +115,7 @@ def can_compute(self, replanning_frequency: timedelta) -> bool: def update_compute_time(self) -> None: """Update the last compute time to now.""" self.last_compute_time = datetime.now(timezone.utc) - + def update_instruction_status(self, instruction_id: uuid.UUID, status: InstructionStatus) -> None: """Update the status of an instruction.""" self.instruction_statuses[instruction_id] = status @@ -274,7 +274,7 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: message = websocket.receive() try: s2_msg = self.s2_parser.parse_as_any_message(message) - + # Log with appropriate emoji based on message type msg_emoji = { "Handshake": "๐Ÿค", @@ -285,7 +285,7 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: "InstructionStatusUpdate": "๐Ÿ“Š", "ResourceManagerDetails": "๐Ÿ“", }.get(s2_msg.message_type, "๐Ÿ“ฅ") - + self.app.logger.info(f"{msg_emoji} {s2_msg.message_type}") # Don't log verbose message content @@ -464,7 +464,7 @@ def _filter_instructions_by_operation_mode( if skipped > 0: self.app.logger.info(f"๐Ÿ”ฝ Filtered: {len(instructions)} โ†’ {len(filtered)} instructions (skipped {skipped} duplicate modes)") - + return filtered def handle_handshake( @@ -485,7 +485,7 @@ def handle_handshake( ) self._send_and_forget(handshake_response, websocket) self.app.logger.info(f"๐Ÿค Handshake complete (protocol {S2_VERSION})") - + # If client is RM, send control type selection if hasattr(message, "role") and message.role == EnergyManagementRole.RM: select_control_type = SelectControlType( @@ -507,7 +507,7 @@ def handle_ResourceManagerDetails( ) -> None: if not isinstance(message, ResourceManagerDetails): return - + 
# Store the resource_id from ResourceManagerDetails for device identification resource_id = str(message.resource_id) self._websocket_to_resource[websocket] = resource_id @@ -515,7 +515,7 @@ def handle_ResourceManagerDetails( if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() self._device_data[resource_id].resource_id = resource_id - + self.app.logger.info(f"๐Ÿ“ RM registered: {resource_id[:8]}... ({message.name})") def handle_instruction_status_update( @@ -523,14 +523,14 @@ def handle_instruction_status_update( ) -> None: if not isinstance(message, InstructionStatusUpdate): return - + # Get the connection state and update instruction status connection_state = self._connection_states.get(websocket) if connection_state: connection_state.update_instruction_status( message.instruction_id, message.status_type ) - + # Status emoji mapping status_emoji = { InstructionStatus.NEW: "๐Ÿ†•", @@ -541,12 +541,12 @@ def handle_instruction_status_update( InstructionStatus.ABORTED: "โ›”", InstructionStatus.REVOKED: "๐Ÿšซ", }.get(message.status_type, "๐Ÿ“Š") - + instr_id_full = str(message.instruction_id) instr_id_short = instr_id_full[:8] self.app.logger.info(f"{status_emoji} Instruction {instr_id_short}... 
โ†’ {message.status_type.value}") self.app.logger.debug(f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}") - + # If instruction is rejected, aborted, or revoked, remove it from sent_instructions if message.status_type not in (InstructionStatus.NEW, InstructionStatus.ACCEPTED): # Remove the instruction from sent_instructions list @@ -568,7 +568,7 @@ def handle_frbc_system_description( self._device_data[resource_id].system_description = message n_actuators = len(message.actuators) if message.actuators else 0 - + # Log details about actuators for actuator in message.actuators: n_modes = len(actuator.operation_modes) if actuator.operation_modes else 0 @@ -578,11 +578,11 @@ def handle_frbc_system_description( self.ensure_actuator_is_registered( actuator_id=str(actuator.id), resource_id=resource_id ) - + # Log storage details if message.storage: self.app.logger.debug(f" ๐Ÿ’พ Storage: {message.storage.fill_level_range.start_of_range}-{message.storage.fill_level_range.end_of_range} {message.storage.fill_level_label or '%'}") - + self.save_attribute(resource_id, **json.loads(message.to_json())) self.app.logger.info(f"๐Ÿ“‹ SystemDescription: {n_actuators} actuator(s)") self._check_and_generate_instructions(resource_id, websocket) @@ -598,7 +598,7 @@ def handle_frbc_fill_level_target_profile( self._device_data[resource_id].fill_level_target_profile = message n_elements = len(message.elements) if message.elements else 0 - + # Log target profile details if message.elements: try: @@ -608,7 +608,7 @@ def handle_frbc_fill_level_target_profile( self.app.logger.debug(f" ๐ŸŽฏ Total duration: {total_duration_min:.0f} min, Start: {message.start_time.strftime('%H:%M:%S')}") except Exception as e: self.app.logger.debug(f" ๐ŸŽฏ Start: {message.start_time.strftime('%H:%M:%S')}") - + self.app.logger.info(f"๐ŸŽฏ TargetProfile: {n_elements} element(s)") self._check_and_generate_instructions(resource_id, websocket) @@ -759,28 +759,28 @@ def _check_and_generate_instructions( # noqa: C901 if 
device_data: # Build detailed status about what's missing missing_items = [] - + if not device_data.system_description: missing_items.append("โŒ SystemDescription") else: missing_items.append("โœ… SystemDescription") - + if not device_data.fill_level_target_profile: missing_items.append("โŒ FillLevelTargetProfile") else: missing_items.append("โœ… FillLevelTargetProfile") - + if not device_data.storage_status: missing_items.append("โŒ StorageStatus") else: missing_items.append("โœ… StorageStatus") - + # Check actuator statuses in detail if device_data.system_description and device_data.system_description.actuators: required_actuators = {str(a.id) for a in device_data.system_description.actuators} received_actuators = set(device_data.actuator_statuses.keys()) missing_actuators = required_actuators - received_actuators - + if missing_actuators: missing_items.append(f"โŒ ActuatorStatus ({len(received_actuators)}/{len(required_actuators)} received)") for missing_id in missing_actuators: @@ -789,11 +789,11 @@ def _check_and_generate_instructions( # noqa: C901 missing_items.append(f"โœ… ActuatorStatus (all {len(required_actuators)} received)") else: missing_items.append("โŒ ActuatorStatus (no actuators defined)") - + # Log the status status_summary = " | ".join(missing_items) self.app.logger.debug(f"๐Ÿ“Š Device readiness: {status_summary}") - + if device_data is None or not device_data.is_complete(): # Log what's still missing if device_data is None: @@ -811,7 +811,7 @@ def _check_and_generate_instructions( # noqa: C901 received = set(device_data.actuator_statuses.keys()) if required - received: missing.append("ActuatorStatus") - + self.app.logger.info(f"โณ Waiting for: {', '.join(missing)}") return @@ -889,32 +889,32 @@ def _check_and_generate_instructions( # noqa: C901 # Log instruction summary before sending if filtered_instructions: self.app.logger.info(f"๐Ÿ“ค Sending {len(filtered_instructions)} instruction(s):") - + # Send new instructions and store them for 
idx, instruction in enumerate(filtered_instructions, 1): self._send_and_forget(instruction, websocket) - + # Full IDs instr_id_full = str(instruction.message_id) mode_id_full = str(instruction.operation_mode) actuator_id_full = str(instruction.actuator_id) - + # Short IDs for compact display instr_id_short = instr_id_full[:8] mode_id_short = mode_id_full[:8] actuator_short = actuator_id_full[:8] - + exec_time = instruction.execution_time.strftime("%H:%M:%S") if hasattr(instruction.execution_time, 'strftime') else str(instruction.execution_time) factor = instruction.operation_mode_factor - + # Log with short IDs for readability self.app.logger.info(f" {idx}. {instr_id_short}... | mode: {mode_id_short}... | factor: {factor:.2f} | actuator: {actuator_short}... | exec: {exec_time}") - + # Log full IDs at debug level self.app.logger.debug(f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}") self.app.logger.debug(f" ๐Ÿ”ง Full operation mode ID: {mode_id_full}") self.app.logger.debug(f" โš™๏ธ Full actuator ID: {actuator_id_full}") - + # Update the last operation mode for this connection connection_state.last_operation_mode = instruction.operation_mode @@ -940,6 +940,14 @@ def _check_and_generate_instructions( # noqa: C901 event_unit=result["unit"], sensor_unit="W", ) + if isinstance(result, dict) and "fill level" in result: + self.app.logger.debug(f"Saving result: {result}") + self.save_event( + sensor_name="fill level", + resource_or_actuator_id=str(result["device"]), + event_value=result["data"], + data_source=self.s2_scheduler.data_source, + ) if energy_data_count > 0: self.app.logger.info(f"๐Ÿ’พ Saved energy data for {energy_data_count} device(s)") except Exception as exc: From 622d12f7d2f468640456a90351016b75681981f2 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 29 Oct 2025 11:16:28 +0100 Subject: [PATCH 132/171] fix: wrong dict key Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 08d89e234f..46d305d518 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -944,7 +944,7 @@ def _check_and_generate_instructions( # noqa: C901 self.app.logger.debug(f"Saving result: {result}") self.save_event( sensor_name="fill level", - resource_or_actuator_id=str(result["device"]), + resource_or_actuator_id=str(result["fill level"]), event_value=result["data"], data_source=self.s2_scheduler.data_source, ) From a1b2943daf1ac58391e77b9099c8dbc0bc1b6051 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Oct 2025 16:23:22 +0100 Subject: [PATCH 133/171] feat: save PowerMeasurement Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 46d305d518..eead53426e 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -31,6 +31,7 @@ HandshakeResponse, InstructionStatus, InstructionStatusUpdate, + PowerMeasurement, ReceptionStatus, ReceptionStatusValues, RevokableObjects, @@ -255,6 +256,9 @@ def _register_default_handlers(self) -> None: self._handlers.register_handler( FRBCActuatorStatus, self.handle_frbc_actuator_status ) + self._handlers.register_handler( + PowerMeasurement, self.handle_power_measurement + ) def _ws_handler(self, ws: Sock) -> None: try: @@ -284,6 +288,7 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: "FRBC.ActuatorStatus": "โš™๏ธ", "InstructionStatusUpdate": "๐Ÿ“Š", "ResourceManagerDetails": "๐Ÿ“", + "PowerMeasurement": "โšก", }.get(s2_msg.message_type, "๐Ÿ“ฅ") self.app.logger.info(f"{msg_emoji} {s2_msg.message_type}") @@ -612,6 +617,25 @@ def handle_frbc_fill_level_target_profile( self.app.logger.info(f"๐ŸŽฏ TargetProfile: 
{n_elements} element(s)") self._check_and_generate_instructions(resource_id, websocket) + def handle_power_measurement( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: + if not isinstance(message, PowerMeasurement): + return + + resource_id = self._websocket_to_resource.get(websocket, "default_resource") + self.ensure_resource_is_registered(resource_id=resource_id) + + power_measurements = message.values + for measurement in power_measurements: + self.save_event( + sensor_name=measurement.commodity_quantity, + event_value=message.value, + event_start=message.measurement_timestamp, + data_source=db.session.get(Source, self.data_source_id), + resource_or_actuator_id=resource_id, + ) + def handle_frbc_storage_status( self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock ) -> None: @@ -694,6 +718,7 @@ def save_event( sensor_name: str, resource_or_actuator_id: str, event_value: float | pd.Series, + event_start: str, data_source: Source, event_resolution: timedelta | None = None, event_unit: str = "", @@ -727,7 +752,7 @@ def save_event( belief = TimedBelief( sensor=sensor, source=data_source, - event_start=floored_server_now(self._minimum_measurement_period), + event_start=event_start or floored_server_now(self._minimum_measurement_period), event_value=event_value, belief_time=server_now(), cumulative_probability=0.5, From 9af1f8da4f62cb710bb94fd1176bd803718df0cc Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Oct 2025 16:23:42 +0100 Subject: [PATCH 134/171] style: black Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 123 +++++++++++++++++++++++++--------- 1 file changed, 93 insertions(+), 30 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index eead53426e..b43ffda06d 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -117,7 +117,9 @@ def update_compute_time(self) -> None: """Update the last compute time to now.""" self.last_compute_time = datetime.now(timezone.utc) - def update_instruction_status(self, instruction_id: uuid.UUID, status: InstructionStatus) -> None: + def update_instruction_status( + self, instruction_id: uuid.UUID, status: InstructionStatus + ) -> None: """Update the status of an instruction.""" self.instruction_statuses[instruction_id] = status @@ -294,7 +296,10 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: self.app.logger.info(f"{msg_emoji} {s2_msg.message_type}") # Don't log verbose message content - verbose_message_types = ["FRBC.UsageForecast", "FRBC.ActuatorStatus"] + verbose_message_types = [ + "FRBC.UsageForecast", + "FRBC.ActuatorStatus", + ] if s2_msg.message_type not in verbose_message_types: self.app.logger.debug(s2_msg.to_json()) except json.JSONDecodeError: @@ -416,10 +421,12 @@ def _revoke_previous_instructions( # Filter instructions to only revoke those with ACCEPTED or NEW status # Instructions with other statuses have already been removed from memory instructions_to_revoke = [ - instr for instr in connection_state.sent_instructions + instr + for instr in connection_state.sent_instructions if connection_state.instruction_statuses.get( instr.message_id, InstructionStatus.NEW - ) in (InstructionStatus.NEW, InstructionStatus.ACCEPTED) + ) + in (InstructionStatus.NEW, InstructionStatus.ACCEPTED) ] if not instructions_to_revoke: @@ -438,8 +445,12 @@ def _revoke_previous_instructions( object_id=instruction.message_id, ) self._send_and_forget(revoke_msg, websocket) - status = 
connection_state.instruction_statuses.get(instruction.message_id, InstructionStatus.NEW) - self.app.logger.debug(f" ๐Ÿšซ Revoked {str(instruction.message_id)[:8]}... ({status.value})") + status = connection_state.instruction_statuses.get( + instruction.message_id, InstructionStatus.NEW + ) + self.app.logger.debug( + f" ๐Ÿšซ Revoked {str(instruction.message_id)[:8]}... ({status.value})" + ) # Clear the list of sent instructions after revoking connection_state.sent_instructions.clear() @@ -468,7 +479,9 @@ def _filter_instructions_by_operation_mode( skipped += 1 if skipped > 0: - self.app.logger.info(f"๐Ÿ”ฝ Filtered: {len(instructions)} โ†’ {len(filtered)} instructions (skipped {skipped} duplicate modes)") + self.app.logger.info( + f"๐Ÿ”ฝ Filtered: {len(instructions)} โ†’ {len(filtered)} instructions (skipped {skipped} duplicate modes)" + ) return filtered @@ -549,14 +562,20 @@ def handle_instruction_status_update( instr_id_full = str(message.instruction_id) instr_id_short = instr_id_full[:8] - self.app.logger.info(f"{status_emoji} Instruction {instr_id_short}... โ†’ {message.status_type.value}") + self.app.logger.info( + f"{status_emoji} Instruction {instr_id_short}... โ†’ {message.status_type.value}" + ) self.app.logger.debug(f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}") # If instruction is rejected, aborted, or revoked, remove it from sent_instructions - if message.status_type not in (InstructionStatus.NEW, InstructionStatus.ACCEPTED): + if message.status_type not in ( + InstructionStatus.NEW, + InstructionStatus.ACCEPTED, + ): # Remove the instruction from sent_instructions list connection_state.sent_instructions = [ - instr for instr in connection_state.sent_instructions + instr + for instr in connection_state.sent_instructions if instr.message_id != message.instruction_id ] self.app.logger.debug(f" ๐Ÿ—‘๏ธ Removed {instr_id_short}... 
from memory") @@ -579,14 +598,18 @@ def handle_frbc_system_description( n_modes = len(actuator.operation_modes) if actuator.operation_modes else 0 n_transitions = len(actuator.transitions) if actuator.transitions else 0 n_timers = len(actuator.timers) if actuator.timers else 0 - self.app.logger.debug(f" โš™๏ธ Actuator {str(actuator.id)[:8]}...: {n_modes} modes, {n_transitions} transitions, {n_timers} timers") + self.app.logger.debug( + f" โš™๏ธ Actuator {str(actuator.id)[:8]}...: {n_modes} modes, {n_transitions} transitions, {n_timers} timers" + ) self.ensure_actuator_is_registered( actuator_id=str(actuator.id), resource_id=resource_id ) # Log storage details if message.storage: - self.app.logger.debug(f" ๐Ÿ’พ Storage: {message.storage.fill_level_range.start_of_range}-{message.storage.fill_level_range.end_of_range} {message.storage.fill_level_label or '%'}") + self.app.logger.debug( + f" ๐Ÿ’พ Storage: {message.storage.fill_level_range.start_of_range}-{message.storage.fill_level_range.end_of_range} {message.storage.fill_level_label or '%'}" + ) self.save_attribute(resource_id, **json.loads(message.to_json())) self.app.logger.info(f"๐Ÿ“‹ SystemDescription: {n_actuators} actuator(s)") @@ -610,9 +633,13 @@ def handle_frbc_fill_level_target_profile( # Duration objects have a value in milliseconds total_duration_ms = sum(int(elem.duration) for elem in message.elements) total_duration_min = total_duration_ms / 60000 - self.app.logger.debug(f" ๐ŸŽฏ Total duration: {total_duration_min:.0f} min, Start: {message.start_time.strftime('%H:%M:%S')}") + self.app.logger.debug( + f" ๐ŸŽฏ Total duration: {total_duration_min:.0f} min, Start: {message.start_time.strftime('%H:%M:%S')}" + ) except Exception as e: - self.app.logger.debug(f" ๐ŸŽฏ Start: {message.start_time.strftime('%H:%M:%S')}") + self.app.logger.debug( + f" ๐ŸŽฏ Start: {message.start_time.strftime('%H:%M:%S')}" + ) self.app.logger.info(f"๐ŸŽฏ TargetProfile: {n_elements} element(s)") 
self._check_and_generate_instructions(resource_id, websocket) @@ -668,7 +695,9 @@ def handle_frbc_actuator_status( self._device_data[resource_id].actuator_statuses[ str(message.actuator_id) ] = message - self.app.logger.debug(f"โš™๏ธ ActuatorStatus: factor={message.operation_mode_factor}") + self.app.logger.debug( + f"โš™๏ธ ActuatorStatus: factor={message.operation_mode_factor}" + ) self._check_and_generate_instructions(resource_id, websocket) def ensure_resource_is_registered(self, resource_id: str): @@ -801,17 +830,28 @@ def _check_and_generate_instructions( # noqa: C901 missing_items.append("โœ… StorageStatus") # Check actuator statuses in detail - if device_data.system_description and device_data.system_description.actuators: - required_actuators = {str(a.id) for a in device_data.system_description.actuators} + if ( + device_data.system_description + and device_data.system_description.actuators + ): + required_actuators = { + str(a.id) for a in device_data.system_description.actuators + } received_actuators = set(device_data.actuator_statuses.keys()) missing_actuators = required_actuators - received_actuators if missing_actuators: - missing_items.append(f"โŒ ActuatorStatus ({len(received_actuators)}/{len(required_actuators)} received)") + missing_items.append( + f"โŒ ActuatorStatus ({len(received_actuators)}/{len(required_actuators)} received)" + ) for missing_id in missing_actuators: - self.app.logger.debug(f" โณ Missing actuator status for: {missing_id}") + self.app.logger.debug( + f" โณ Missing actuator status for: {missing_id}" + ) else: - missing_items.append(f"โœ… ActuatorStatus (all {len(required_actuators)} received)") + missing_items.append( + f"โœ… ActuatorStatus (all {len(required_actuators)} received)" + ) else: missing_items.append("โŒ ActuatorStatus (no actuators defined)") @@ -831,8 +871,13 @@ def _check_and_generate_instructions( # noqa: C901 missing.append("FillLevelTargetProfile") if not device_data.storage_status: 
missing.append("StorageStatus") - if device_data.system_description and device_data.system_description.actuators: - required = {str(a.id) for a in device_data.system_description.actuators} + if ( + device_data.system_description + and device_data.system_description.actuators + ): + required = { + str(a.id) for a in device_data.system_description.actuators + } received = set(device_data.actuator_statuses.keys()) if required - received: missing.append("ActuatorStatus") @@ -913,7 +958,9 @@ def _check_and_generate_instructions( # noqa: C901 # Log instruction summary before sending if filtered_instructions: - self.app.logger.info(f"๐Ÿ“ค Sending {len(filtered_instructions)} instruction(s):") + self.app.logger.info( + f"๐Ÿ“ค Sending {len(filtered_instructions)} instruction(s):" + ) # Send new instructions and store them for idx, instruction in enumerate(filtered_instructions, 1): @@ -929,16 +976,28 @@ def _check_and_generate_instructions( # noqa: C901 mode_id_short = mode_id_full[:8] actuator_short = actuator_id_full[:8] - exec_time = instruction.execution_time.strftime("%H:%M:%S") if hasattr(instruction.execution_time, 'strftime') else str(instruction.execution_time) + exec_time = ( + instruction.execution_time.strftime("%H:%M:%S") + if hasattr(instruction.execution_time, "strftime") + else str(instruction.execution_time) + ) factor = instruction.operation_mode_factor # Log with short IDs for readability - self.app.logger.info(f" {idx}. {instr_id_short}... | mode: {mode_id_short}... | factor: {factor:.2f} | actuator: {actuator_short}... | exec: {exec_time}") + self.app.logger.info( + f" {idx}. {instr_id_short}... | mode: {mode_id_short}... | factor: {factor:.2f} | actuator: {actuator_short}... 
| exec: {exec_time}" + ) # Log full IDs at debug level - self.app.logger.debug(f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}") - self.app.logger.debug(f" ๐Ÿ”ง Full operation mode ID: {mode_id_full}") - self.app.logger.debug(f" โš™๏ธ Full actuator ID: {actuator_id_full}") + self.app.logger.debug( + f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}" + ) + self.app.logger.debug( + f" ๐Ÿ”ง Full operation mode ID: {mode_id_full}" + ) + self.app.logger.debug( + f" โš™๏ธ Full actuator ID: {actuator_id_full}" + ) # Update the last operation mode for this connection connection_state.last_operation_mode = instruction.operation_mode @@ -955,7 +1014,9 @@ def _check_and_generate_instructions( # noqa: C901 device_short = str(result["device"])[:8] if isinstance(result.get("data"), pd.Series): n_values = len(result["data"]) - self.app.logger.debug(f" ๐Ÿ’พ Saving {n_values} energy values for device {device_short}... ({result.get('unit', '?')})") + self.app.logger.debug( + f" ๐Ÿ’พ Saving {n_values} energy values for device {device_short}... 
({result.get('unit', '?')})" + ) self.save_event( sensor_name="power", resource_or_actuator_id=str(result["device"]), @@ -974,7 +1035,9 @@ def _check_and_generate_instructions( # noqa: C901 data_source=self.s2_scheduler.data_source, ) if energy_data_count > 0: - self.app.logger.info(f"๐Ÿ’พ Saved energy data for {energy_data_count} device(s)") + self.app.logger.info( + f"๐Ÿ’พ Saved energy data for {energy_data_count} device(s)" + ) except Exception as exc: self.app.logger.warning(f"โš ๏ธ Energy data save failed: {str(exc)}") else: From 3e7534c50e531b75516f5854e5837176ff9d1aeb Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Wed, 5 Nov 2025 11:27:55 +0100 Subject: [PATCH 135/171] 15 minute delay for schedule and fixed revoking --- flexmeasures/app.py | 21 ++++++--------------- flexmeasures/ws/s2_ws_sync.py | 19 +++++++++++++++++-- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 7ce13be13a..8247df0aa8 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -293,26 +293,17 @@ def ws_connection_auth(): # Set basic time parameters now = datetime.now(timezone.utc) - resolution = timedelta( - minutes=5 - ) # Match example_schedule_frbc.py resolution - - # Start schedule 15 minutes from now, aligned to 5-minute boundary - future_time = now + timedelta(minutes=15) - minutes_offset = future_time.minute % 5 - start_aligned = future_time.replace( - minute=future_time.minute - minutes_offset, second=0, microsecond=0 - ) + resolution = timedelta(minutes=5) # Set required attributes for scheduler + # Note: start, end, and belief_time will be recalculated dynamically + # in s2_ws_sync.py before each scheduler call scheduler.sensor = None scheduler.asset = None - scheduler.start = start_aligned - scheduler.end = start_aligned + timedelta( - hours=24 - ) # 24-hour planning window + scheduler.start = now + scheduler.end = now + timedelta(hours=24) scheduler.resolution = resolution - scheduler.belief_time = 
start_aligned + scheduler.belief_time = now scheduler.round_to_decimals = 6 scheduler.flex_model = {} scheduler.flex_context = {} diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index b43ffda06d..bd7df31168 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -442,14 +442,14 @@ def _revoke_previous_instructions( revoke_msg = RevokeObject( message_id=uuid.uuid4(), object_type=RevokableObjects.FRBC_Instruction, - object_id=instruction.message_id, + object_id=instruction.instruction_id, ) self._send_and_forget(revoke_msg, websocket) status = connection_state.instruction_statuses.get( instruction.message_id, InstructionStatus.NEW ) self.app.logger.debug( - f" ๐Ÿšซ Revoked {str(instruction.message_id)[:8]}... ({status.value})" + f" ๐Ÿšซ Revoked instruction {str(instruction.instruction_id)[:8]}... ({status.value})" ) # Clear the list of sent instructions after revoking @@ -940,6 +940,21 @@ def _check_and_generate_instructions( # noqa: C901 # Update the compute time before calling the scheduler connection_state.update_compute_time() + # Recalculate scheduler start time: 15 minutes from now, aligned to 5-minute boundary + now = datetime.now(timezone.utc) + future_time = now + timedelta(minutes=15) + minutes_offset = future_time.minute % 5 + start_aligned = future_time.replace( + minute=future_time.minute - minutes_offset, second=0, microsecond=0 + ) + + # Update scheduler time window + self.s2_scheduler.start = start_aligned + self.s2_scheduler.end = start_aligned + timedelta(hours=24) # 24-hour planning window + self.s2_scheduler.belief_time = start_aligned + + self.app.logger.debug(f"๐Ÿ• Scheduler window: {start_aligned.strftime('%Y-%m-%d %H:%M:%S')} โ†’ {self.s2_scheduler.end.strftime('%Y-%m-%d %H:%M:%S')}") + # Generate instructions using the scheduler (this may query the database for costs) schedule_results = self.s2_scheduler.compute() From 5131bab89a2a6462a153eee88a314bc6bdd95718 Mon Sep 17 00:00:00 2001 From: 
"F.N. Claessen" Date: Wed, 5 Nov 2025 11:54:29 +0100 Subject: [PATCH 136/171] feat: use TEMPORARY_ERROR instead of PERMANENT_ERROR for field tests Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index bd7df31168..3359bb6e41 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -156,7 +156,7 @@ def handle_message( "message_id", uuid.UUID("00000000-0000-0000-0000-000000000000"), ), - status=ReceptionStatusValues.PERMANENT_ERROR, + status=ReceptionStatusValues.TEMPORARY_ERROR, diagnostic_label=f"While processing message {message_id} an unrecoverable error occurred.", websocket=websocket, ) From aa08c8556fc22877cf5fd0abc59d0238234a60a0 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 5 Nov 2025 11:54:55 +0100 Subject: [PATCH 137/171] style: black Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 3359bb6e41..961954a98a 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -258,9 +258,7 @@ def _register_default_handlers(self) -> None: self._handlers.register_handler( FRBCActuatorStatus, self.handle_frbc_actuator_status ) - self._handlers.register_handler( - PowerMeasurement, self.handle_power_measurement - ) + self._handlers.register_handler(PowerMeasurement, self.handle_power_measurement) def _ws_handler(self, ws: Sock) -> None: try: @@ -781,7 +779,8 @@ def save_event( belief = TimedBelief( sensor=sensor, source=data_source, - event_start=event_start or floored_server_now(self._minimum_measurement_period), + event_start=event_start + or floored_server_now(self._minimum_measurement_period), event_value=event_value, belief_time=server_now(), cumulative_probability=0.5, @@ -947,13 +946,17 @@ def 
_check_and_generate_instructions( # noqa: C901 start_aligned = future_time.replace( minute=future_time.minute - minutes_offset, second=0, microsecond=0 ) - + # Update scheduler time window self.s2_scheduler.start = start_aligned - self.s2_scheduler.end = start_aligned + timedelta(hours=24) # 24-hour planning window + self.s2_scheduler.end = start_aligned + timedelta( + hours=24 + ) # 24-hour planning window self.s2_scheduler.belief_time = start_aligned - - self.app.logger.debug(f"๐Ÿ• Scheduler window: {start_aligned.strftime('%Y-%m-%d %H:%M:%S')} โ†’ {self.s2_scheduler.end.strftime('%Y-%m-%d %H:%M:%S')}") + + self.app.logger.debug( + f"๐Ÿ• Scheduler window: {start_aligned.strftime('%Y-%m-%d %H:%M:%S')} โ†’ {self.s2_scheduler.end.strftime('%Y-%m-%d %H:%M:%S')}" + ) # Generate instructions using the scheduler (this may query the database for costs) schedule_results = self.s2_scheduler.compute() From f7f44d9c4c7018c5e23cd893c620c59f92a3e0b7 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 5 Nov 2025 11:56:14 +0100 Subject: [PATCH 138/171] fix: PowerMeasurement.values Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 961954a98a..c6d867a5ca 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -653,13 +653,18 @@ def handle_power_measurement( power_measurements = message.values for measurement in power_measurements: - self.save_event( - sensor_name=measurement.commodity_quantity, - event_value=message.value, - event_start=message.measurement_timestamp, - data_source=db.session.get(Source, self.data_source_id), - resource_or_actuator_id=resource_id, - ) + try: + self.save_event( + sensor_name=measurement.commodity_quantity, + event_value=message.values, + event_start=message.measurement_timestamp, + data_source=db.session.get(Source, self.data_source_id), + resource_or_actuator_id=resource_id, + ) + except Exception as exc: + self.app.logger.warning( + f"PowerMeasurement could not be saved: {str(exc)}" + ) def handle_frbc_storage_status( self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock From 36137dbf1e609bc52a94dee11085522e176b1674 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 5 Nov 2025 12:03:21 +0100 Subject: [PATCH 139/171] fix: coding error Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index c6d867a5ca..cce3cf0eeb 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -750,8 +750,8 @@ def save_event( sensor_name: str, resource_or_actuator_id: str, event_value: float | pd.Series, - event_start: str, data_source: Source, + event_start: str | None = None, event_resolution: timedelta | None = None, event_unit: str = "", sensor_unit: str = "", From 63b7bbc5adf2ea83274a9b0359ae2bce17124f2a Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Wed, 5 Nov 2025 12:12:39 +0100 Subject: [PATCH 140/171] fix: instruction.id rather than instruction.instruction_id Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index cce3cf0eeb..5bf1887ec2 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -440,14 +440,14 @@ def _revoke_previous_instructions( revoke_msg = RevokeObject( message_id=uuid.uuid4(), object_type=RevokableObjects.FRBC_Instruction, - object_id=instruction.instruction_id, + object_id=instruction.id, ) self._send_and_forget(revoke_msg, websocket) status = connection_state.instruction_statuses.get( instruction.message_id, InstructionStatus.NEW ) self.app.logger.debug( - f" ๐Ÿšซ Revoked instruction {str(instruction.instruction_id)[:8]}... ({status.value})" + f" ๐Ÿšซ Revoked instruction {str(instruction.id)[:8]}... ({status.value})" ) # Clear the list of sent instructions after revoking From 522770b46e158f7a652e62d33c13fb35325ea852 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Wed, 5 Nov 2025 18:55:22 +0100 Subject: [PATCH 141/171] Handler for leakeage and usage behaviour Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws_sync.py | 96 +++++++++++++++++++++++++++++++++-- 1 file changed, 91 insertions(+), 5 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index bd7df31168..7e27333a2f 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -45,6 +45,8 @@ FRBCStorageStatus, FRBCActuatorStatus, FRBCInstruction, + FRBCUsageForecast, + FRBCLeakageBehaviour, ) from s2python.message import S2Message from s2python.s2_parser import S2Parser @@ -67,19 +69,28 @@ def __init__(self): self.actuator_statuses: Dict[str, FRBCActuatorStatus] = ( {} ) # Changed to dict by actuator_id + self.usage_forecast: Optional[FRBCUsageForecast] = None + self.leakage_behaviour: 
Optional[FRBCLeakageBehaviour] = None self.resource_id: Optional[str] = None self.instructions: Optional[List[FRBCInstruction]] = [] def is_complete(self) -> bool: """Check if we have received all necessary data to generate instructions.""" - # Check basic required data + # System description and storage status are always required if ( self.system_description is None - or self.fill_level_target_profile is None or self.storage_status is None ): return False + # Fill level target profile OR usage forecast should be provided (at least one) + # Both are optional individually, but at least one should exist for meaningful planning + has_fill_level_target = self.fill_level_target_profile is not None + has_usage_forecast = self.usage_forecast is not None + + if not (has_fill_level_target or has_usage_forecast): + return False + # Check that we have actuator status for ALL actuators in system description if self.system_description.actuators: required_actuator_ids = { @@ -258,6 +269,12 @@ def _register_default_handlers(self) -> None: self._handlers.register_handler( FRBCActuatorStatus, self.handle_frbc_actuator_status ) + self._handlers.register_handler( + FRBCUsageForecast, self.handle_frbc_usage_forecast + ) + self._handlers.register_handler( + FRBCLeakageBehaviour, self.handle_frbc_leakage_behaviour + ) self._handlers.register_handler( PowerMeasurement, self.handle_power_measurement ) @@ -288,6 +305,8 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: "FRBC.FillLevelTargetProfile": "๐ŸŽฏ", "FRBC.StorageStatus": "๐Ÿ”‹", "FRBC.ActuatorStatus": "โš™๏ธ", + "FRBC.UsageForecast": "๐Ÿ’ง", + "FRBC.LeakageBehaviour": "๐Ÿ”„", "InstructionStatusUpdate": "๐Ÿ“Š", "ResourceManagerDetails": "๐Ÿ“", "PowerMeasurement": "โšก", @@ -700,6 +719,63 @@ def handle_frbc_actuator_status( ) self._check_and_generate_instructions(resource_id, websocket) + def handle_frbc_usage_forecast( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: + if not 
isinstance(message, FRBCUsageForecast): + return + + resource_id = self._websocket_to_resource.get(websocket, "default_resource") + self.ensure_resource_is_registered(resource_id=resource_id) + + self._device_data[resource_id].usage_forecast = message + n_elements = len(message.elements) if message.elements else 0 + + # Log usage forecast details + if message.elements: + try: + # Duration objects have a value in milliseconds + total_duration_ms = sum(int(elem.duration) for elem in message.elements) + total_duration_min = total_duration_ms / 60000 + self.app.logger.debug( + f" ๐Ÿ’ง Total duration: {total_duration_min:.0f} min, Start: {message.start_time.strftime('%H:%M:%S')}" + ) + except Exception as e: + self.app.logger.debug( + f" ๐Ÿ’ง Start: {message.start_time.strftime('%H:%M:%S')}" + ) + + self.app.logger.info(f"๐Ÿ’ง UsageForecast: {n_elements} element(s)") + self._check_and_generate_instructions(resource_id, websocket) + + def handle_frbc_leakage_behaviour( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: + if not isinstance(message, FRBCLeakageBehaviour): + return + + resource_id = self._websocket_to_resource.get(websocket, "default_resource") + self.ensure_resource_is_registered(resource_id=resource_id) + + self._device_data[resource_id].leakage_behaviour = message + n_elements = len(message.elements) if message.elements else 0 + + # Log leakage behaviour details + if message.elements: + try: + # Log first element's leakage rate as example + first_elem = message.elements[0] + leakage_rate = first_elem.leakage_rate + fill_range = first_elem.fill_level_range + self.app.logger.debug( + f" ๐Ÿ”„ Leakage rate: {leakage_rate}, Fill range: {fill_range.start_of_range}-{fill_range.end_of_range}" + ) + except Exception: + pass + + self.app.logger.info(f"๐Ÿ”„ LeakageBehaviour: {n_elements} element(s)") + self._check_and_generate_instructions(resource_id, websocket) + def ensure_resource_is_registered(self, resource_id: str): try: 
asset_type = get_or_create_model(AssetType, name="S2 Resource") @@ -824,6 +900,16 @@ def _check_and_generate_instructions( # noqa: C901 else: missing_items.append("โœ… FillLevelTargetProfile") + if not device_data.usage_forecast: + missing_items.append("โŒ UsageForecast") + else: + missing_items.append("โœ… UsageForecast") + + if not device_data.leakage_behaviour: + missing_items.append("โŒ LeakageBehaviour") + else: + missing_items.append("โœ… LeakageBehaviour") + if not device_data.storage_status: missing_items.append("โŒ StorageStatus") else: @@ -867,8 +953,8 @@ def _check_and_generate_instructions( # noqa: C901 missing = [] if not device_data.system_description: missing.append("SystemDescription") - if not device_data.fill_level_target_profile: - missing.append("FillLevelTargetProfile") + if not device_data.fill_level_target_profile and not device_data.usage_forecast: + missing.append("FillLevelTargetProfile or UsageForecast") if not device_data.storage_status: missing.append("StorageStatus") if ( @@ -953,7 +1039,7 @@ def _check_and_generate_instructions( # noqa: C901 self.s2_scheduler.end = start_aligned + timedelta(hours=24) # 24-hour planning window self.s2_scheduler.belief_time = start_aligned - self.app.logger.debug(f"๐Ÿ• Scheduler window: {start_aligned.strftime('%Y-%m-%d %H:%M:%S')} โ†’ {self.s2_scheduler.end.strftime('%Y-%m-%d %H:%M:%S')}") + self.app.logger.debug(f"๐Ÿ• Scheduler window: {self.s2_scheduler.start.strftime('%Y-%m-%d %H:%M:%S')} โ†’ {self.s2_scheduler.end.strftime('%Y-%m-%d %H:%M:%S')}") # Generate instructions using the scheduler (this may query the database for costs) schedule_results = self.s2_scheduler.compute() From 16e2c92df8e0f23c6c04fa3421d817f0351a2eca Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Thu, 6 Nov 2025 13:09:50 +0100 Subject: [PATCH 142/171] alignment for fill level profile and usage profile Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws_sync.py | 35 +---------------------------------- 1 file changed, 1 
insertion(+), 34 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 52127f25c3..bc3aa77946 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -449,12 +449,7 @@ def _revoke_previous_instructions(self, connection_state: ConnectionState, webso ) in (InstructionStatus.NEW, InstructionStatus.ACCEPTED) ] - - if not instructions_to_revoke: - self.app.logger.info("๐Ÿ”„ No instructions to revoke (all processed)") - connection_state.sent_instructions.clear() - return - + self.app.logger.info( f"๐Ÿ—‘๏ธ Revoking {len(instructions_to_revoke)}/{len(connection_state.sent_instructions)} instructions" ) @@ -493,7 +488,6 @@ def _filter_instructions_by_operation_mode(self, instructions: list, connection_ last_operation_mode = instruction.operation_mode else: skipped += 1 - if skipped > 0: self.app.logger.info( f"๐Ÿ”ฝ Filtered: {len(instructions)} โ†’ {len(filtered)} instructions (skipped {skipped} duplicate modes)" @@ -767,33 +761,6 @@ def handle_frbc_leakage_behaviour( self.app.logger.info(f"๐Ÿ”„ LeakageBehaviour: {n_elements} element(s)") self._check_and_generate_instructions(resource_id, websocket) - def handle_frbc_instruction_status_update( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: - if not isinstance(message, FRBCInstructionStatusUpdate): - return - - # Map status types to emojis for better visibility - status_emojis = { - "NEW": "๐Ÿ†•", - "ACCEPTED": "โœ…", - "STARTED": "โ–ถ๏ธ", - "SUCCEEDED": "๐ŸŽ‰", - "ABORTED": "๐Ÿ›‘", - "REJECTED": "โŒ", - "REVOKED": "๐Ÿ—‘๏ธ", - } - - status_type = str(message.status_type) if hasattr(message, "status_type") else "UNKNOWN" - emoji = status_emojis.get(status_type, "๐Ÿ“Š") - - resource_id = self._websocket_to_resource.get(websocket, "unknown") - self.app.logger.info( - f"{emoji} Instruction Status Update from {resource_id}: " - f"instruction_id={message.instruction_id}, status={status_type}" - ) - 
self.app.logger.debug(message.to_json()) - def ensure_resource_is_registered(self, resource_id: str): try: asset_type = get_or_create_model(AssetType, name="S2 Resource") From 08d0ce6eb5dcc16b191e3e3985662586800020a9 Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Thu, 6 Nov 2025 13:15:38 +0100 Subject: [PATCH 143/171] alignment for fill level profile and usage profile Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws_sync.py | 173 ++++++++++------------------------ 1 file changed, 48 insertions(+), 125 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index bc3aa77946..a1a85095fc 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -66,9 +66,7 @@ def __init__(self): self.system_description: Optional[FRBCSystemDescription] = None self.fill_level_target_profile: Optional[FRBCFillLevelTargetProfile] = None self.storage_status: Optional[FRBCStorageStatus] = None - self.actuator_statuses: Dict[str, FRBCActuatorStatus] = ( - {} - ) # Changed to dict by actuator_id + self.actuator_statuses: Dict[str, FRBCActuatorStatus] = {} # Changed to dict by actuator_id self.usage_forecast: Optional[FRBCUsageForecast] = None self.leakage_behaviour: Optional[FRBCLeakageBehaviour] = None self.resource_id: Optional[str] = None @@ -77,17 +75,14 @@ def __init__(self): def is_complete(self) -> bool: """Check if we have received all necessary data to generate instructions.""" # System description and storage status are always required - if ( - self.system_description is None - or self.storage_status is None - ): + if self.system_description is None or self.storage_status is None: return False # Fill level target profile OR usage forecast should be provided (at least one) # Both are optional individually, but at least one should exist for meaningful planning has_fill_level_target = self.fill_level_target_profile is not None has_usage_forecast = self.usage_forecast is not None - + if not (has_fill_level_target or 
has_usage_forecast): return False @@ -122,9 +117,7 @@ def update_compute_time(self) -> None: """Update the last compute time to now.""" self.last_compute_time = datetime.now(timezone.utc) - def update_instruction_status( - self, instruction_id: uuid.UUID, status: InstructionStatus - ) -> None: + def update_instruction_status(self, instruction_id: uuid.UUID, status: InstructionStatus) -> None: """Update the status of an instruction.""" self.instruction_statuses[instruction_id] = status @@ -233,27 +226,13 @@ def _register_default_handlers(self) -> None: self._handlers.register_handler(ResourceManagerDetails, self.handle_ResourceManagerDetails) self._handlers.register_handler(InstructionStatusUpdate, self.handle_instruction_status_update) # Register FRBC message handlers - self._handlers.register_handler( - FRBCSystemDescription, self.handle_frbc_system_description - ) - self._handlers.register_handler( - FRBCFillLevelTargetProfile, self.handle_frbc_fill_level_target_profile - ) - self._handlers.register_handler( - FRBCStorageStatus, self.handle_frbc_storage_status - ) - self._handlers.register_handler( - FRBCActuatorStatus, self.handle_frbc_actuator_status - ) - self._handlers.register_handler( - FRBCUsageForecast, self.handle_frbc_usage_forecast - ) - self._handlers.register_handler( - FRBCLeakageBehaviour, self.handle_frbc_leakage_behaviour - ) - self._handlers.register_handler( - PowerMeasurement, self.handle_power_measurement - ) + self._handlers.register_handler(FRBCSystemDescription, self.handle_frbc_system_description) + self._handlers.register_handler(FRBCFillLevelTargetProfile, self.handle_frbc_fill_level_target_profile) + self._handlers.register_handler(FRBCStorageStatus, self.handle_frbc_storage_status) + self._handlers.register_handler(FRBCActuatorStatus, self.handle_frbc_actuator_status) + self._handlers.register_handler(FRBCUsageForecast, self.handle_frbc_usage_forecast) + self._handlers.register_handler(FRBCLeakageBehaviour, 
self.handle_frbc_leakage_behaviour) + self._handlers.register_handler(PowerMeasurement, self.handle_power_measurement) def _ws_handler(self, ws: Sock) -> None: try: @@ -444,12 +423,10 @@ def _revoke_previous_instructions(self, connection_state: ConnectionState, webso instructions_to_revoke = [ instr for instr in connection_state.sent_instructions - if connection_state.instruction_statuses.get( - instr.message_id, InstructionStatus.NEW - ) + if connection_state.instruction_statuses.get(instr.message_id, InstructionStatus.NEW) in (InstructionStatus.NEW, InstructionStatus.ACCEPTED) ] - + self.app.logger.info( f"๐Ÿ—‘๏ธ Revoking {len(instructions_to_revoke)}/{len(connection_state.sent_instructions)} instructions" ) @@ -461,12 +438,8 @@ def _revoke_previous_instructions(self, connection_state: ConnectionState, webso object_id=instruction.id, ) self._send_and_forget(revoke_msg, websocket) - status = connection_state.instruction_statuses.get( - instruction.message_id, InstructionStatus.NEW - ) - self.app.logger.debug( - f" ๐Ÿšซ Revoked instruction {str(instruction.id)[:8]}... ({status.value})" - ) + status = connection_state.instruction_statuses.get(instruction.message_id, InstructionStatus.NEW) + self.app.logger.debug(f" ๐Ÿšซ Revoked instruction {str(instruction.id)[:8]}... ({status.value})") # Clear the list of sent instructions after revoking connection_state.sent_instructions.clear() @@ -529,6 +502,7 @@ def handle_ResourceManagerDetails(self, _: "S2FlaskWSServerSync", message: S2Mes return # Store the resource_id from ResourceManagerDetails for device identification + resource_id = str(message.resource_id) self._websocket_to_resource[websocket] = resource_id if resource_id not in self._device_data: @@ -559,9 +533,7 @@ def handle_instruction_status_update(self, _: "S2FlaskWSServerSync", message: S2 instr_id_full = str(message.instruction_id) instr_id_short = instr_id_full[:8] - self.app.logger.info( - f"{status_emoji} Instruction {instr_id_short}... 
โ†’ {message.status_type.value}" - ) + self.app.logger.info(f"{status_emoji} Instruction {instr_id_short}... โ†’ {message.status_type.value}") self.app.logger.debug(f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}") # If instruction is rejected, aborted, or revoked, remove it from sent_instructions @@ -571,9 +543,7 @@ def handle_instruction_status_update(self, _: "S2FlaskWSServerSync", message: S2 ): # Remove the instruction from sent_instructions list connection_state.sent_instructions = [ - instr - for instr in connection_state.sent_instructions - if instr.message_id != message.instruction_id + instr for instr in connection_state.sent_instructions if instr.message_id != message.instruction_id ] self.app.logger.debug(f" ๐Ÿ—‘๏ธ Removed {instr_id_short}... from memory") @@ -595,9 +565,7 @@ def handle_frbc_system_description(self, _: "S2FlaskWSServerSync", message: S2Me self.app.logger.debug( f" โš™๏ธ Actuator {str(actuator.id)[:8]}...: {n_modes} modes, {n_transitions} transitions, {n_timers} timers" ) - self.ensure_actuator_is_registered( - actuator_id=str(actuator.id), resource_id=resource_id - ) + self.ensure_actuator_is_registered(actuator_id=str(actuator.id), resource_id=resource_id) self.ensure_actuator_is_registered(actuator_id=str(actuator.id), resource_id=resource_id) # Log storage details @@ -634,16 +602,12 @@ def handle_frbc_fill_level_target_profile( f" ๐ŸŽฏ Total duration: {total_duration_min:.0f} min, Start: {message.start_time.strftime('%H:%M:%S')}" ) except Exception as e: - self.app.logger.debug( - f" ๐ŸŽฏ Start: {message.start_time.strftime('%H:%M:%S')}" - ) + self.app.logger.debug(f" ๐ŸŽฏ Start: {message.start_time.strftime('%H:%M:%S')}") self.app.logger.info(f"๐ŸŽฏ TargetProfile: {n_elements} element(s)") self._check_and_generate_instructions(resource_id, websocket) - def handle_power_measurement( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: + def handle_power_measurement(self, _: "S2FlaskWSServerSync", message: 
S2Message, websocket: Sock) -> None: if not isinstance(message, PowerMeasurement): return @@ -661,13 +625,9 @@ def handle_power_measurement( resource_or_actuator_id=resource_id, ) except Exception as exc: - self.app.logger.warning( - f"PowerMeasurement could not be saved: {str(exc)}" - ) + self.app.logger.warning(f"PowerMeasurement could not be saved: {str(exc)}") - def handle_frbc_storage_status( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: + def handle_frbc_storage_status(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: if not isinstance(message, FRBCStorageStatus): return resource_id = self._websocket_to_resource.get(websocket, "default_resource") @@ -696,17 +656,11 @@ def handle_frbc_actuator_status(self, _: "S2FlaskWSServerSync", message: S2Messa self.ensure_resource_is_registered(resource_id=resource_id) # Store actuator status by actuator_id to support multiple actuators - self._device_data[resource_id].actuator_statuses[ - str(message.actuator_id) - ] = message - self.app.logger.debug( - f"โš™๏ธ ActuatorStatus: factor={message.operation_mode_factor}" - ) + self._device_data[resource_id].actuator_statuses[str(message.actuator_id)] = message + self.app.logger.debug(f"โš™๏ธ ActuatorStatus: factor={message.operation_mode_factor}") self._check_and_generate_instructions(resource_id, websocket) - def handle_frbc_usage_forecast( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: + def handle_frbc_usage_forecast(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: if not isinstance(message, FRBCUsageForecast): return @@ -715,7 +669,7 @@ def handle_frbc_usage_forecast( self._device_data[resource_id].usage_forecast = message n_elements = len(message.elements) if message.elements else 0 - + # Log usage forecast details if message.elements: try: @@ -726,16 +680,12 @@ def handle_frbc_usage_forecast( f" ๐Ÿ’ง Total duration: {total_duration_min:.0f} 
min, Start: {message.start_time.strftime('%H:%M:%S')}" ) except Exception as e: - self.app.logger.debug( - f" ๐Ÿ’ง Start: {message.start_time.strftime('%H:%M:%S')}" - ) + self.app.logger.debug(f" ๐Ÿ’ง Start: {message.start_time.strftime('%H:%M:%S')}") self.app.logger.info(f"๐Ÿ’ง UsageForecast: {n_elements} element(s)") self._check_and_generate_instructions(resource_id, websocket) - def handle_frbc_leakage_behaviour( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: + def handle_frbc_leakage_behaviour(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: if not isinstance(message, FRBCLeakageBehaviour): return @@ -744,7 +694,7 @@ def handle_frbc_leakage_behaviour( self._device_data[resource_id].leakage_behaviour = message n_elements = len(message.elements) if message.elements else 0 - + # Log leakage behaviour details if message.elements: try: @@ -834,8 +784,7 @@ def save_event( belief = TimedBelief( sensor=sensor, source=data_source, - event_start=event_start - or floored_server_now(self._minimum_measurement_period), + event_start=event_start or floored_server_now(self._minimum_measurement_period), event_value=event_value, belief_time=server_now(), cumulative_probability=0.5, @@ -890,13 +839,8 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> missing_items.append("โœ… StorageStatus") # Check actuator statuses in detail - if ( - device_data.system_description - and device_data.system_description.actuators - ): - required_actuators = { - str(a.id) for a in device_data.system_description.actuators - } + if device_data.system_description and device_data.system_description.actuators: + required_actuators = {str(a.id) for a in device_data.system_description.actuators} received_actuators = set(device_data.actuator_statuses.keys()) missing_actuators = required_actuators - received_actuators @@ -905,13 +849,9 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: 
Sock) -> f"โŒ ActuatorStatus ({len(received_actuators)}/{len(required_actuators)} received)" ) for missing_id in missing_actuators: - self.app.logger.debug( - f" โณ Missing actuator status for: {missing_id}" - ) + self.app.logger.debug(f" โณ Missing actuator status for: {missing_id}") else: - missing_items.append( - f"โœ… ActuatorStatus (all {len(required_actuators)} received)" - ) + missing_items.append(f"โœ… ActuatorStatus (all {len(required_actuators)} received)") else: missing_items.append("โŒ ActuatorStatus (no actuators defined)") @@ -931,13 +871,8 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> missing.append("FillLevelTargetProfile or UsageForecast") if not device_data.storage_status: missing.append("StorageStatus") - if ( - device_data.system_description - and device_data.system_description.actuators - ): - required = { - str(a.id) for a in device_data.system_description.actuators - } + if device_data.system_description and device_data.system_description.actuators: + required = {str(a.id) for a in device_data.system_description.actuators} received = set(device_data.actuator_statuses.keys()) if required - received: missing.append("ActuatorStatus") @@ -996,18 +931,16 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> now = datetime.now(timezone.utc) future_time = now + timedelta(minutes=15) minutes_offset = future_time.minute % 5 - start_aligned = future_time.replace( - minute=future_time.minute - minutes_offset, second=0, microsecond=0 - ) + start_aligned = future_time.replace(minute=future_time.minute - minutes_offset, second=0, microsecond=0) # Update scheduler time window self.s2_scheduler.start = start_aligned - self.s2_scheduler.end = start_aligned + timedelta( - hours=24 - ) # 24-hour planning window + self.s2_scheduler.end = start_aligned + timedelta(hours=24) # 24-hour planning window self.s2_scheduler.belief_time = start_aligned - - self.app.logger.debug(f"๐Ÿ• Scheduler 
window: {self.s2_scheduler.start.strftime('%Y-%m-%d %H:%M:%S')} โ†’ {self.s2_scheduler.end.strftime('%Y-%m-%d %H:%M:%S')}") + + self.app.logger.debug( + f"๐Ÿ• Scheduler window: {self.s2_scheduler.start.strftime('%Y-%m-%d %H:%M:%S')} โ†’ {self.s2_scheduler.end.strftime('%Y-%m-%d %H:%M:%S')}" + ) # Generate instructions using the scheduler (this may query the database for costs) schedule_results = self.s2_scheduler.compute() @@ -1021,9 +954,7 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> # Log instruction summary before sending if filtered_instructions: - self.app.logger.info( - f"๐Ÿ“ค Sending {len(filtered_instructions)} instruction(s):" - ) + self.app.logger.info(f"๐Ÿ“ค Sending {len(filtered_instructions)} instruction(s):") # Send new instructions and store them current_time = datetime.now(timezone.utc) @@ -1066,15 +997,9 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> ) # Log full IDs at debug level - self.app.logger.debug( - f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}" - ) - self.app.logger.debug( - f" ๐Ÿ”ง Full operation mode ID: {mode_id_full}" - ) - self.app.logger.debug( - f" โš™๏ธ Full actuator ID: {actuator_id_full}" - ) + self.app.logger.debug(f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}") + self.app.logger.debug(f" ๐Ÿ”ง Full operation mode ID: {mode_id_full}") + self.app.logger.debug(f" โš™๏ธ Full actuator ID: {actuator_id_full}") # Update the last operation mode for this connection connection_state.last_operation_mode = instruction.operation_mode @@ -1112,9 +1037,7 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> data_source=self.s2_scheduler.data_source, ) if energy_data_count > 0: - self.app.logger.info( - f"๐Ÿ’พ Saved energy data for {energy_data_count} device(s)" - ) + self.app.logger.info(f"๐Ÿ’พ Saved energy data for {energy_data_count} device(s)") except Exception as exc: self.app.logger.warning(f"โš ๏ธ Energy data save failed: 
{str(exc)}") else: From c5ea21d4b34e79afc2a98118dc6ea5ad06c7b27e Mon Sep 17 00:00:00 2001 From: Vlad Iftime Date: Thu, 6 Nov 2025 13:21:06 +0100 Subject: [PATCH 144/171] alignment for fill level profile and usage profile Signed-off-by: Vlad Iftime --- flexmeasures/ws/s2_ws_sync.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index a1a85095fc..98064ae261 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -552,6 +552,7 @@ def handle_frbc_system_description(self, _: "S2FlaskWSServerSync", message: S2Me return # Get resource_id from websocket mapping + resource_id = self._websocket_to_resource.get(websocket, "default_resource") self.ensure_resource_is_registered(resource_id=resource_id) self._device_data[resource_id].system_description = message From 7b4c92560daf5d8f7040c06f18f779470d0f458e Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 7 Nov 2025 15:52:25 +0100 Subject: [PATCH 145/171] fix: fetch fresh data source Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 98064ae261..52a4148d4e 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -759,6 +759,12 @@ def save_event( event_unit: str = "", sensor_unit: str = "", ): + try: + data_source = db.session.get(Source, data_source.id) + except Exception as exc: + self.app.logger.warning( + f"Data source {data_source} could not be freshly fetched: {str(exc)}" + ) if event_resolution is None: event_resolution = timedelta(0) try: From 609f027195497e67bfa6b68cca65a4eba4724ecf Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 7 Nov 2025 16:13:12 +0100 Subject: [PATCH 146/171] fix: save PowerMeasurement Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 52a4148d4e..0be8e5b27c 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -225,6 +225,7 @@ def _register_default_handlers(self) -> None: self._handlers.register_handler(ReceptionStatus, self.handle_reception_status) self._handlers.register_handler(ResourceManagerDetails, self.handle_ResourceManagerDetails) self._handlers.register_handler(InstructionStatusUpdate, self.handle_instruction_status_update) + self._handlers.register_handler(PowerMeasurement, self.handle_power_measurement) # Register FRBC message handlers self._handlers.register_handler(FRBCSystemDescription, self.handle_frbc_system_description) self._handlers.register_handler(FRBCFillLevelTargetProfile, self.handle_frbc_fill_level_target_profile) @@ -232,7 +233,6 @@ def _register_default_handlers(self) -> None: self._handlers.register_handler(FRBCActuatorStatus, self.handle_frbc_actuator_status) self._handlers.register_handler(FRBCUsageForecast, self.handle_frbc_usage_forecast) self._handlers.register_handler(FRBCLeakageBehaviour, self.handle_frbc_leakage_behaviour) - self._handlers.register_handler(PowerMeasurement, self.handle_power_measurement) def _ws_handler(self, ws: Sock) -> None: try: @@ -620,7 +620,7 @@ def handle_power_measurement(self, _: "S2FlaskWSServerSync", message: S2Message, try: self.save_event( sensor_name=measurement.commodity_quantity, - event_value=message.values, + event_value=message.value, event_start=message.measurement_timestamp, data_source=db.session.get(Source, self.data_source_id), resource_or_actuator_id=resource_id, From 7dfb6a90a3f94b11a4e4b3656ac5da03fb4f434c Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 7 Nov 2025 16:34:32 +0100 Subject: [PATCH 147/171] fix: save measurement value rather than message value Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 0be8e5b27c..5a5ca72187 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -620,7 +620,7 @@ def handle_power_measurement(self, _: "S2FlaskWSServerSync", message: S2Message, try: self.save_event( sensor_name=measurement.commodity_quantity, - event_value=message.value, + event_value=measurement.value, event_start=message.measurement_timestamp, data_source=db.session.get(Source, self.data_source_id), resource_or_actuator_id=resource_id, From c9592e853d4da859a74037353515026c76bc6148 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 7 Nov 2025 16:44:44 +0100 Subject: [PATCH 148/171] refactor: pass data source ID instead of data source Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 5a5ca72187..fc4416fd7e 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -622,7 +622,7 @@ def handle_power_measurement(self, _: "S2FlaskWSServerSync", message: S2Message, sensor_name=measurement.commodity_quantity, event_value=measurement.value, event_start=message.measurement_timestamp, - data_source=db.session.get(Source, self.data_source_id), + data_source_id=self.data_source_id, resource_or_actuator_id=resource_id, ) except Exception as exc: @@ -641,7 +641,7 @@ def handle_frbc_storage_status(self, _: "S2FlaskWSServerSync", message: S2Messag self.save_event( sensor_name="fill level", event_value=message.present_fill_level, - data_source=db.session.get(Source, self.data_source_id), + data_source_id=self.data_source_id, resource_or_actuator_id=resource_id, ) self.app.logger.info(f"๐Ÿ”‹ StorageStatus: {message.present_fill_level:.1f}%") @@ -753,14 +753,14 @@ def save_event( sensor_name: str, 
resource_or_actuator_id: str, event_value: float | pd.Series, - data_source: Source, + data_source_id: int, event_start: str | None = None, event_resolution: timedelta | None = None, event_unit: str = "", sensor_unit: str = "", ): try: - data_source = db.session.get(Source, data_source.id) + data_source = db.session.get(Source, data_source_id) except Exception as exc: self.app.logger.warning( f"Data source {data_source} could not be freshly fetched: {str(exc)}" @@ -1030,7 +1030,7 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> sensor_name="power", resource_or_actuator_id=str(result["device"]), event_value=result["data"], - data_source=self.s2_scheduler.data_source, + data_source=self.s2_scheduler.data_source.id, event_resolution=self.s2_scheduler.resolution, event_unit=result["unit"], sensor_unit="W", @@ -1041,7 +1041,7 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> sensor_name="fill level", resource_or_actuator_id=str(result["fill level"]), event_value=result["data"], - data_source=self.s2_scheduler.data_source, + data_source=self.s2_scheduler.data_source.id, ) if energy_data_count > 0: self.app.logger.info(f"๐Ÿ’พ Saved energy data for {energy_data_count} device(s)") From ef668395bf4bf2e94b11a1dce50b0dc8a6992f04 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 7 Nov 2025 16:47:46 +0100 Subject: [PATCH 149/171] dev: log saved events Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index fc4416fd7e..2bad322886 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -809,6 +809,7 @@ def save_event( logger.error(f"Cannot save event values of type {type(event_value)}.") return save_to_db(bdf) + self.app.logger.debug(f"โœ… {capitalize(sensor_name)} saved successfully: {bdf}") except Exception as exc: self.app.logger.warning(f"{capitalize(sensor_name)} could not be saved as sensor data: {str(exc)}") From 2c93c5020ffd23ccd986d58d89ac647171014332 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 7 Nov 2025 16:53:27 +0100 Subject: [PATCH 150/171] fix: use new kwarg name Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 2bad322886..b9a0c50088 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -1031,7 +1031,7 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> sensor_name="power", resource_or_actuator_id=str(result["device"]), event_value=result["data"], - data_source=self.s2_scheduler.data_source.id, + data_source_id=self.s2_scheduler.data_source.id, event_resolution=self.s2_scheduler.resolution, event_unit=result["unit"], sensor_unit="W", @@ -1042,7 +1042,7 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> sensor_name="fill level", resource_or_actuator_id=str(result["fill level"]), event_value=result["data"], - data_source=self.s2_scheduler.data_source.id, + data_source_id=self.s2_scheduler.data_source.id, ) if energy_data_count > 0: self.app.logger.info(f"๐Ÿ’พ Saved energy data for {energy_data_count} device(s)") From 2481f460f856061bed40d3dd0a9dab266e373c9a Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Fri, 7 Nov 2025 17:18:06 +0100 Subject: [PATCH 151/171] fix: save fill level events from two sources Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index b9a0c50088..b96b645939 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -747,7 +747,7 @@ def save_attribute(self, resource_id: str, **kwargs): except Exception as exc: self.app.logger.warning(f"Failed to save {k}: {v} as an asset attribute of {asset}: {str(exc)}") - @only_if_timer_due("sensor_name", "resource_or_actuator_id") + @only_if_timer_due("sensor_name", "resource_or_actuator_id", "data_source_id") def save_event( self, sensor_name: str, From 3e0c6f69fe2303d1dd0c13bf021deb4cd0ff813b Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 9 Nov 2025 09:51:20 +0100 Subject: [PATCH 152/171] fix: warning message Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index b96b645939..5c6da21791 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -759,12 +759,6 @@ def save_event( event_unit: str = "", sensor_unit: str = "", ): - try: - data_source = db.session.get(Source, data_source_id) - except Exception as exc: - self.app.logger.warning( - f"Data source {data_source} could not be freshly fetched: {str(exc)}" - ) if event_resolution is None: event_resolution = timedelta(0) try: @@ -787,6 +781,12 @@ def save_event( sensor_unit, event_resolution=self.s2_scheduler.resolution, ) + try: + data_source = db.session.get(Source, data_source_id) + except Exception as exc: + self.app.logger.warning( + f"Data source {data_source_id} could not be freshly fetched: {str(exc)}" + ) if isinstance(event_value, float): belief = TimedBelief( sensor=sensor, From 
b9a0feb58675fc3a2d48a15311e27e0adf0a55fc Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 3 Dec 2025 12:30:04 +0100 Subject: [PATCH 153/171] dev: namespace the logging of S2 related contents using the RM name Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 468 +++++++++++++++++++++++++--------- 1 file changed, 342 insertions(+), 126 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index 5c6da21791..e27dc5e5ef 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -66,11 +66,14 @@ def __init__(self): self.system_description: Optional[FRBCSystemDescription] = None self.fill_level_target_profile: Optional[FRBCFillLevelTargetProfile] = None self.storage_status: Optional[FRBCStorageStatus] = None - self.actuator_statuses: Dict[str, FRBCActuatorStatus] = {} # Changed to dict by actuator_id + self.actuator_statuses: Dict[str, FRBCActuatorStatus] = ( + {} + ) # Changed to dict by actuator_id self.usage_forecast: Optional[FRBCUsageForecast] = None self.leakage_behaviour: Optional[FRBCLeakageBehaviour] = None self.resource_id: Optional[str] = None self.instructions: Optional[List[FRBCInstruction]] = [] + self.logger = None def is_complete(self) -> bool: """Check if we have received all necessary data to generate instructions.""" @@ -88,7 +91,9 @@ def is_complete(self) -> bool: # Check that we have actuator status for ALL actuators in system description if self.system_description.actuators: - required_actuator_ids = {str(actuator.id) for actuator in self.system_description.actuators} + required_actuator_ids = { + str(actuator.id) for actuator in self.system_description.actuators + } received_actuator_ids = set(self.actuator_statuses.keys()) return required_actuator_ids.issubset(received_actuator_ids) @@ -102,7 +107,9 @@ def __init__(self): self.last_compute_time: Optional[datetime] = None self.resource_id: Optional[str] = None self.last_operation_mode: Optional[uuid.UUID] = None - 
self.sent_instructions: List[FRBCInstruction] = [] # Store sent instructions for revocation + self.sent_instructions: List[FRBCInstruction] = ( + [] + ) # Store sent instructions for revocation self.instruction_statuses: Dict[uuid.UUID, InstructionStatus] = ( {} ) # Track status of each instruction by instruction_id @@ -111,13 +118,17 @@ def can_compute(self, replanning_frequency: timedelta) -> bool: """Check if enough time has passed since the last compute call.""" if self.last_compute_time is None: return True - return datetime.now(timezone.utc) - self.last_compute_time >= replanning_frequency + return ( + datetime.now(timezone.utc) - self.last_compute_time >= replanning_frequency + ) def update_compute_time(self) -> None: """Update the last compute time to now.""" self.last_compute_time = datetime.now(timezone.utc) - def update_instruction_status(self, instruction_id: uuid.UUID, status: InstructionStatus) -> None: + def update_instruction_status( + self, instruction_id: uuid.UUID, status: InstructionStatus + ) -> None: """Update the status of an instruction.""" self.instruction_statuses[instruction_id] = status @@ -164,7 +175,9 @@ def handle_message( f"Ignoring message of type {type(msg)}; no handler is registered", ) - def register_handler(self, msg_type: Type[S2Message], handler: Callable[..., Any]) -> None: + def register_handler( + self, msg_type: Type[S2Message], handler: Callable[..., Any] + ) -> None: self.handlers[msg_type] = handler @@ -185,9 +198,15 @@ def __init__( self._handlers = MessageHandlersSync() self.s2_parser = S2Parser() self._connections: Dict[str, Sock] = {} - self._device_data: Dict[str, FRBCDeviceData] = {} # Store device data by resource_id - self._websocket_to_resource: Dict[Sock, str] = {} # Map websocket to resource_id - self._connection_states: Dict[Sock, ConnectionState] = {} # Track connection state for rate limiting + self._device_data: Dict[str, FRBCDeviceData] = ( + {} + ) # Store device data by resource_id + 
self._websocket_to_resource: Dict[Sock, str] = ( + {} + ) # Map websocket to resource_id + self._connection_states: Dict[Sock, ConnectionState] = ( + {} + ) # Track connection state for rate limiting self._register_default_handlers() self.sock.route(self.ws_path)(self._ws_handler) self.s2_scheduler = None @@ -210,29 +229,51 @@ def _is_timer_due(self, name: str) -> bool: seconds_since_hour = now.minute * 60 + now.second + now.microsecond / 1e6 # Ceil to next multiple of period_seconds - next_tick_seconds = math.ceil(seconds_since_hour / period_seconds) * period_seconds + next_tick_seconds = ( + math.ceil(seconds_since_hour / period_seconds) * period_seconds + ) # Compute next due datetime - next_due = now.replace(minute=0, second=0, microsecond=0) + timedelta(seconds=next_tick_seconds) + next_due = now.replace(minute=0, second=0, microsecond=0) + timedelta( + seconds=next_tick_seconds + ) self._timers[name] = next_due return True else: - self.app.logger.debug(f"Timer for {name} is not due until {self._timers[name]}") + self.app.logger.debug( + f"Timer for {name} is not due until {self._timers[name]}" + ) return False def _register_default_handlers(self) -> None: self._handlers.register_handler(Handshake, self.handle_handshake) self._handlers.register_handler(ReceptionStatus, self.handle_reception_status) - self._handlers.register_handler(ResourceManagerDetails, self.handle_ResourceManagerDetails) - self._handlers.register_handler(InstructionStatusUpdate, self.handle_instruction_status_update) + self._handlers.register_handler( + ResourceManagerDetails, self.handle_ResourceManagerDetails + ) + self._handlers.register_handler( + InstructionStatusUpdate, self.handle_instruction_status_update + ) self._handlers.register_handler(PowerMeasurement, self.handle_power_measurement) # Register FRBC message handlers - self._handlers.register_handler(FRBCSystemDescription, self.handle_frbc_system_description) - self._handlers.register_handler(FRBCFillLevelTargetProfile, 
self.handle_frbc_fill_level_target_profile) - self._handlers.register_handler(FRBCStorageStatus, self.handle_frbc_storage_status) - self._handlers.register_handler(FRBCActuatorStatus, self.handle_frbc_actuator_status) - self._handlers.register_handler(FRBCUsageForecast, self.handle_frbc_usage_forecast) - self._handlers.register_handler(FRBCLeakageBehaviour, self.handle_frbc_leakage_behaviour) + self._handlers.register_handler( + FRBCSystemDescription, self.handle_frbc_system_description + ) + self._handlers.register_handler( + FRBCFillLevelTargetProfile, self.handle_frbc_fill_level_target_profile + ) + self._handlers.register_handler( + FRBCStorageStatus, self.handle_frbc_storage_status + ) + self._handlers.register_handler( + FRBCActuatorStatus, self.handle_frbc_actuator_status + ) + self._handlers.register_handler( + FRBCUsageForecast, self.handle_frbc_usage_forecast + ) + self._handlers.register_handler( + FRBCLeakageBehaviour, self.handle_frbc_leakage_behaviour + ) def _ws_handler(self, ws: Sock) -> None: try: @@ -280,14 +321,18 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: except json.JSONDecodeError: self.app.logger.warning(f"โŒ Invalid JSON from client {client_id}") self.respond_with_reception_status( - subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + subject_message_id=uuid.UUID( + "00000000-0000-0000-0000-000000000000" + ), status=ReceptionStatusValues.INVALID_DATA, diagnostic_label="Not valid json.", websocket=websocket, ) continue except S2ValidationError as e: - self.app.logger.warning(f"โŒ S2 validation error from client {client_id}: {str(e)}") + self.app.logger.warning( + f"โŒ S2 validation error from client {client_id}: {str(e)}" + ) try: json_msg = json.loads(message) message_id = json_msg.get("message_id") @@ -300,14 +345,18 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: ) else: self.respond_with_reception_status( - 
subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + subject_message_id=uuid.UUID( + "00000000-0000-0000-0000-000000000000" + ), status=ReceptionStatusValues.INVALID_DATA, diagnostic_label="Message appears valid json but could not find a message_id field.", websocket=websocket, ) except json.JSONDecodeError: self.respond_with_reception_status( - subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + subject_message_id=uuid.UUID( + "00000000-0000-0000-0000-000000000000" + ), status=ReceptionStatusValues.INVALID_DATA, diagnostic_label="Not valid json.", websocket=websocket, @@ -332,7 +381,9 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: self.app.logger.warning(f"โš ๏ธ DB commit failed: {str(exc)}") except json.JSONDecodeError: self.respond_with_reception_status( - subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + subject_message_id=uuid.UUID( + "00000000-0000-0000-0000-000000000000" + ), status=ReceptionStatusValues.INVALID_DATA, diagnostic_label="Not valid json.", websocket=websocket, @@ -349,13 +400,17 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: ) else: self.respond_with_reception_status( - subject_message_id=uuid.UUID("00000000-0000-0000-0000-000000000000"), + subject_message_id=uuid.UUID( + "00000000-0000-0000-0000-000000000000" + ), status=ReceptionStatusValues.INVALID_DATA, diagnostic_label="Message appears valid json but could not find a message_id field.", websocket=websocket, ) except Exception as e: - self.app.logger.error(f"๐Ÿ’ฅ Error processing message from client {client_id}: {str(e)}") + self.app.logger.error( + f"๐Ÿ’ฅ Error processing message from client {client_id}: {str(e)}" + ) raise except ConnectionClosed: self.app.logger.info(f"๐Ÿ”Œ Connection closed: {client_id[:8]}...") @@ -366,12 +421,16 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: if websocket in self._websocket_to_resource: resource_id = 
self._websocket_to_resource[websocket] del self._websocket_to_resource[websocket] - self.app.logger.debug(f"๐Ÿงน Cleaned up resource mapping for {resource_id}") + self.app.logger.debug( + f"๐Ÿงน Cleaned up resource mapping for {resource_id}" + ) # Clean up device data if resource_id in self._device_data: del self._device_data[resource_id] - self.app.logger.debug(f"๐Ÿงน Cleaned up device data for {resource_id}") + self.app.logger.debug( + f"๐Ÿงน Cleaned up device data for {resource_id}" + ) # Clean up device state from scheduler if available if ( @@ -380,7 +439,9 @@ def _handle_websocket_connection(self, websocket: Sock) -> None: and hasattr(self.s2_scheduler, "remove_device_state") ): self.s2_scheduler.remove_device_state(resource_id) - self.app.logger.debug(f"๐Ÿงน Cleaned up scheduler state for {resource_id}") + self.app.logger.debug( + f"๐Ÿงน Cleaned up scheduler state for {resource_id}" + ) # Clean up connection state if websocket in self._connection_states: @@ -401,19 +462,21 @@ def respond_with_reception_status( diagnostic_label=diagnostic_label, ) status_emoji = "โœ…" if status == ReceptionStatusValues.OK else "โŒ" - self.app.logger.debug(f"{status_emoji} ReceptionStatus: {status}") + self._logger(websocket).debug(f"{status_emoji} ReceptionStatus: {status}") try: websocket.send(response.to_json()) except ConnectionClosed: - self.app.logger.warning("โš ๏ธ Connection closed during response") + self._logger(websocket).warning("โš ๏ธ Connection closed during response") def _send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: try: websocket.send(s2_msg.to_json()) except ConnectionClosed: - self.app.logger.warning("โš ๏ธ Connection closed during send") + self._logger(websocket).warning("โš ๏ธ Connection closed during send") - def _revoke_previous_instructions(self, connection_state: ConnectionState, websocket: Sock) -> None: + def _revoke_previous_instructions( + self, connection_state: ConnectionState, websocket: Sock + ) -> None: """Revoke all 
previously sent instructions that are still ACCEPTED or NEW before sending new ones.""" if not connection_state.sent_instructions: return @@ -423,11 +486,13 @@ def _revoke_previous_instructions(self, connection_state: ConnectionState, webso instructions_to_revoke = [ instr for instr in connection_state.sent_instructions - if connection_state.instruction_statuses.get(instr.message_id, InstructionStatus.NEW) + if connection_state.instruction_statuses.get( + instr.message_id, InstructionStatus.NEW + ) in (InstructionStatus.NEW, InstructionStatus.ACCEPTED) ] - self.app.logger.info( + self._logger(websocket).info( f"๐Ÿ—‘๏ธ Revoking {len(instructions_to_revoke)}/{len(connection_state.sent_instructions)} instructions" ) @@ -438,13 +503,19 @@ def _revoke_previous_instructions(self, connection_state: ConnectionState, webso object_id=instruction.id, ) self._send_and_forget(revoke_msg, websocket) - status = connection_state.instruction_statuses.get(instruction.message_id, InstructionStatus.NEW) - self.app.logger.debug(f" ๐Ÿšซ Revoked instruction {str(instruction.id)[:8]}... ({status.value})") + status = connection_state.instruction_statuses.get( + instruction.message_id, InstructionStatus.NEW + ) + self._logger(websocket).debug( + f" ๐Ÿšซ Revoked instruction {str(instruction.id)[:8]}... 
({status.value})" + ) # Clear the list of sent instructions after revoking connection_state.sent_instructions.clear() - def _filter_instructions_by_operation_mode(self, instructions: list, connection_state: ConnectionState) -> list: + def _filter_instructions_by_operation_mode( + self, instructions: list, connection_state: ConnectionState, websocket: Sock + ) -> list: """Filter instructions to only include those with different operation_mode than the previous instruction.""" if not instructions: return instructions @@ -456,25 +527,32 @@ def _filter_instructions_by_operation_mode(self, instructions: list, connection_ for instruction in instructions: # Always include the first instruction if we haven't sent any before # or if the operation mode is different from the last sent instruction - if last_operation_mode is None or instruction.operation_mode != last_operation_mode: + if ( + last_operation_mode is None + or instruction.operation_mode != last_operation_mode + ): filtered.append(instruction) last_operation_mode = instruction.operation_mode else: skipped += 1 if skipped > 0: - self.app.logger.info( + self._logger(websocket).info( f"๐Ÿ”ฝ Filtered: {len(instructions)} โ†’ {len(filtered)} instructions (skipped {skipped} duplicate modes)" ) return filtered - def handle_handshake(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + def handle_handshake( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: if not isinstance(message, Handshake): return self.app.logger.debug(message.to_json()) if S2_VERSION not in message.supported_protocol_versions: - raise NotImplementedError(f"Server protocol {S2_VERSION} not supported by client") + raise NotImplementedError( + f"Server protocol {S2_VERSION} not supported by client" + ) handshake_response = HandshakeResponse( message_id=uuid.uuid4(), @@ -490,14 +568,18 @@ def handle_handshake(self, _: "S2FlaskWSServerSync", message: S2Message, websock 
control_type=ControlType.FILL_RATE_BASED_CONTROL, ) self._send_and_forget(select_control_type, websocket) - self.app.logger.info("๐Ÿ“ค SelectControlType: FRBC") + self._logger(websocket).info("๐Ÿ“ค SelectControlType: FRBC") - def handle_reception_status(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + def handle_reception_status( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: if not isinstance(message, ReceptionStatus): return - self.app.logger.debug(message.to_json()) + self._logger(websocket).debug(message.to_json()) - def handle_ResourceManagerDetails(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + def handle_ResourceManagerDetails( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: if not isinstance(message, ResourceManagerDetails): return @@ -507,18 +589,33 @@ def handle_ResourceManagerDetails(self, _: "S2FlaskWSServerSync", message: S2Mes if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() - self._device_data[resource_id].resource_id = resource_id + + dd = self._device_data[resource_id] + dd.resource_id = resource_id + + # Create namespaced logger + # safe name (no spaces/slashes etc) + rm_name = str(message.name).replace(" ", "_") + dd.logger = logging.getLogger(f"flexmeasures_s2.rm.{rm_name}") + + # Inherit app logger handlers/level + dd.logger.setLevel(self.app.logger.level) self.app.logger.info(f"๐Ÿ“ RM registered: {resource_id[:8]}... 
({message.name})") + dd.logger.info("RM logger initialized") - def handle_instruction_status_update(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + def handle_instruction_status_update( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: if not isinstance(message, InstructionStatusUpdate): return # Get the connection state and update instruction status connection_state = self._connection_states.get(websocket) if connection_state: - connection_state.update_instruction_status(message.instruction_id, message.status_type) + connection_state.update_instruction_status( + message.instruction_id, message.status_type + ) # Status emoji mapping status_emoji = { @@ -533,8 +630,10 @@ def handle_instruction_status_update(self, _: "S2FlaskWSServerSync", message: S2 instr_id_full = str(message.instruction_id) instr_id_short = instr_id_full[:8] - self.app.logger.info(f"{status_emoji} Instruction {instr_id_short}... โ†’ {message.status_type.value}") - self.app.logger.debug(f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}") + self._logger(websocket).info( + f"{status_emoji} Instruction {instr_id_short}... โ†’ {message.status_type.value}" + ) + self._logger(websocket).debug(f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}") # If instruction is rejected, aborted, or revoked, remove it from sent_instructions if message.status_type not in ( @@ -543,11 +642,17 @@ def handle_instruction_status_update(self, _: "S2FlaskWSServerSync", message: S2 ): # Remove the instruction from sent_instructions list connection_state.sent_instructions = [ - instr for instr in connection_state.sent_instructions if instr.message_id != message.instruction_id + instr + for instr in connection_state.sent_instructions + if instr.message_id != message.instruction_id ] - self.app.logger.debug(f" ๐Ÿ—‘๏ธ Removed {instr_id_short}... from memory") + self._logger(websocket).debug( + f" ๐Ÿ—‘๏ธ Removed {instr_id_short}... 
from memory" + ) - def handle_frbc_system_description(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + def handle_frbc_system_description( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: if not isinstance(message, FRBCSystemDescription): return @@ -563,20 +668,24 @@ def handle_frbc_system_description(self, _: "S2FlaskWSServerSync", message: S2Me n_modes = len(actuator.operation_modes) if actuator.operation_modes else 0 n_transitions = len(actuator.transitions) if actuator.transitions else 0 n_timers = len(actuator.timers) if actuator.timers else 0 - self.app.logger.debug( + self._logger(websocket).debug( f" โš™๏ธ Actuator {str(actuator.id)[:8]}...: {n_modes} modes, {n_transitions} transitions, {n_timers} timers" ) - self.ensure_actuator_is_registered(actuator_id=str(actuator.id), resource_id=resource_id) - self.ensure_actuator_is_registered(actuator_id=str(actuator.id), resource_id=resource_id) + self.ensure_actuator_is_registered( + actuator_id=str(actuator.id), resource_id=resource_id + ) + self.ensure_actuator_is_registered( + actuator_id=str(actuator.id), resource_id=resource_id + ) # Log storage details if message.storage: - self.app.logger.debug( + self._logger(websocket).debug( f" ๐Ÿ’พ Storage: {message.storage.fill_level_range.start_of_range}-{message.storage.fill_level_range.end_of_range} {message.storage.fill_level_label or '%'}" ) self.save_attribute(resource_id, **json.loads(message.to_json())) - self.app.logger.info(f"๐Ÿ“‹ SystemDescription: {n_actuators} actuator(s)") + self._logger(websocket).info(f"๐Ÿ“‹ SystemDescription: {n_actuators} actuator(s)") self._check_and_generate_instructions(resource_id, websocket) def handle_frbc_fill_level_target_profile( @@ -585,8 +694,10 @@ def handle_frbc_fill_level_target_profile( if not isinstance(message, FRBCFillLevelTargetProfile): return resource_id = self._websocket_to_resource.get(websocket, "default_resource") - self.app.logger.info(f"๐ŸŽฏ 
Received FRBCFillLevelTargetProfile for {resource_id}") - self.app.logger.debug(message.to_json()) + self._logger(websocket).info( + f"๐ŸŽฏ Received FRBCFillLevelTargetProfile for {resource_id}" + ) + self._logger(websocket).debug(message.to_json()) self.ensure_resource_is_registered(resource_id=resource_id) @@ -599,16 +710,20 @@ def handle_frbc_fill_level_target_profile( # Duration objects have a value in milliseconds total_duration_ms = sum(int(elem.duration) for elem in message.elements) total_duration_min = total_duration_ms / 60000 - self.app.logger.debug( + self._logger(websocket).debug( f" ๐ŸŽฏ Total duration: {total_duration_min:.0f} min, Start: {message.start_time.strftime('%H:%M:%S')}" ) except Exception as e: - self.app.logger.debug(f" ๐ŸŽฏ Start: {message.start_time.strftime('%H:%M:%S')}") + self._logger(websocket).debug( + f" ๐ŸŽฏ Start: {message.start_time.strftime('%H:%M:%S')}" + ) - self.app.logger.info(f"๐ŸŽฏ TargetProfile: {n_elements} element(s)") + self._logger(websocket).info(f"๐ŸŽฏ TargetProfile: {n_elements} element(s)") self._check_and_generate_instructions(resource_id, websocket) - def handle_power_measurement(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + def handle_power_measurement( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: if not isinstance(message, PowerMeasurement): return @@ -626,14 +741,18 @@ def handle_power_measurement(self, _: "S2FlaskWSServerSync", message: S2Message, resource_or_actuator_id=resource_id, ) except Exception as exc: - self.app.logger.warning(f"PowerMeasurement could not be saved: {str(exc)}") + self._logger(websocket).warning( + f"PowerMeasurement could not be saved: {str(exc)}" + ) - def handle_frbc_storage_status(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + def handle_frbc_storage_status( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: if not isinstance(message, 
FRBCStorageStatus): return resource_id = self._websocket_to_resource.get(websocket, "default_resource") - self.app.logger.info(f"๐Ÿ”‹ Received FRBCStorageStatus for {resource_id}") - self.app.logger.debug(message.to_json()) + self._logger(websocket).info(f"๐Ÿ”‹ Received FRBCStorageStatus for {resource_id}") + self._logger(websocket).debug(message.to_json()) self.ensure_resource_is_registered(resource_id=resource_id) @@ -644,24 +763,36 @@ def handle_frbc_storage_status(self, _: "S2FlaskWSServerSync", message: S2Messag data_source_id=self.data_source_id, resource_or_actuator_id=resource_id, ) - self.app.logger.info(f"๐Ÿ”‹ StorageStatus: {message.present_fill_level:.1f}%") + self._logger(websocket).info( + f"๐Ÿ”‹ StorageStatus: {message.present_fill_level:.1f}%" + ) self._check_and_generate_instructions(resource_id, websocket) - def handle_frbc_actuator_status(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + def handle_frbc_actuator_status( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: if not isinstance(message, FRBCActuatorStatus): return resource_id = self._websocket_to_resource.get(websocket, "default_resource") - self.app.logger.info(f"โš™๏ธ Received FRBCActuatorStatus for {resource_id} (actuator: {message.actuator_id})") - self.app.logger.debug(message.to_json()) + self._logger(websocket).info( + f"โš™๏ธ Received FRBCActuatorStatus for {resource_id} (actuator: {message.actuator_id})" + ) + self._logger(websocket).debug(message.to_json()) self.ensure_resource_is_registered(resource_id=resource_id) # Store actuator status by actuator_id to support multiple actuators - self._device_data[resource_id].actuator_statuses[str(message.actuator_id)] = message - self.app.logger.debug(f"โš™๏ธ ActuatorStatus: factor={message.operation_mode_factor}") + self._device_data[resource_id].actuator_statuses[ + str(message.actuator_id) + ] = message + self._logger(websocket).debug( + f"โš™๏ธ ActuatorStatus: 
factor={message.operation_mode_factor}" + ) self._check_and_generate_instructions(resource_id, websocket) - def handle_frbc_usage_forecast(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + def handle_frbc_usage_forecast( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: if not isinstance(message, FRBCUsageForecast): return @@ -677,16 +808,20 @@ def handle_frbc_usage_forecast(self, _: "S2FlaskWSServerSync", message: S2Messag # Duration objects have a value in milliseconds total_duration_ms = sum(int(elem.duration) for elem in message.elements) total_duration_min = total_duration_ms / 60000 - self.app.logger.debug( + self._logger(websocket).debug( f" ๐Ÿ’ง Total duration: {total_duration_min:.0f} min, Start: {message.start_time.strftime('%H:%M:%S')}" ) except Exception as e: - self.app.logger.debug(f" ๐Ÿ’ง Start: {message.start_time.strftime('%H:%M:%S')}") + self._logger(websocket).debug( + f" ๐Ÿ’ง Start: {message.start_time.strftime('%H:%M:%S')}" + ) - self.app.logger.info(f"๐Ÿ’ง UsageForecast: {n_elements} element(s)") + self._logger(websocket).info(f"๐Ÿ’ง UsageForecast: {n_elements} element(s)") self._check_and_generate_instructions(resource_id, websocket) - def handle_frbc_leakage_behaviour(self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock) -> None: + def handle_frbc_leakage_behaviour( + self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock + ) -> None: if not isinstance(message, FRBCLeakageBehaviour): return @@ -703,13 +838,13 @@ def handle_frbc_leakage_behaviour(self, _: "S2FlaskWSServerSync", message: S2Mes first_elem = message.elements[0] leakage_rate = first_elem.leakage_rate fill_range = first_elem.fill_level_range - self.app.logger.debug( + self._logger(websocket).debug( f" ๐Ÿ”„ Leakage rate: {leakage_rate}, Fill range: {fill_range.start_of_range}-{fill_range.end_of_range}" ) except Exception: pass - self.app.logger.info(f"๐Ÿ”„ LeakageBehaviour: {n_elements} 
element(s)") + self._logger(websocket).info(f"๐Ÿ”„ LeakageBehaviour: {n_elements} element(s)") self._check_and_generate_instructions(resource_id, websocket) def ensure_resource_is_registered(self, resource_id: str): @@ -722,7 +857,9 @@ def ensure_resource_is_registered(self, resource_id: str): generic_asset_type=asset_type, ) except Exception as exc: - self.app.logger.warning(f"Resource could not be saved as an asset: {str(exc)}") + self.app.logger.warning( + f"Resource could not be saved as an asset: {str(exc)}" + ) if resource_id not in self._device_data: self._device_data[resource_id] = FRBCDeviceData() @@ -737,7 +874,9 @@ def ensure_actuator_is_registered(self, actuator_id: str, resource_id: str): parent_asset=self._assets[resource_id], ) except Exception as exc: - self.app.logger.warning(f"Actuator could not be saved as an asset: {str(exc)}") + self.app.logger.warning( + f"Actuator could not be saved as an asset: {str(exc)}" + ) def save_attribute(self, resource_id: str, **kwargs): asset = self._assets[resource_id] @@ -745,7 +884,9 @@ def save_attribute(self, resource_id: str, **kwargs): try: asset.attributes[k] = v except Exception as exc: - self.app.logger.warning(f"Failed to save {k}: {v} as an asset attribute of {asset}: {str(exc)}") + self.app.logger.warning( + f"Failed to save {k}: {v} as an asset attribute of {asset}: {str(exc)}" + ) @only_if_timer_due("sensor_name", "resource_or_actuator_id", "data_source_id") def save_event( @@ -772,7 +913,9 @@ def save_event( generic_asset=asset, ) except Exception as exc: - self.app.logger.warning(f"{capitalize(sensor_name)} sensor could not be saved: {str(exc)}") + self.app.logger.warning( + f"{capitalize(sensor_name)} sensor could not be saved: {str(exc)}" + ) return try: event_value = convert_units( @@ -791,7 +934,8 @@ def save_event( belief = TimedBelief( sensor=sensor, source=data_source, - event_start=event_start or floored_server_now(self._minimum_measurement_period), + event_start=event_start + or 
floored_server_now(self._minimum_measurement_period), event_value=event_value, belief_time=server_now(), cumulative_probability=0.5, @@ -809,12 +953,27 @@ def save_event( logger.error(f"Cannot save event values of type {type(event_value)}.") return save_to_db(bdf) - self.app.logger.debug(f"โœ… {capitalize(sensor_name)} saved successfully: {bdf}") + self.app.logger.debug( + f"โœ… {capitalize(sensor_name)} saved successfully: {bdf}" + ) except Exception as exc: - self.app.logger.warning(f"{capitalize(sensor_name)} could not be saved as sensor data: {str(exc)}") + self.app.logger.warning( + f"{capitalize(sensor_name)} could not be saved as sensor data: {str(exc)}" + ) - def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> None: # noqa: C901 + def _logger(self, websocket: Sock): + resource_id = self._websocket_to_resource[websocket] + dd = self._device_data[resource_id] + if hasattr(dd, "logger"): + logger = dd.logger + else: + logger = self.app.logger + return logger + + def _check_and_generate_instructions( + self, resource_id: str, websocket: Sock + ) -> None: # noqa: C901 """Check if we have all required data and generate instructions if so.""" device_data = self._device_data.get(resource_id) if device_data: @@ -847,8 +1006,13 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> missing_items.append("โœ… StorageStatus") # Check actuator statuses in detail - if device_data.system_description and device_data.system_description.actuators: - required_actuators = {str(a.id) for a in device_data.system_description.actuators} + if ( + device_data.system_description + and device_data.system_description.actuators + ): + required_actuators = { + str(a.id) for a in device_data.system_description.actuators + } received_actuators = set(device_data.actuator_statuses.keys()) missing_actuators = required_actuators - received_actuators @@ -857,45 +1021,63 @@ def _check_and_generate_instructions(self, resource_id: str, 
websocket: Sock) -> f"โŒ ActuatorStatus ({len(received_actuators)}/{len(required_actuators)} received)" ) for missing_id in missing_actuators: - self.app.logger.debug(f" โณ Missing actuator status for: {missing_id}") + self._logger(websocket).debug( + f" โณ Missing actuator status for: {missing_id}" + ) else: - missing_items.append(f"โœ… ActuatorStatus (all {len(required_actuators)} received)") + missing_items.append( + f"โœ… ActuatorStatus (all {len(required_actuators)} received)" + ) else: missing_items.append("โŒ ActuatorStatus (no actuators defined)") # Log the status status_summary = " | ".join(missing_items) - self.app.logger.debug(f"๐Ÿ“Š Device readiness: {status_summary}") + self._logger(websocket).debug(f"๐Ÿ“Š Device readiness: {status_summary}") if device_data is None or not device_data.is_complete(): # Log what's still missing if device_data is None: - self.app.logger.info(f"โณ No device data yet for {resource_id[:8]}...") + self._logger(websocket).info( + f"โณ No device data yet for {resource_id[:8]}..." 
+ ) else: missing = [] if not device_data.system_description: missing.append("SystemDescription") - if not device_data.fill_level_target_profile and not device_data.usage_forecast: + if ( + not device_data.fill_level_target_profile + and not device_data.usage_forecast + ): missing.append("FillLevelTargetProfile or UsageForecast") if not device_data.storage_status: missing.append("StorageStatus") - if device_data.system_description and device_data.system_description.actuators: - required = {str(a.id) for a in device_data.system_description.actuators} + if ( + device_data.system_description + and device_data.system_description.actuators + ): + required = { + str(a.id) for a in device_data.system_description.actuators + } received = set(device_data.actuator_statuses.keys()) if required - received: missing.append("ActuatorStatus") - self.app.logger.info(f"โณ Waiting for: {', '.join(missing)}") + self._logger(websocket).info(f"โณ Waiting for: {', '.join(missing)}") return # Check rate limiting based on FLEXMEASURES_S2_REPLANNING_FREQUENCY connection_state = self._connection_states.get(websocket) if connection_state is None: - self.app.logger.warning(f"โš ๏ธ No connection state for {resource_id[:8]}...") + self._logger(websocket).warning( + f"โš ๏ธ No connection state for {resource_id[:8]}..." 
+ ) return # Parse replanning frequency from config - replanning_freq_str = self.app.config.get("FLEXMEASURES_S2_REPLANNING_FREQUENCY", "PT5M") + replanning_freq_str = self.app.config.get( + "FLEXMEASURES_S2_REPLANNING_FREQUENCY", "PT5M" + ) try: replanning_frequency = parse_duration(replanning_freq_str) if replanning_frequency is None: @@ -903,21 +1085,29 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> if not isinstance(replanning_frequency, timedelta): # Handle isodate.Duration objects by converting to timedelta # For simplicity, assume it's a basic duration that can be converted - replanning_frequency = timedelta(seconds=replanning_frequency.total_seconds()) + replanning_frequency = timedelta( + seconds=replanning_frequency.total_seconds() + ) except Exception as e: - self.app.logger.error(f"โŒ Error parsing FLEXMEASURES_S2_REPLANNING_FREQUENCY '{replanning_freq_str}': {e}") + self.app.logger.error( + f"โŒ Error parsing FLEXMEASURES_S2_REPLANNING_FREQUENCY '{replanning_freq_str}': {e}" + ) replanning_frequency = timedelta(minutes=5) # Default to 5 minutes # Check if we can compute based on rate limiting if not connection_state.can_compute(replanning_frequency): - time_since_last = datetime.now(timezone.utc) - connection_state.last_compute_time + time_since_last = ( + datetime.now(timezone.utc) - connection_state.last_compute_time + ) remaining_time = replanning_frequency - time_since_last - self.app.logger.debug( + self._logger(websocket).debug( f"โฑ๏ธ Rate limit: wait {remaining_time.total_seconds():.0f}s (last: {time_since_last.total_seconds():.0f}s ago)" ) return - self.app.logger.info(f"๐ŸŽฏ Generating instructions for {resource_id[:8]}...") + self._logger(websocket).info( + f"๐ŸŽฏ Generating instructions for {resource_id[:8]}..." 
+ ) try: # Use the S2FlaskScheduler to create and store device state @@ -939,14 +1129,18 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> now = datetime.now(timezone.utc) future_time = now + timedelta(minutes=15) minutes_offset = future_time.minute % 5 - start_aligned = future_time.replace(minute=future_time.minute - minutes_offset, second=0, microsecond=0) + start_aligned = future_time.replace( + minute=future_time.minute - minutes_offset, second=0, microsecond=0 + ) # Update scheduler time window self.s2_scheduler.start = start_aligned - self.s2_scheduler.end = start_aligned + timedelta(hours=24) # 24-hour planning window + self.s2_scheduler.end = start_aligned + timedelta( + hours=24 + ) # 24-hour planning window self.s2_scheduler.belief_time = start_aligned - self.app.logger.debug( + self._logger(websocket).debug( f"๐Ÿ• Scheduler window: {self.s2_scheduler.start.strftime('%Y-%m-%d %H:%M:%S')} โ†’ {self.s2_scheduler.end.strftime('%Y-%m-%d %H:%M:%S')}" ) @@ -954,15 +1148,23 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> schedule_results = self.s2_scheduler.compute() # Filter and send generated instructions - frbc_instructions = [result for result in schedule_results if isinstance(result, FRBCInstruction)] - filtered_instructions = self._filter_instructions_by_operation_mode(frbc_instructions, connection_state) + frbc_instructions = [ + result + for result in schedule_results + if isinstance(result, FRBCInstruction) + ] + filtered_instructions = self._filter_instructions_by_operation_mode( + frbc_instructions, connection_state, websocket + ) # Revoke previous instructions before sending new ones self._revoke_previous_instructions(connection_state, websocket) # Log instruction summary before sending if filtered_instructions: - self.app.logger.info(f"๐Ÿ“ค Sending {len(filtered_instructions)} instruction(s):") + self._logger(websocket).info( + f"๐Ÿ“ค Sending {len(filtered_instructions)} 
instruction(s):" + ) # Send new instructions and store them current_time = datetime.now(timezone.utc) @@ -991,23 +1193,29 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> exec_datetime = instruction.execution_time time_until_exec = (exec_datetime - current_time).total_seconds() if time_until_exec < 0: - self.app.logger.warning( + self._logger(websocket).warning( f" โš ๏ธ Instruction {instr_id_short}... has execution time {time_until_exec:.0f}s in the PAST!" ) elif time_until_exec < 60: - self.app.logger.warning( + self._logger(websocket).warning( f" โš ๏ธ Instruction {instr_id_short}... executes in only {time_until_exec:.0f}s (might be too soon)" ) # Log with short IDs for readability - self.app.logger.info( + self._logger(websocket).info( f" {idx}. {instr_id_short}... | mode: {mode_id_short}... | factor: {factor:.2f} | actuator: {actuator_short}... | exec: {exec_time}" ) # Log full IDs at debug level - self.app.logger.debug(f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}") - self.app.logger.debug(f" ๐Ÿ”ง Full operation mode ID: {mode_id_full}") - self.app.logger.debug(f" โš™๏ธ Full actuator ID: {actuator_id_full}") + self._logger(websocket).debug( + f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}" + ) + self._logger(websocket).debug( + f" ๐Ÿ”ง Full operation mode ID: {mode_id_full}" + ) + self._logger(websocket).debug( + f" โš™๏ธ Full actuator ID: {actuator_id_full}" + ) # Update the last operation mode for this connection connection_state.last_operation_mode = instruction.operation_mode @@ -1024,7 +1232,7 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> device_short = str(result["device"])[:8] if isinstance(result.get("data"), pd.Series): n_values = len(result["data"]) - self.app.logger.debug( + self._logger(websocket).debug( f" ๐Ÿ’พ Saving {n_values} energy values for device {device_short}... 
({result.get('unit', '?')})" ) self.save_event( @@ -1037,7 +1245,7 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> sensor_unit="W", ) if isinstance(result, dict) and "fill level" in result: - self.app.logger.debug(f"Saving result: {result}") + self._logger(websocket).debug(f"Saving result: {result}") self.save_event( sensor_name="fill level", resource_or_actuator_id=str(result["fill level"]), @@ -1045,15 +1253,23 @@ def _check_and_generate_instructions(self, resource_id: str, websocket: Sock) -> data_source_id=self.s2_scheduler.data_source.id, ) if energy_data_count > 0: - self.app.logger.info(f"๐Ÿ’พ Saved energy data for {energy_data_count} device(s)") + self._logger(websocket).info( + f"๐Ÿ’พ Saved energy data for {energy_data_count} device(s)" + ) except Exception as exc: - self.app.logger.warning(f"โš ๏ธ Energy data save failed: {str(exc)}") + self._logger(websocket).warning( + f"โš ๏ธ Energy data save failed: {str(exc)}" + ) else: # Scheduler not available - log warning and skip instruction generation - self.app.logger.warning(f"โš ๏ธ S2FlaskScheduler not available for {resource_id}") + self.app.logger.warning( + f"โš ๏ธ S2FlaskScheduler not available for {resource_id}" + ) except Exception as e: - self.app.logger.error(f"๐Ÿ’ฅ Error generating instructions for {resource_id}: {e}") + self.app.logger.error( + f"๐Ÿ’ฅ Error generating instructions for {resource_id}: {e}" + ) import traceback self.app.logger.debug(f"Traceback: {traceback.format_exc()}") From b679f5d61cc7be7697b0083d5c31b719aa6d0c53 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 3 Dec 2025 15:05:40 +0100 Subject: [PATCH 154/171] feat: pytest covering WS endpoint Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/tests/conftest.py | 18 ++++++++++++++++++ flexmeasures/ws/tests/test_s2_client_rm.py | 19 ++++++++++++++++++- 2 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 flexmeasures/ws/tests/conftest.py diff --git a/flexmeasures/ws/tests/conftest.py b/flexmeasures/ws/tests/conftest.py new file mode 100644 index 0000000000..d2ef74ecb7 --- /dev/null +++ b/flexmeasures/ws/tests/conftest.py @@ -0,0 +1,18 @@ +import threading +import time + +import pytest + + +@pytest.fixture(scope="module") +def server(app): + """Run Flask app with Sock in a thread for testing WebSocket""" + from werkzeug.serving import make_server + + srv = make_server("127.0.0.1", 5005, app) + thread = threading.Thread(target=srv.serve_forever) + thread.start() + time.sleep(0.1) # wait for server to start + yield "ws://127.0.0.1:5005/ping2" + srv.shutdown() + thread.join() diff --git a/flexmeasures/ws/tests/test_s2_client_rm.py b/flexmeasures/ws/tests/test_s2_client_rm.py index e0c66a32f8..93a8a79f53 100644 --- a/flexmeasures/ws/tests/test_s2_client_rm.py +++ b/flexmeasures/ws/tests/test_s2_client_rm.py @@ -1,9 +1,11 @@ -import argparse +import pytest + import logging import threading import datetime import uuid from typing import Callable +import websockets from s2python.authorization.default_client import S2DefaultClient from s2python.generated.gen_s2_pairing import ( @@ -45,6 +47,21 @@ SERVER_URL = "ws://127.0.0.1:5000" +@pytest.mark.asyncio +async def test_ping2_echo(server): + async with websockets.connect(server) as ws: + + # Send a message + await ws.send("hello") + resp = await ws.recv() + assert resp == "hello", "echo should return the same message" + + # Trigger server-side close + await ws.send("close") + with pytest.raises(websockets.exceptions.ConnectionClosedOK): + await ws.recv(), "expected that, after sending 'close', server breaks loop; connection closes" + + class MyFRBCControlType(FRBCControlType): def handle_instruction( self, conn: 
S2Connection, msg: S2Message, send_okay: Callable[[], None] From a4ea10504bc79c4b26fb7798a31462f27bd7cf0d Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 3 Dec 2025 15:07:36 +0100 Subject: [PATCH 155/171] refactor: move connected WS to fixture Signed-off-by: F.N. Claessen --- flexmeasures/ws/tests/conftest.py | 9 +++++++++ flexmeasures/ws/tests/test_s2_client_rm.py | 23 +++++++++++----------- 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/flexmeasures/ws/tests/conftest.py b/flexmeasures/ws/tests/conftest.py index d2ef74ecb7..1ecc7e164f 100644 --- a/flexmeasures/ws/tests/conftest.py +++ b/flexmeasures/ws/tests/conftest.py @@ -2,6 +2,8 @@ import time import pytest +import pytest_asyncio +import websockets @pytest.fixture(scope="module") @@ -16,3 +18,10 @@ def server(app): yield "ws://127.0.0.1:5005/ping2" srv.shutdown() thread.join() + + +@pytest_asyncio.fixture +async def ws(server): + """Provide an already connected WebSocket client to tests""" + async with websockets.connect(server) as websocket: + yield websocket diff --git a/flexmeasures/ws/tests/test_s2_client_rm.py b/flexmeasures/ws/tests/test_s2_client_rm.py index 93a8a79f53..2aa51d02a7 100644 --- a/flexmeasures/ws/tests/test_s2_client_rm.py +++ b/flexmeasures/ws/tests/test_s2_client_rm.py @@ -48,18 +48,17 @@ @pytest.mark.asyncio -async def test_ping2_echo(server): - async with websockets.connect(server) as ws: - - # Send a message - await ws.send("hello") - resp = await ws.recv() - assert resp == "hello", "echo should return the same message" - - # Trigger server-side close - await ws.send("close") - with pytest.raises(websockets.exceptions.ConnectionClosedOK): - await ws.recv(), "expected that, after sending 'close', server breaks loop; connection closes" +async def test_ping2_echo(ws): + + # Send a message + await ws.send("hello") + resp = await ws.recv() + assert resp == "hello", "echo should return the same message" + + # Trigger server-side close + await ws.send("close") + 
with pytest.raises(websockets.exceptions.ConnectionClosedOK): + await ws.recv(), "expected that, after sending 'close', server breaks loop; connection closes" class MyFRBCControlType(FRBCControlType): From 4fdfde9379ccde0feb2d64a1572ce114dd028f66 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Wed, 3 Dec 2025 15:17:00 +0100 Subject: [PATCH 156/171] refactor: make fixture reusable for connecting to other WS endpoints Signed-off-by: F.N. Claessen --- flexmeasures/ws/tests/conftest.py | 18 +++++++++++------- flexmeasures/ws/tests/test_s2_client_rm.py | 5 ++++- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/flexmeasures/ws/tests/conftest.py b/flexmeasures/ws/tests/conftest.py index 1ecc7e164f..077e447f68 100644 --- a/flexmeasures/ws/tests/conftest.py +++ b/flexmeasures/ws/tests/conftest.py @@ -4,24 +4,28 @@ import pytest import pytest_asyncio import websockets +from werkzeug.serving import make_server @pytest.fixture(scope="module") def server(app): """Run Flask app with Sock in a thread for testing WebSocket""" - from werkzeug.serving import make_server - srv = make_server("127.0.0.1", 5005, app) thread = threading.Thread(target=srv.serve_forever) thread.start() time.sleep(0.1) # wait for server to start - yield "ws://127.0.0.1:5005/ping2" + yield "ws://127.0.0.1:5005" srv.shutdown() thread.join() @pytest_asyncio.fixture -async def ws(server): - """Provide an already connected WebSocket client to tests""" - async with websockets.connect(server) as websocket: - yield websocket +async def connect_to_ws(server): + """Yield a callable to connect to a given WS endpoint by name.""" + + async def connect(endpoint_name): + url = f"{server}/{endpoint_name}" + conn = await websockets.connect(url) + return conn + + yield connect diff --git a/flexmeasures/ws/tests/test_s2_client_rm.py b/flexmeasures/ws/tests/test_s2_client_rm.py index 2aa51d02a7..a3c41a1ff9 100644 --- a/flexmeasures/ws/tests/test_s2_client_rm.py +++ 
b/flexmeasures/ws/tests/test_s2_client_rm.py @@ -48,7 +48,10 @@ @pytest.mark.asyncio -async def test_ping2_echo(ws): +async def test_ping2_echo(connect_to_ws): + + # Connect to WS endpoint + ws = await connect_to_ws("ping2") # Send a message await ws.send("hello") From 3be63f42868c7f4a83fb93a42fcc6f28105b84d4 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 5 Dec 2025 10:41:14 +0100 Subject: [PATCH 157/171] feat: do not disable existing loggers Signed-off-by: F.N. Claessen --- flexmeasures/utils/config_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flexmeasures/utils/config_utils.py b/flexmeasures/utils/config_utils.py index 77f7548adc..35ac878fdd 100644 --- a/flexmeasures/utils/config_utils.py +++ b/flexmeasures/utils/config_utils.py @@ -24,6 +24,7 @@ flexmeasures_logging_config = { "version": 1, + "disable_existing_loggers": False, "formatters": { "default": {"format": "[FLEXMEASURES][%(asctime)s] %(levelname)s: %(message)s"}, "detail": { From f23fd400abaa9aa2b5255545c915869cba6c6994 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 5 Dec 2025 10:49:45 +0100 Subject: [PATCH 158/171] feat: only re-enable s2python logger instead of re-enabling all loggers Signed-off-by: F.N. 
Claessen --- flexmeasures/utils/config_utils.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/flexmeasures/utils/config_utils.py b/flexmeasures/utils/config_utils.py index 35ac878fdd..3bbebd7e1d 100644 --- a/flexmeasures/utils/config_utils.py +++ b/flexmeasures/utils/config_utils.py @@ -24,7 +24,6 @@ flexmeasures_logging_config = { "version": 1, - "disable_existing_loggers": False, "formatters": { "default": {"format": "[FLEXMEASURES][%(asctime)s] %(levelname)s: %(message)s"}, "detail": { @@ -46,7 +45,10 @@ "backupCount": 6, }, }, - "root": {"level": "INFO", "handlers": ["console", "file"], "propagate": True}, + "loggers": { + "root": {"level": "INFO", "handlers": ["console", "file"], "propagate": True}, + "s2python": {"propagate": True}, + }, } From b1dcda8fda301d665348a42324492202fc11ffbf Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 5 Dec 2025 10:57:17 +0100 Subject: [PATCH 159/171] dev: set s2python logger level to DEBUG Signed-off-by: F.N. Claessen --- flexmeasures/utils/config_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/utils/config_utils.py b/flexmeasures/utils/config_utils.py index 3bbebd7e1d..28302b938a 100644 --- a/flexmeasures/utils/config_utils.py +++ b/flexmeasures/utils/config_utils.py @@ -47,7 +47,7 @@ }, "loggers": { "root": {"level": "INFO", "handlers": ["console", "file"], "propagate": True}, - "s2python": {"propagate": True}, + "s2python": {"level": "DEBUG", "propagate": True}, }, } From ba08bcf2123cbea6363eaac0852ceecec07b4472 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 5 Dec 2025 11:31:15 +0100 Subject: [PATCH 160/171] feat: make s2-python log statements visually distinct Signed-off-by: F.N. 
Claessen --- flexmeasures/utils/config_utils.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/flexmeasures/utils/config_utils.py b/flexmeasures/utils/config_utils.py index 28302b938a..e577ca9552 100644 --- a/flexmeasures/utils/config_utils.py +++ b/flexmeasures/utils/config_utils.py @@ -26,6 +26,7 @@ "version": 1, "formatters": { "default": {"format": "[FLEXMEASURES][%(asctime)s] %(levelname)s: %(message)s"}, + "s2python": {"format": "\033[94m[FLEXMEASURES][%(asctime)s][s2-python] %(levelname)s: %(message)s\033[0m"}, # blue "detail": { "format": "[FLEXMEASURES][%(asctime)s] %(levelname)s: %(message)s [logged in %(pathname)s:%(lineno)d]" }, @@ -36,6 +37,11 @@ "stream": sys.stdout, "formatter": "default", }, + "s2console": { # handler specific to s2python + "class": "logging.StreamHandler", + "stream": sys.stdout, + "formatter": "s2python", + }, "file": { "class": "logging.handlers.RotatingFileHandler", "level": "INFO", @@ -47,7 +53,7 @@ }, "loggers": { "root": {"level": "INFO", "handlers": ["console", "file"], "propagate": True}, - "s2python": {"level": "DEBUG", "propagate": True}, + "s2python": {"level": "DEBUG", "handlers": ["s2console"], "propagate": False}, }, } From 403936d0d229cf44f197dc205725169fb739120a Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 5 Dec 2025 13:00:25 +0100 Subject: [PATCH 161/171] fix: failsafe against missing resource logger Signed-off-by: F.N. 
Claessen --- flexmeasures/ws/s2_ws_sync.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index e27dc5e5ef..b384e777aa 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -963,8 +963,9 @@ def save_event( ) def _logger(self, websocket: Sock): - resource_id = self._websocket_to_resource[websocket] - dd = self._device_data[resource_id] + """Get the logger associated with the resource ID, or the app logger otherwise.""" + resource_id = self._websocket_to_resource.get(websocket, "default_resource") + dd = self._device_data.get(resource_id) if hasattr(dd, "logger"): logger = dd.logger else: From 6593811cc4784717b2d5a9877fa527b51ba74a30 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 5 Dec 2025 13:02:05 +0100 Subject: [PATCH 162/171] feat: add type annotations to _logger Signed-off-by: F.N. Claessen --- flexmeasures/ws/s2_ws_sync.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py index b384e777aa..7e71a7b184 100644 --- a/flexmeasures/ws/s2_ws_sync.py +++ b/flexmeasures/ws/s2_ws_sync.py @@ -962,15 +962,15 @@ def save_event( f"{capitalize(sensor_name)} could not be saved as sensor data: {str(exc)}" ) - def _logger(self, websocket: Sock): + def _logger(self, websocket: Sock) -> logging.Logger: """Get the logger associated with the resource ID, or the app logger otherwise.""" resource_id = self._websocket_to_resource.get(websocket, "default_resource") - dd = self._device_data.get(resource_id) + dd: str | None = self._device_data.get(resource_id) if hasattr(dd, "logger"): - logger = dd.logger + lgr = dd.logger else: - logger = self.app.logger - return logger + lgr = self.app.logger + return lgr def _check_and_generate_instructions( self, resource_id: str, websocket: Sock From 72d5e9ddff141e4d1bfd84b1a77bba8c3ac720de Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Fri, 5 Dec 2025 13:36:40 +0100 Subject: [PATCH 163/171] fix: commit the users in the setup_roles_users fixture, so that they turn up in the server thread, too Signed-off-by: F.N. Claessen --- flexmeasures/conftest.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flexmeasures/conftest.py b/flexmeasures/conftest.py index 2ffb5e356b..fc3bd21577 100644 --- a/flexmeasures/conftest.py +++ b/flexmeasures/conftest.py @@ -305,6 +305,7 @@ def create_roles_users(db, test_accounts) -> dict[str, User]: password="testtest", ) ) + db.session.commit() return {user.username: user.id for user in new_users} From 7c4b1bb9336a91d0344ec200699a4f100cc91ee6 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 5 Dec 2025 16:48:11 +0100 Subject: [PATCH 164/171] dev: also log flexmeasures-s2 with a color Signed-off-by: F.N. Claessen --- flexmeasures/utils/config_utils.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/flexmeasures/utils/config_utils.py b/flexmeasures/utils/config_utils.py index e577ca9552..076a0aa309 100644 --- a/flexmeasures/utils/config_utils.py +++ b/flexmeasures/utils/config_utils.py @@ -26,7 +26,10 @@ "version": 1, "formatters": { "default": {"format": "[FLEXMEASURES][%(asctime)s] %(levelname)s: %(message)s"}, - "s2python": {"format": "\033[94m[FLEXMEASURES][%(asctime)s][s2-python] %(levelname)s: %(message)s\033[0m"}, # blue + "s2python": {"format": "\033[94m[FLEXMEASURES][%(asctime)s][%(name)s] %(levelname)s: %(message)s\033[0m"}, # blue + "s2rm": { + "format": "\033[91m[FLEXMEASURES][%(asctime)s][%(name)s] %(levelname)s: %(message)s\033[0m" + }, "detail": { "format": "[FLEXMEASURES][%(asctime)s] %(levelname)s: %(message)s [logged in %(pathname)s:%(lineno)d]" }, @@ -42,6 +45,11 @@ "stream": sys.stdout, "formatter": "s2python", }, + "s2rmconsole": { # handler specific to s2python-rm + "class": "logging.StreamHandler", + "stream": sys.stdout, + "formatter": "s2rm", + }, "file": { "class": 
"logging.handlers.RotatingFileHandler", "level": "INFO", @@ -54,6 +62,11 @@ "loggers": { "root": {"level": "INFO", "handlers": ["console", "file"], "propagate": True}, "s2python": {"level": "DEBUG", "handlers": ["s2console"], "propagate": False}, + "flexmeasures_s2.rm": { + "level": "DEBUG", + "handlers": ["s2rmconsole"], + "propagate": False, + } }, } From 91cd71ffe585c05e619054ecca345171a4988649 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 7 Dec 2025 14:45:25 +0100 Subject: [PATCH 165/171] dev: use pairing token as bearer token Signed-off-by: F.N. Claessen --- flexmeasures/ws/tests/test_s2_client_rm.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flexmeasures/ws/tests/test_s2_client_rm.py b/flexmeasures/ws/tests/test_s2_client_rm.py index a3c41a1ff9..bf0922e9c1 100644 --- a/flexmeasures/ws/tests/test_s2_client_rm.py +++ b/flexmeasures/ws/tests/test_s2_client_rm.py @@ -243,6 +243,7 @@ def deactivate(self, conn: S2Connection) -> None: ), reconnect=True, verify_certificate=False, + bearer_token=pairing_token, ) # Start S2 session with the connection details From 81b7b3a4a7fef39992a00f703b8893204b7a4187 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 7 Dec 2025 22:07:48 +0100 Subject: [PATCH 166/171] refactor: move S2FlaskWSServerSync from flexmeasures to flexmeasures-s2 plugin Signed-off-by: F.N. 
Claessen --- flexmeasures/app.py | 6 +- flexmeasures/ws/s2_ws_sync.py | 1277 --------------------------------- 2 files changed, 3 insertions(+), 1280 deletions(-) delete mode 100644 flexmeasures/ws/s2_ws_sync.py diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 8247df0aa8..9ac40d8979 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -47,7 +47,6 @@ def create( # noqa C901 from flexmeasures.utils.config_utils import read_config, configure_logging from flexmeasures.utils.app_utils import set_secret_key, init_sentry from flexmeasures.utils.error_utils import add_basic_error_handlers - from flexmeasures.ws.s2_ws_sync import S2FlaskWSServerSync # Create app @@ -60,7 +59,6 @@ def create( # noqa C901 from flexmeasures.ws import sock sock.init_app(app) - s2_ws = S2FlaskWSServerSync(app=app, sock=sock) # noqa: F841 if env is not None: # overwrite app.config["FLEXMEASURES_ENV"] = env @@ -246,9 +244,11 @@ def teardown_request(exception=None): @app.before_request def ws_connection_auth(): + s2_ws = app.extensions["s2_ws_server"] + s2_ws.app = app # Check if this is the S2 WS connection route is_ws_connection = ( - request.path == s2_ws.ws_path + request.path == s2_ws.blueprint.url_prefix and request.headers.get("Upgrade", "").lower() == "websocket" ) diff --git a/flexmeasures/ws/s2_ws_sync.py b/flexmeasures/ws/s2_ws_sync.py deleted file mode 100644 index 7e71a7b184..0000000000 --- a/flexmeasures/ws/s2_ws_sync.py +++ /dev/null @@ -1,1277 +0,0 @@ -""" -Flask implementation of the S2 protocol WebSocket server (sync mode only). 
-""" - -import json -import logging -import math -import traceback -import uuid -from datetime import datetime, timedelta, timezone -from typing import Any, Callable, Dict, Optional, Type, List - -import pandas as pd -from flask import Flask -from flask_sock import ConnectionClosed, Sock - -from flexmeasures import Account, Asset, AssetType, Sensor, Source, User -from flexmeasures.data import db -from flexmeasures.data.models.time_series import TimedBelief -from flexmeasures.data.utils import save_to_db -from flexmeasures.api.common.utils.validators import parse_duration -from flexmeasures.data.services.utils import get_or_create_model -from flexmeasures.utils.coding_utils import only_if_timer_due -from flexmeasures.utils.flexmeasures_inflection import capitalize -from flexmeasures.utils.time_utils import floored_server_now, server_now -from flexmeasures.utils.unit_utils import convert_units -from s2python.common import ( - ControlType, - EnergyManagementRole, - Handshake, - HandshakeResponse, - InstructionStatus, - InstructionStatusUpdate, - PowerMeasurement, - ReceptionStatus, - ReceptionStatusValues, - RevokableObjects, - RevokeObject, - SelectControlType, - ResourceManagerDetails, -) -from s2python.frbc import ( - FRBCSystemDescription, - FRBCFillLevelTargetProfile, - FRBCStorageStatus, - FRBCActuatorStatus, - FRBCInstruction, - FRBCUsageForecast, - FRBCLeakageBehaviour, -) -from s2python.message import S2Message -from s2python.s2_parser import S2Parser -from s2python.s2_validation_error import S2ValidationError -from s2python.version import S2_VERSION -from timely_beliefs import BeliefsDataFrame - -# Set up logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger("S2FlaskWSServerSync") - - -class FRBCDeviceData: - """Class to store FRBC device data received from Resource Manager.""" - - def __init__(self): - self.system_description: Optional[FRBCSystemDescription] = None - self.fill_level_target_profile: Optional[FRBCFillLevelTargetProfile] 
= None - self.storage_status: Optional[FRBCStorageStatus] = None - self.actuator_statuses: Dict[str, FRBCActuatorStatus] = ( - {} - ) # Changed to dict by actuator_id - self.usage_forecast: Optional[FRBCUsageForecast] = None - self.leakage_behaviour: Optional[FRBCLeakageBehaviour] = None - self.resource_id: Optional[str] = None - self.instructions: Optional[List[FRBCInstruction]] = [] - self.logger = None - - def is_complete(self) -> bool: - """Check if we have received all necessary data to generate instructions.""" - # System description and storage status are always required - if self.system_description is None or self.storage_status is None: - return False - - # Fill level target profile OR usage forecast should be provided (at least one) - # Both are optional individually, but at least one should exist for meaningful planning - has_fill_level_target = self.fill_level_target_profile is not None - has_usage_forecast = self.usage_forecast is not None - - if not (has_fill_level_target or has_usage_forecast): - return False - - # Check that we have actuator status for ALL actuators in system description - if self.system_description.actuators: - required_actuator_ids = { - str(actuator.id) for actuator in self.system_description.actuators - } - received_actuator_ids = set(self.actuator_statuses.keys()) - return required_actuator_ids.issubset(received_actuator_ids) - - return True - - -class ConnectionState: - """Class to track the state of each WebSocket connection for rate limiting.""" - - def __init__(self): - self.last_compute_time: Optional[datetime] = None - self.resource_id: Optional[str] = None - self.last_operation_mode: Optional[uuid.UUID] = None - self.sent_instructions: List[FRBCInstruction] = ( - [] - ) # Store sent instructions for revocation - self.instruction_statuses: Dict[uuid.UUID, InstructionStatus] = ( - {} - ) # Track status of each instruction by instruction_id - - def can_compute(self, replanning_frequency: timedelta) -> bool: - """Check if 
enough time has passed since the last compute call.""" - if self.last_compute_time is None: - return True - return ( - datetime.now(timezone.utc) - self.last_compute_time >= replanning_frequency - ) - - def update_compute_time(self) -> None: - """Update the last compute time to now.""" - self.last_compute_time = datetime.now(timezone.utc) - - def update_instruction_status( - self, instruction_id: uuid.UUID, status: InstructionStatus - ) -> None: - """Update the status of an instruction.""" - self.instruction_statuses[instruction_id] = status - - -class MessageHandlersSync: - """Class to manage sync message handlers for different message types.""" - - handlers: Dict[Type[S2Message], Callable] - - def __init__(self) -> None: - self.handlers = {} - - def handle_message( - self, - server: "S2FlaskWSServerSync", - msg: S2Message, - websocket: Sock, - ) -> None: - """Handle the S2 message using the registered handler.""" - handler = self.handlers.get(type(msg)) - if handler is not None: - try: - handler(server, msg, websocket) - except Exception: - message_id = getattr(msg, "message_id", "N/A") - logger.error( - "While processing message %s an unrecoverable error occurred.", - message_id, - ) - logger.debug("Error: %s", traceback.format_exc()) - server.respond_with_reception_status( - subject_message_id=getattr( - msg, - "message_id", - uuid.UUID("00000000-0000-0000-0000-000000000000"), - ), - status=ReceptionStatusValues.TEMPORARY_ERROR, - diagnostic_label=f"While processing message {message_id} an unrecoverable error occurred.", - websocket=websocket, - ) - raise - else: - logger.warning( - f"Ignoring message of type {type(msg)}; no handler is registered", - ) - - def register_handler( - self, msg_type: Type[S2Message], handler: Callable[..., Any] - ) -> None: - self.handlers[msg_type] = handler - - -class S2FlaskWSServerSync: - """Flask-based WebSocket server implementation for S2 protocol (sync mode only).""" - - def __init__( - self, - role: EnergyManagementRole = 
EnergyManagementRole.CEM, - ws_path: str = "/s2", - app: Optional[Flask] = None, - sock: Optional[Sock] = None, - ) -> None: - self.role = role - self.ws_path = ws_path - self.app = app if app else Flask(__name__) - self.sock = sock if sock else Sock(self.app) - self._handlers = MessageHandlersSync() - self.s2_parser = S2Parser() - self._connections: Dict[str, Sock] = {} - self._device_data: Dict[str, FRBCDeviceData] = ( - {} - ) # Store device data by resource_id - self._websocket_to_resource: Dict[Sock, str] = ( - {} - ) # Map websocket to resource_id - self._connection_states: Dict[Sock, ConnectionState] = ( - {} - ) # Track connection state for rate limiting - self._register_default_handlers() - self.sock.route(self.ws_path)(self._ws_handler) - self.s2_scheduler = None - self.account: Account | None = None - self.user: User | None = None - self.data_source_id: int | None = None - self._assets: Dict[str, Asset] = {} - - self._minimum_measurement_period: timedelta = timedelta(minutes=5) - self._timers: dict[str, datetime] = dict() - - def _is_timer_due(self, name: str) -> bool: - now = datetime.now() - due_time = self._timers.get(name, now - self._minimum_measurement_period) - if due_time <= now: - # Get total seconds of the period - period_seconds = self._minimum_measurement_period.total_seconds() - - # Seconds since start of the hour - seconds_since_hour = now.minute * 60 + now.second + now.microsecond / 1e6 - - # Ceil to next multiple of period_seconds - next_tick_seconds = ( - math.ceil(seconds_since_hour / period_seconds) * period_seconds - ) - - # Compute next due datetime - next_due = now.replace(minute=0, second=0, microsecond=0) + timedelta( - seconds=next_tick_seconds - ) - self._timers[name] = next_due - return True - else: - self.app.logger.debug( - f"Timer for {name} is not due until {self._timers[name]}" - ) - return False - - def _register_default_handlers(self) -> None: - self._handlers.register_handler(Handshake, self.handle_handshake) - 
self._handlers.register_handler(ReceptionStatus, self.handle_reception_status) - self._handlers.register_handler( - ResourceManagerDetails, self.handle_ResourceManagerDetails - ) - self._handlers.register_handler( - InstructionStatusUpdate, self.handle_instruction_status_update - ) - self._handlers.register_handler(PowerMeasurement, self.handle_power_measurement) - # Register FRBC message handlers - self._handlers.register_handler( - FRBCSystemDescription, self.handle_frbc_system_description - ) - self._handlers.register_handler( - FRBCFillLevelTargetProfile, self.handle_frbc_fill_level_target_profile - ) - self._handlers.register_handler( - FRBCStorageStatus, self.handle_frbc_storage_status - ) - self._handlers.register_handler( - FRBCActuatorStatus, self.handle_frbc_actuator_status - ) - self._handlers.register_handler( - FRBCUsageForecast, self.handle_frbc_usage_forecast - ) - self._handlers.register_handler( - FRBCLeakageBehaviour, self.handle_frbc_leakage_behaviour - ) - - def _ws_handler(self, ws: Sock) -> None: - try: - self.app.logger.info("๐Ÿ”Œ New WebSocket connection received") - self._handle_websocket_connection(ws) - except Exception as e: - self.app.logger.error("โŒ WebSocket handler error: %s", e) - - def _handle_websocket_connection(self, websocket: Sock) -> None: - client_id = str(uuid.uuid4()) - self.app.logger.info(f" Client connected: {client_id[:8]}...") - self._connections[client_id] = websocket - # Initialize connection state for rate limiting - self._connection_states[websocket] = ConnectionState() - try: - while True: - message = websocket.receive() - s2_msg = None - try: - s2_msg = self.s2_parser.parse_as_any_message(message) - - # Log with appropriate emoji based on message type - msg_emoji = { - "Handshake": "๐Ÿค", - "FRBC.SystemDescription": "๐Ÿ“‹", - "FRBC.FillLevelTargetProfile": "๐ŸŽฏ", - "FRBC.StorageStatus": "๐Ÿ”‹", - "FRBC.ActuatorStatus": "โš™๏ธ", - "FRBC.UsageForecast": "๐Ÿ’ง", - "FRBC.LeakageBehaviour": "๐Ÿ”„", - 
"InstructionStatusUpdate": "๐Ÿ“Š", - "ResourceManagerDetails": "๐Ÿ“", - "PowerMeasurement": "โšก", - }.get(s2_msg.message_type, "๐Ÿ“ฅ") - - self.app.logger.info(f"{msg_emoji} {s2_msg.message_type}") - - # Don't log verbose message content - verbose_message_types = [ - "FRBC.UsageForecast", - "FRBC.ActuatorStatus", - ] - if s2_msg.message_type not in verbose_message_types: - self.app.logger.debug(s2_msg.to_json()) - except json.JSONDecodeError: - self.app.logger.warning(f"โŒ Invalid JSON from client {client_id}") - self.respond_with_reception_status( - subject_message_id=uuid.UUID( - "00000000-0000-0000-0000-000000000000" - ), - status=ReceptionStatusValues.INVALID_DATA, - diagnostic_label="Not valid json.", - websocket=websocket, - ) - continue - except S2ValidationError as e: - self.app.logger.warning( - f"โŒ S2 validation error from client {client_id}: {str(e)}" - ) - try: - json_msg = json.loads(message) - message_id = json_msg.get("message_id") - if message_id: - self.respond_with_reception_status( - subject_message_id=message_id, - status=ReceptionStatusValues.INVALID_MESSAGE, - diagnostic_label=str(e), - websocket=websocket, - ) - else: - self.respond_with_reception_status( - subject_message_id=uuid.UUID( - "00000000-0000-0000-0000-000000000000" - ), - status=ReceptionStatusValues.INVALID_DATA, - diagnostic_label="Message appears valid json but could not find a message_id field.", - websocket=websocket, - ) - except json.JSONDecodeError: - self.respond_with_reception_status( - subject_message_id=uuid.UUID( - "00000000-0000-0000-0000-000000000000" - ), - status=ReceptionStatusValues.INVALID_DATA, - diagnostic_label="Not valid json.", - websocket=websocket, - ) - continue - - # Handle valid message - try: - if not isinstance(s2_msg, ReceptionStatus): - self.respond_with_reception_status( - subject_message_id=s2_msg.message_id, - status=ReceptionStatusValues.OK, - diagnostic_label="Message received.", - websocket=websocket, - ) - 
self._handlers.handle_message(self, s2_msg, websocket) - - # Finalize transaction - try: - db.session.commit() - except Exception as exc: - self.app.logger.warning(f"โš ๏ธ DB commit failed: {str(exc)}") - except json.JSONDecodeError: - self.respond_with_reception_status( - subject_message_id=uuid.UUID( - "00000000-0000-0000-0000-000000000000" - ), - status=ReceptionStatusValues.INVALID_DATA, - diagnostic_label="Not valid json.", - websocket=websocket, - ) - except S2ValidationError as e: - json_msg = json.loads(message) - message_id = json_msg.get("message_id") - if message_id: - self.respond_with_reception_status( - subject_message_id=message_id, - status=ReceptionStatusValues.INVALID_MESSAGE, - diagnostic_label=str(e), - websocket=websocket, - ) - else: - self.respond_with_reception_status( - subject_message_id=uuid.UUID( - "00000000-0000-0000-0000-000000000000" - ), - status=ReceptionStatusValues.INVALID_DATA, - diagnostic_label="Message appears valid json but could not find a message_id field.", - websocket=websocket, - ) - except Exception as e: - self.app.logger.error( - f"๐Ÿ’ฅ Error processing message from client {client_id}: {str(e)}" - ) - raise - except ConnectionClosed: - self.app.logger.info(f"๐Ÿ”Œ Connection closed: {client_id[:8]}...") - finally: - if client_id in self._connections: - del self._connections[client_id] - # Clean up websocket to resource mapping and device states - if websocket in self._websocket_to_resource: - resource_id = self._websocket_to_resource[websocket] - del self._websocket_to_resource[websocket] - self.app.logger.debug( - f"๐Ÿงน Cleaned up resource mapping for {resource_id}" - ) - - # Clean up device data - if resource_id in self._device_data: - del self._device_data[resource_id] - self.app.logger.debug( - f"๐Ÿงน Cleaned up device data for {resource_id}" - ) - - # Clean up device state from scheduler if available - if ( - hasattr(self, "s2_scheduler") - and self.s2_scheduler is not None - and hasattr(self.s2_scheduler, 
"remove_device_state") - ): - self.s2_scheduler.remove_device_state(resource_id) - self.app.logger.debug( - f"๐Ÿงน Cleaned up scheduler state for {resource_id}" - ) - - # Clean up connection state - if websocket in self._connection_states: - del self._connection_states[websocket] - - self.app.logger.info(f"๐Ÿšช Client disconnected: {client_id[:8]}...") - - def respond_with_reception_status( - self, - subject_message_id: uuid.UUID, - status: ReceptionStatusValues, - diagnostic_label: str, - websocket: Sock, - ) -> None: - response = ReceptionStatus( - subject_message_id=subject_message_id, - status=status, - diagnostic_label=diagnostic_label, - ) - status_emoji = "โœ…" if status == ReceptionStatusValues.OK else "โŒ" - self._logger(websocket).debug(f"{status_emoji} ReceptionStatus: {status}") - try: - websocket.send(response.to_json()) - except ConnectionClosed: - self._logger(websocket).warning("โš ๏ธ Connection closed during response") - - def _send_and_forget(self, s2_msg: S2Message, websocket: Sock) -> None: - try: - websocket.send(s2_msg.to_json()) - except ConnectionClosed: - self._logger(websocket).warning("โš ๏ธ Connection closed during send") - - def _revoke_previous_instructions( - self, connection_state: ConnectionState, websocket: Sock - ) -> None: - """Revoke all previously sent instructions that are still ACCEPTED or NEW before sending new ones.""" - if not connection_state.sent_instructions: - return - - # Filter instructions to only revoke those with ACCEPTED or NEW status - # Instructions with other statuses have already been removed from memory - instructions_to_revoke = [ - instr - for instr in connection_state.sent_instructions - if connection_state.instruction_statuses.get( - instr.message_id, InstructionStatus.NEW - ) - in (InstructionStatus.NEW, InstructionStatus.ACCEPTED) - ] - - self._logger(websocket).info( - f"๐Ÿ—‘๏ธ Revoking {len(instructions_to_revoke)}/{len(connection_state.sent_instructions)} instructions" - ) - - for instruction in 
instructions_to_revoke: - revoke_msg = RevokeObject( - message_id=uuid.uuid4(), - object_type=RevokableObjects.FRBC_Instruction, - object_id=instruction.id, - ) - self._send_and_forget(revoke_msg, websocket) - status = connection_state.instruction_statuses.get( - instruction.message_id, InstructionStatus.NEW - ) - self._logger(websocket).debug( - f" ๐Ÿšซ Revoked instruction {str(instruction.id)[:8]}... ({status.value})" - ) - - # Clear the list of sent instructions after revoking - connection_state.sent_instructions.clear() - - def _filter_instructions_by_operation_mode( - self, instructions: list, connection_state: ConnectionState, websocket: Sock - ) -> list: - """Filter instructions to only include those with different operation_mode than the previous instruction.""" - if not instructions: - return instructions - - filtered = [] - last_operation_mode = connection_state.last_operation_mode - skipped = 0 - - for instruction in instructions: - # Always include the first instruction if we haven't sent any before - # or if the operation mode is different from the last sent instruction - if ( - last_operation_mode is None - or instruction.operation_mode != last_operation_mode - ): - filtered.append(instruction) - last_operation_mode = instruction.operation_mode - else: - skipped += 1 - if skipped > 0: - self._logger(websocket).info( - f"๐Ÿ”ฝ Filtered: {len(instructions)} โ†’ {len(filtered)} instructions (skipped {skipped} duplicate modes)" - ) - - return filtered - - def handle_handshake( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: - if not isinstance(message, Handshake): - return - self.app.logger.debug(message.to_json()) - - if S2_VERSION not in message.supported_protocol_versions: - raise NotImplementedError( - f"Server protocol {S2_VERSION} not supported by client" - ) - - handshake_response = HandshakeResponse( - message_id=uuid.uuid4(), - selected_protocol_version=S2_VERSION, - ) - self._send_and_forget(handshake_response, 
websocket) - self.app.logger.info(f"๐Ÿค Handshake complete (protocol {S2_VERSION})") - - # If client is RM, send control type selection - if hasattr(message, "role") and message.role == EnergyManagementRole.RM: - select_control_type = SelectControlType( - message_id=uuid.uuid4(), - control_type=ControlType.FILL_RATE_BASED_CONTROL, - ) - self._send_and_forget(select_control_type, websocket) - self._logger(websocket).info("๐Ÿ“ค SelectControlType: FRBC") - - def handle_reception_status( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: - if not isinstance(message, ReceptionStatus): - return - self._logger(websocket).debug(message.to_json()) - - def handle_ResourceManagerDetails( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: - if not isinstance(message, ResourceManagerDetails): - return - - # Store the resource_id from ResourceManagerDetails for device identification - resource_id = str(message.resource_id) - self._websocket_to_resource[websocket] = resource_id - - if resource_id not in self._device_data: - self._device_data[resource_id] = FRBCDeviceData() - - dd = self._device_data[resource_id] - dd.resource_id = resource_id - - # Create namespaced logger - # safe name (no spaces/slashes etc) - rm_name = str(message.name).replace(" ", "_") - dd.logger = logging.getLogger(f"flexmeasures_s2.rm.{rm_name}") - - # Inherit app logger handlers/level - dd.logger.setLevel(self.app.logger.level) - - self.app.logger.info(f"๐Ÿ“ RM registered: {resource_id[:8]}... 
({message.name})") - dd.logger.info("RM logger initialized") - - def handle_instruction_status_update( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: - if not isinstance(message, InstructionStatusUpdate): - return - - # Get the connection state and update instruction status - connection_state = self._connection_states.get(websocket) - if connection_state: - connection_state.update_instruction_status( - message.instruction_id, message.status_type - ) - - # Status emoji mapping - status_emoji = { - InstructionStatus.NEW: "๐Ÿ†•", - InstructionStatus.ACCEPTED: "โœ…", - InstructionStatus.REJECTED: "โŒ", - InstructionStatus.STARTED: "โ–ถ๏ธ", - InstructionStatus.SUCCEEDED: "๐ŸŽ‰", - InstructionStatus.ABORTED: "โ›”", - InstructionStatus.REVOKED: "๐Ÿšซ", - }.get(message.status_type, "๐Ÿ“Š") - - instr_id_full = str(message.instruction_id) - instr_id_short = instr_id_full[:8] - self._logger(websocket).info( - f"{status_emoji} Instruction {instr_id_short}... โ†’ {message.status_type.value}" - ) - self._logger(websocket).debug(f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}") - - # If instruction is rejected, aborted, or revoked, remove it from sent_instructions - if message.status_type not in ( - InstructionStatus.NEW, - InstructionStatus.ACCEPTED, - ): - # Remove the instruction from sent_instructions list - connection_state.sent_instructions = [ - instr - for instr in connection_state.sent_instructions - if instr.message_id != message.instruction_id - ] - self._logger(websocket).debug( - f" ๐Ÿ—‘๏ธ Removed {instr_id_short}... 
from memory" - ) - - def handle_frbc_system_description( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: - if not isinstance(message, FRBCSystemDescription): - return - - # Get resource_id from websocket mapping - resource_id = self._websocket_to_resource.get(websocket, "default_resource") - self.ensure_resource_is_registered(resource_id=resource_id) - - self._device_data[resource_id].system_description = message - n_actuators = len(message.actuators) if message.actuators else 0 - - # Log details about actuators - for actuator in message.actuators: - n_modes = len(actuator.operation_modes) if actuator.operation_modes else 0 - n_transitions = len(actuator.transitions) if actuator.transitions else 0 - n_timers = len(actuator.timers) if actuator.timers else 0 - self._logger(websocket).debug( - f" โš™๏ธ Actuator {str(actuator.id)[:8]}...: {n_modes} modes, {n_transitions} transitions, {n_timers} timers" - ) - self.ensure_actuator_is_registered( - actuator_id=str(actuator.id), resource_id=resource_id - ) - self.ensure_actuator_is_registered( - actuator_id=str(actuator.id), resource_id=resource_id - ) - - # Log storage details - if message.storage: - self._logger(websocket).debug( - f" ๐Ÿ’พ Storage: {message.storage.fill_level_range.start_of_range}-{message.storage.fill_level_range.end_of_range} {message.storage.fill_level_label or '%'}" - ) - - self.save_attribute(resource_id, **json.loads(message.to_json())) - self._logger(websocket).info(f"๐Ÿ“‹ SystemDescription: {n_actuators} actuator(s)") - self._check_and_generate_instructions(resource_id, websocket) - - def handle_frbc_fill_level_target_profile( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: - if not isinstance(message, FRBCFillLevelTargetProfile): - return - resource_id = self._websocket_to_resource.get(websocket, "default_resource") - self._logger(websocket).info( - f"๐ŸŽฏ Received FRBCFillLevelTargetProfile for {resource_id}" - ) - 
self._logger(websocket).debug(message.to_json()) - - self.ensure_resource_is_registered(resource_id=resource_id) - - self._device_data[resource_id].fill_level_target_profile = message - n_elements = len(message.elements) if message.elements else 0 - - # Log target profile details - if message.elements: - try: - # Duration objects have a value in milliseconds - total_duration_ms = sum(int(elem.duration) for elem in message.elements) - total_duration_min = total_duration_ms / 60000 - self._logger(websocket).debug( - f" ๐ŸŽฏ Total duration: {total_duration_min:.0f} min, Start: {message.start_time.strftime('%H:%M:%S')}" - ) - except Exception as e: - self._logger(websocket).debug( - f" ๐ŸŽฏ Start: {message.start_time.strftime('%H:%M:%S')}" - ) - - self._logger(websocket).info(f"๐ŸŽฏ TargetProfile: {n_elements} element(s)") - self._check_and_generate_instructions(resource_id, websocket) - - def handle_power_measurement( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: - if not isinstance(message, PowerMeasurement): - return - - resource_id = self._websocket_to_resource.get(websocket, "default_resource") - self.ensure_resource_is_registered(resource_id=resource_id) - - power_measurements = message.values - for measurement in power_measurements: - try: - self.save_event( - sensor_name=measurement.commodity_quantity, - event_value=measurement.value, - event_start=message.measurement_timestamp, - data_source_id=self.data_source_id, - resource_or_actuator_id=resource_id, - ) - except Exception as exc: - self._logger(websocket).warning( - f"PowerMeasurement could not be saved: {str(exc)}" - ) - - def handle_frbc_storage_status( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: - if not isinstance(message, FRBCStorageStatus): - return - resource_id = self._websocket_to_resource.get(websocket, "default_resource") - self._logger(websocket).info(f"๐Ÿ”‹ Received FRBCStorageStatus for {resource_id}") - 
self._logger(websocket).debug(message.to_json()) - - self.ensure_resource_is_registered(resource_id=resource_id) - - self._device_data[resource_id].storage_status = message - self.save_event( - sensor_name="fill level", - event_value=message.present_fill_level, - data_source_id=self.data_source_id, - resource_or_actuator_id=resource_id, - ) - self._logger(websocket).info( - f"๐Ÿ”‹ StorageStatus: {message.present_fill_level:.1f}%" - ) - self._check_and_generate_instructions(resource_id, websocket) - - def handle_frbc_actuator_status( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: - if not isinstance(message, FRBCActuatorStatus): - return - resource_id = self._websocket_to_resource.get(websocket, "default_resource") - self._logger(websocket).info( - f"โš™๏ธ Received FRBCActuatorStatus for {resource_id} (actuator: {message.actuator_id})" - ) - self._logger(websocket).debug(message.to_json()) - - self.ensure_resource_is_registered(resource_id=resource_id) - - # Store actuator status by actuator_id to support multiple actuators - self._device_data[resource_id].actuator_statuses[ - str(message.actuator_id) - ] = message - self._logger(websocket).debug( - f"โš™๏ธ ActuatorStatus: factor={message.operation_mode_factor}" - ) - self._check_and_generate_instructions(resource_id, websocket) - - def handle_frbc_usage_forecast( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: - if not isinstance(message, FRBCUsageForecast): - return - - resource_id = self._websocket_to_resource.get(websocket, "default_resource") - self.ensure_resource_is_registered(resource_id=resource_id) - - self._device_data[resource_id].usage_forecast = message - n_elements = len(message.elements) if message.elements else 0 - - # Log usage forecast details - if message.elements: - try: - # Duration objects have a value in milliseconds - total_duration_ms = sum(int(elem.duration) for elem in message.elements) - total_duration_min = 
total_duration_ms / 60000 - self._logger(websocket).debug( - f" ๐Ÿ’ง Total duration: {total_duration_min:.0f} min, Start: {message.start_time.strftime('%H:%M:%S')}" - ) - except Exception as e: - self._logger(websocket).debug( - f" ๐Ÿ’ง Start: {message.start_time.strftime('%H:%M:%S')}" - ) - - self._logger(websocket).info(f"๐Ÿ’ง UsageForecast: {n_elements} element(s)") - self._check_and_generate_instructions(resource_id, websocket) - - def handle_frbc_leakage_behaviour( - self, _: "S2FlaskWSServerSync", message: S2Message, websocket: Sock - ) -> None: - if not isinstance(message, FRBCLeakageBehaviour): - return - - resource_id = self._websocket_to_resource.get(websocket, "default_resource") - self.ensure_resource_is_registered(resource_id=resource_id) - - self._device_data[resource_id].leakage_behaviour = message - n_elements = len(message.elements) if message.elements else 0 - - # Log leakage behaviour details - if message.elements: - try: - # Log first element's leakage rate as example - first_elem = message.elements[0] - leakage_rate = first_elem.leakage_rate - fill_range = first_elem.fill_level_range - self._logger(websocket).debug( - f" ๐Ÿ”„ Leakage rate: {leakage_rate}, Fill range: {fill_range.start_of_range}-{fill_range.end_of_range}" - ) - except Exception: - pass - - self._logger(websocket).info(f"๐Ÿ”„ LeakageBehaviour: {n_elements} element(s)") - self._check_and_generate_instructions(resource_id, websocket) - - def ensure_resource_is_registered(self, resource_id: str): - try: - asset_type = get_or_create_model(AssetType, name="S2 Resource") - self._assets[resource_id] = get_or_create_model( - model_class=Asset, - name=resource_id, - account_id=self.account.id, - generic_asset_type=asset_type, - ) - except Exception as exc: - self.app.logger.warning( - f"Resource could not be saved as an asset: {str(exc)}" - ) - if resource_id not in self._device_data: - self._device_data[resource_id] = FRBCDeviceData() - - def ensure_actuator_is_registered(self, 
actuator_id: str, resource_id: str): - try: - asset_type = get_or_create_model(AssetType, name="S2 Actuator") - self._assets[actuator_id] = get_or_create_model( - model_class=Asset, - name=actuator_id, - account_id=self.account.id, - generic_asset_type=asset_type, - parent_asset=self._assets[resource_id], - ) - except Exception as exc: - self.app.logger.warning( - f"Actuator could not be saved as an asset: {str(exc)}" - ) - - def save_attribute(self, resource_id: str, **kwargs): - asset = self._assets[resource_id] - for k, v in kwargs.items(): - try: - asset.attributes[k] = v - except Exception as exc: - self.app.logger.warning( - f"Failed to save {k}: {v} as an asset attribute of {asset}: {str(exc)}" - ) - - @only_if_timer_due("sensor_name", "resource_or_actuator_id", "data_source_id") - def save_event( - self, - sensor_name: str, - resource_or_actuator_id: str, - event_value: float | pd.Series, - data_source_id: int, - event_start: str | None = None, - event_resolution: timedelta | None = None, - event_unit: str = "", - sensor_unit: str = "", - ): - if event_resolution is None: - event_resolution = timedelta(0) - try: - asset = self._assets[resource_or_actuator_id] - sensor = get_or_create_model( - model_class=Sensor, - name=sensor_name, - unit=sensor_unit, - event_resolution=event_resolution, - timezone=self.app.config["FLEXMEASURES_TIMEZONE"], - generic_asset=asset, - ) - except Exception as exc: - self.app.logger.warning( - f"{capitalize(sensor_name)} sensor could not be saved: {str(exc)}" - ) - return - try: - event_value = convert_units( - event_value, - event_unit, - sensor_unit, - event_resolution=self.s2_scheduler.resolution, - ) - try: - data_source = db.session.get(Source, data_source_id) - except Exception as exc: - self.app.logger.warning( - f"Data source {data_source_id} could not be freshly fetched: {str(exc)}" - ) - if isinstance(event_value, float): - belief = TimedBelief( - sensor=sensor, - source=data_source, - event_start=event_start - or 
floored_server_now(self._minimum_measurement_period), - event_value=event_value, - belief_time=server_now(), - cumulative_probability=0.5, - ) - bdf = BeliefsDataFrame(beliefs=[belief]) - elif isinstance(event_value, pd.Series): - bdf = BeliefsDataFrame( - event_value, - sensor=sensor, - source=data_source, - belief_time=server_now(), - cumulative_probability=0.5, - ) - else: - logger.error(f"Cannot save event values of type {type(event_value)}.") - return - save_to_db(bdf) - self.app.logger.debug( - f"โœ… {capitalize(sensor_name)} saved successfully: {bdf}" - ) - - except Exception as exc: - self.app.logger.warning( - f"{capitalize(sensor_name)} could not be saved as sensor data: {str(exc)}" - ) - - def _logger(self, websocket: Sock) -> logging.Logger: - """Get the logger associated with the resource ID, or the app logger otherwise.""" - resource_id = self._websocket_to_resource.get(websocket, "default_resource") - dd: str | None = self._device_data.get(resource_id) - if hasattr(dd, "logger"): - lgr = dd.logger - else: - lgr = self.app.logger - return lgr - - def _check_and_generate_instructions( - self, resource_id: str, websocket: Sock - ) -> None: # noqa: C901 - """Check if we have all required data and generate instructions if so.""" - device_data = self._device_data.get(resource_id) - if device_data: - # Build detailed status about what's missing - missing_items = [] - - if not device_data.system_description: - missing_items.append("โŒ SystemDescription") - else: - missing_items.append("โœ… SystemDescription") - - if not device_data.fill_level_target_profile: - missing_items.append("โŒ FillLevelTargetProfile") - else: - missing_items.append("โœ… FillLevelTargetProfile") - - if not device_data.usage_forecast: - missing_items.append("โŒ UsageForecast") - else: - missing_items.append("โœ… UsageForecast") - - if not device_data.leakage_behaviour: - missing_items.append("โŒ LeakageBehaviour") - else: - missing_items.append("โœ… LeakageBehaviour") - - if not 
device_data.storage_status: - missing_items.append("โŒ StorageStatus") - else: - missing_items.append("โœ… StorageStatus") - - # Check actuator statuses in detail - if ( - device_data.system_description - and device_data.system_description.actuators - ): - required_actuators = { - str(a.id) for a in device_data.system_description.actuators - } - received_actuators = set(device_data.actuator_statuses.keys()) - missing_actuators = required_actuators - received_actuators - - if missing_actuators: - missing_items.append( - f"โŒ ActuatorStatus ({len(received_actuators)}/{len(required_actuators)} received)" - ) - for missing_id in missing_actuators: - self._logger(websocket).debug( - f" โณ Missing actuator status for: {missing_id}" - ) - else: - missing_items.append( - f"โœ… ActuatorStatus (all {len(required_actuators)} received)" - ) - else: - missing_items.append("โŒ ActuatorStatus (no actuators defined)") - - # Log the status - status_summary = " | ".join(missing_items) - self._logger(websocket).debug(f"๐Ÿ“Š Device readiness: {status_summary}") - - if device_data is None or not device_data.is_complete(): - # Log what's still missing - if device_data is None: - self._logger(websocket).info( - f"โณ No device data yet for {resource_id[:8]}..." 
- ) - else: - missing = [] - if not device_data.system_description: - missing.append("SystemDescription") - if ( - not device_data.fill_level_target_profile - and not device_data.usage_forecast - ): - missing.append("FillLevelTargetProfile or UsageForecast") - if not device_data.storage_status: - missing.append("StorageStatus") - if ( - device_data.system_description - and device_data.system_description.actuators - ): - required = { - str(a.id) for a in device_data.system_description.actuators - } - received = set(device_data.actuator_statuses.keys()) - if required - received: - missing.append("ActuatorStatus") - - self._logger(websocket).info(f"โณ Waiting for: {', '.join(missing)}") - return - - # Check rate limiting based on FLEXMEASURES_S2_REPLANNING_FREQUENCY - connection_state = self._connection_states.get(websocket) - if connection_state is None: - self._logger(websocket).warning( - f"โš ๏ธ No connection state for {resource_id[:8]}..." - ) - return - - # Parse replanning frequency from config - replanning_freq_str = self.app.config.get( - "FLEXMEASURES_S2_REPLANNING_FREQUENCY", "PT5M" - ) - try: - replanning_frequency = parse_duration(replanning_freq_str) - if replanning_frequency is None: - raise ValueError(f"Invalid duration format: {replanning_freq_str}") - if not isinstance(replanning_frequency, timedelta): - # Handle isodate.Duration objects by converting to timedelta - # For simplicity, assume it's a basic duration that can be converted - replanning_frequency = timedelta( - seconds=replanning_frequency.total_seconds() - ) - except Exception as e: - self.app.logger.error( - f"โŒ Error parsing FLEXMEASURES_S2_REPLANNING_FREQUENCY '{replanning_freq_str}': {e}" - ) - replanning_frequency = timedelta(minutes=5) # Default to 5 minutes - - # Check if we can compute based on rate limiting - if not connection_state.can_compute(replanning_frequency): - time_since_last = ( - datetime.now(timezone.utc) - connection_state.last_compute_time - ) - remaining_time = 
replanning_frequency - time_since_last - self._logger(websocket).debug( - f"โฑ๏ธ Rate limit: wait {remaining_time.total_seconds():.0f}s (last: {time_since_last.total_seconds():.0f}s ago)" - ) - return - - self._logger(websocket).info( - f"๐ŸŽฏ Generating instructions for {resource_id[:8]}..." - ) - - try: - # Use the S2FlaskScheduler to create and store device state - if hasattr(self, "s2_scheduler") and self.s2_scheduler is not None: - # Create S2FrbcDeviceState from FRBC messages and store in scheduler - self.s2_scheduler.frbc_device_data = device_data - self.s2_scheduler.device_state = self.s2_scheduler.create_device_states_from_frbc_data( - # resource_id=resource_id, - # system_description=device_data.system_description, - # fill_level_target_profile=device_data.fill_level_target_profile, - # storage_status=device_data.storage_status, - # actuator_status=device_data.actuator_status, - ) - - # Update the compute time before calling the scheduler - connection_state.update_compute_time() - - # Recalculate scheduler start time: 15 minutes from now, aligned to 5-minute boundary - now = datetime.now(timezone.utc) - future_time = now + timedelta(minutes=15) - minutes_offset = future_time.minute % 5 - start_aligned = future_time.replace( - minute=future_time.minute - minutes_offset, second=0, microsecond=0 - ) - - # Update scheduler time window - self.s2_scheduler.start = start_aligned - self.s2_scheduler.end = start_aligned + timedelta( - hours=24 - ) # 24-hour planning window - self.s2_scheduler.belief_time = start_aligned - - self._logger(websocket).debug( - f"๐Ÿ• Scheduler window: {self.s2_scheduler.start.strftime('%Y-%m-%d %H:%M:%S')} โ†’ {self.s2_scheduler.end.strftime('%Y-%m-%d %H:%M:%S')}" - ) - - # Generate instructions using the scheduler (this may query the database for costs) - schedule_results = self.s2_scheduler.compute() - - # Filter and send generated instructions - frbc_instructions = [ - result - for result in schedule_results - if 
isinstance(result, FRBCInstruction) - ] - filtered_instructions = self._filter_instructions_by_operation_mode( - frbc_instructions, connection_state, websocket - ) - - # Revoke previous instructions before sending new ones - self._revoke_previous_instructions(connection_state, websocket) - - # Log instruction summary before sending - if filtered_instructions: - self._logger(websocket).info( - f"๐Ÿ“ค Sending {len(filtered_instructions)} instruction(s):" - ) - - # Send new instructions and store them - current_time = datetime.now(timezone.utc) - for idx, instruction in enumerate(filtered_instructions, 1): - self._send_and_forget(instruction, websocket) - - # Full IDs - instr_id_full = str(instruction.message_id) - mode_id_full = str(instruction.operation_mode) - actuator_id_full = str(instruction.actuator_id) - - # Short IDs for compact display - instr_id_short = instr_id_full[:8] - mode_id_short = mode_id_full[:8] - actuator_short = actuator_id_full[:8] - - exec_time = ( - instruction.execution_time.strftime("%H:%M:%S") - if hasattr(instruction.execution_time, "strftime") - else str(instruction.execution_time) - ) - factor = instruction.operation_mode_factor - - # Validate that execution time is in the future - if hasattr(instruction.execution_time, "tzinfo"): - exec_datetime = instruction.execution_time - time_until_exec = (exec_datetime - current_time).total_seconds() - if time_until_exec < 0: - self._logger(websocket).warning( - f" โš ๏ธ Instruction {instr_id_short}... has execution time {time_until_exec:.0f}s in the PAST!" - ) - elif time_until_exec < 60: - self._logger(websocket).warning( - f" โš ๏ธ Instruction {instr_id_short}... executes in only {time_until_exec:.0f}s (might be too soon)" - ) - - # Log with short IDs for readability - self._logger(websocket).info( - f" {idx}. {instr_id_short}... | mode: {mode_id_short}... | factor: {factor:.2f} | actuator: {actuator_short}... 
| exec: {exec_time}" - ) - - # Log full IDs at debug level - self._logger(websocket).debug( - f" ๐Ÿ“‹ Full instruction ID: {instr_id_full}" - ) - self._logger(websocket).debug( - f" ๐Ÿ”ง Full operation mode ID: {mode_id_full}" - ) - self._logger(websocket).debug( - f" โš™๏ธ Full actuator ID: {actuator_id_full}" - ) - - # Update the last operation mode for this connection - connection_state.last_operation_mode = instruction.operation_mode - - # Store the sent instructions for future revocation - connection_state.sent_instructions = filtered_instructions.copy() - - # Process non-instruction results - try: - energy_data_count = 0 - for result in schedule_results: - if isinstance(result, dict) and "device" in result: - energy_data_count += 1 - device_short = str(result["device"])[:8] - if isinstance(result.get("data"), pd.Series): - n_values = len(result["data"]) - self._logger(websocket).debug( - f" ๐Ÿ’พ Saving {n_values} energy values for device {device_short}... ({result.get('unit', '?')})" - ) - self.save_event( - sensor_name="power", - resource_or_actuator_id=str(result["device"]), - event_value=result["data"], - data_source_id=self.s2_scheduler.data_source.id, - event_resolution=self.s2_scheduler.resolution, - event_unit=result["unit"], - sensor_unit="W", - ) - if isinstance(result, dict) and "fill level" in result: - self._logger(websocket).debug(f"Saving result: {result}") - self.save_event( - sensor_name="fill level", - resource_or_actuator_id=str(result["fill level"]), - event_value=result["data"], - data_source_id=self.s2_scheduler.data_source.id, - ) - if energy_data_count > 0: - self._logger(websocket).info( - f"๐Ÿ’พ Saved energy data for {energy_data_count} device(s)" - ) - except Exception as exc: - self._logger(websocket).warning( - f"โš ๏ธ Energy data save failed: {str(exc)}" - ) - else: - # Scheduler not available - log warning and skip instruction generation - self.app.logger.warning( - f"โš ๏ธ S2FlaskScheduler not available for {resource_id}" - ) 
- - except Exception as e: - self.app.logger.error( - f"๐Ÿ’ฅ Error generating instructions for {resource_id}: {e}" - ) - import traceback - - self.app.logger.debug(f"Traceback: {traceback.format_exc()}") - # Continue processing other devices From a1437a119f6556316b214db8a3ea023d3956e8ce Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 8 Dec 2025 10:42:51 +0100 Subject: [PATCH 167/171] refactor: move ws_connection_auth from flexmeasures to flexmeasures-s2 Signed-off-by: F.N. Claessen --- flexmeasures/app.py | 93 --------------------------------------------- 1 file changed, 93 deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 9ac40d8979..d5c88fa71a 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -242,97 +242,4 @@ def teardown_request(exception=None): ) as f: f.write(output_html) - @app.before_request - def ws_connection_auth(): - s2_ws = app.extensions["s2_ws_server"] - s2_ws.app = app - # Check if this is the S2 WS connection route - is_ws_connection = ( - request.path == s2_ws.blueprint.url_prefix - and request.headers.get("Upgrade", "").lower() == "websocket" - ) - - if not is_ws_connection or current_user.is_authenticated: - return # Let other before_request hooks handle it - - auth_header = request.headers.get("Authorization", "") - if auth_header.startswith("Bearer "): - token = auth_header.removeprefix("Bearer ").strip() - try: - user_id = app.config.get("FLEXMEASURES_S2_BEARERS", {}).get(token, None) - user_can_use_ws = user_id is not None - except Exception as exc: - app.logger.warning(str(exc)) - user_can_use_ws = token == app.config.get( - "WEBSOCKET_BEARER_TOKEN", None - ) - if user_can_use_ws: - - try: - user = db.session.get(User, user_id) - # Attach account to WebSocket server - s2_ws.account = user.account - s2_ws.user = user - data_source = get_or_create_source(user) - s2_ws.data_source_id = data_source.id - app.logger.info("Account authorized for WebSocket connections") - except: - 
app.logger.warning("Failed to fetch User") - - # Initialize S2Scheduler for this WebSocket connection if not already done - if getattr(s2_ws, "s2_scheduler", None) is None: - from datetime import datetime, timedelta, timezone - - # Get S2FlaskScheduler class from registered schedulers - scheduler_class = app.data_generators["scheduler"][ - "S2FlaskScheduler" - ] - - # Create scheduler instance with minimal setup for WebSocket usage - scheduler = scheduler_class.__new__(scheduler_class) - - # Set basic time parameters - now = datetime.now(timezone.utc) - resolution = timedelta(minutes=5) - - # Set required attributes for scheduler - # Note: start, end, and belief_time will be recalculated dynamically - # in s2_ws_sync.py before each scheduler call - scheduler.sensor = None - scheduler.asset = None - scheduler.start = now - scheduler.end = now + timedelta(hours=24) - scheduler.resolution = resolution - scheduler.belief_time = now - scheduler.round_to_decimals = 6 - scheduler.flex_model = {} - scheduler.flex_context = {} - scheduler.fallback_scheduler_class = None - scheduler.info = {"scheduler": "S2FlaskScheduler"} - scheduler.config_deserialized = True - scheduler.return_multiple = True - scheduler.data_source = get_or_create_source( - source="FlexMeasures", - source_type="scheduler", - model="S2Scheduler", - version="1", - ) - - # Initialize device states storage - scheduler.device_states = {} - - # Attach scheduler to WebSocket server - s2_ws.s2_scheduler = scheduler - app.logger.info( - "S2FlaskScheduler initialized for WebSocket connections" - ) - - return # Let other before_request hooks handle it - - app.logger.info( - "Unauthorized WS handshake attempt from %s", request.remote_addr - ) - # Send clean 401 without stack trace noise - return Response("Unauthorized", status=401) - return app From 29dd6b0b51096068501dcbeebb11e446ef937fcc Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Mon, 8 Dec 2025 11:50:35 +0100 Subject: [PATCH 168/171] refactor: move test_s2_client_rm.py from flexmeasures to flexmeasures-s2 Signed-off-by: F.N. Claessen --- flexmeasures/ws/tests/test_s2_client_rm.py | 241 --------------------- 1 file changed, 241 deletions(-) diff --git a/flexmeasures/ws/tests/test_s2_client_rm.py b/flexmeasures/ws/tests/test_s2_client_rm.py index bf0922e9c1..5ebe123090 100644 --- a/flexmeasures/ws/tests/test_s2_client_rm.py +++ b/flexmeasures/ws/tests/test_s2_client_rm.py @@ -1,48 +1,8 @@ import pytest import logging -import threading -import datetime -import uuid -from typing import Callable import websockets -from s2python.authorization.default_client import S2DefaultClient -from s2python.generated.gen_s2_pairing import ( - S2NodeDescription, - Deployment, - PairingToken, - S2Role, - Protocols, -) - -from s2python.common import ( - EnergyManagementRole, - Duration, - Role, - RoleType, - Commodity, - Currency, - NumberRange, - PowerRange, - CommodityQuantity, -) -from s2python.frbc import ( - FRBCInstruction, - FRBCSystemDescription, - FRBCActuatorDescription, - FRBCStorageDescription, - FRBCOperationMode, - FRBCOperationModeElement, - FRBCFillLevelTargetProfile, - FRBCFillLevelTargetProfileElement, - FRBCStorageStatus, - FRBCActuatorStatus, -) -from s2python.communication.s2_connection import S2Connection, AssetDetails -from s2python.s2_control_type import FRBCControlType, NoControlControlType -from s2python.message import S2Message - logger = logging.getLogger("s2python") SERVER_URL = "ws://127.0.0.1:5000" @@ -62,204 +22,3 @@ async def test_ping2_echo(connect_to_ws): await ws.send("close") with pytest.raises(websockets.exceptions.ConnectionClosedOK): await ws.recv(), "expected that, after sending 'close', server breaks loop; connection closes" - - -class MyFRBCControlType(FRBCControlType): - def handle_instruction( - self, conn: S2Connection, msg: S2Message, send_okay: Callable[[], None] - ) -> None: - if not 
isinstance(msg, FRBCInstruction): - raise RuntimeError( - f"Expected an FRBCInstruction but received a message of type {type(msg)}." - ) - print(f"I have received the message {msg} from {conn}") - - def activate(self, conn: S2Connection) -> None: - print("The control type FRBC is now activated.") - - print("Time to send a FRBC SystemDescription") - actuator_id = uuid.uuid4() - operation_mode_id = uuid.uuid4() - conn.send_msg_and_await_reception_status_sync( - FRBCSystemDescription( - message_id=uuid.uuid4(), - valid_from=datetime.datetime.now(tz=datetime.timezone.utc), - actuators=[ - FRBCActuatorDescription( - id=actuator_id, - operation_modes=[ - FRBCOperationMode( - id=operation_mode_id, - elements=[ - FRBCOperationModeElement( - fill_level_range=NumberRange( - start_of_range=0.0, end_of_range=100.0 - ), - fill_rate=NumberRange( - start_of_range=-5.0, end_of_range=5.0 - ), - power_ranges=[ - PowerRange( - start_of_range=-200.0, - end_of_range=200.0, - commodity_quantity=CommodityQuantity.ELECTRIC_POWER_L1, - ) - ], - ) - ], - diagnostic_label="Load & unload battery", - abnormal_condition_only=False, - ) - ], - transitions=[], - timers=[], - supported_commodities=[Commodity.ELECTRICITY], - ) - ], - storage=FRBCStorageDescription( - fill_level_range=NumberRange( - start_of_range=0.0, end_of_range=100.0 - ), - fill_level_label="%", - diagnostic_label="Imaginary battery", - provides_fill_level_target_profile=True, - provides_leakage_behaviour=False, - provides_usage_forecast=False, - ), - ) - ) - print("Also send the target profile") - - conn.send_msg_and_await_reception_status_sync( - FRBCFillLevelTargetProfile( - message_id=uuid.uuid4(), - start_time=datetime.datetime.now(tz=datetime.timezone.utc), - elements=[ - FRBCFillLevelTargetProfileElement( - duration=Duration.from_milliseconds(30_000), - fill_level_range=NumberRange( - start_of_range=20.0, end_of_range=30.0 - ), - ), - FRBCFillLevelTargetProfileElement( - duration=Duration.from_milliseconds(300_000), - 
fill_level_range=NumberRange( - start_of_range=40.0, end_of_range=50.0 - ), - ), - ], - ) - ) - - print("Also send the storage status.") - conn.send_msg_and_await_reception_status_sync( - FRBCStorageStatus(message_id=uuid.uuid4(), present_fill_level=10.0) - ) - - print("Also send the actuator status.") - conn.send_msg_and_await_reception_status_sync( - FRBCActuatorStatus( - message_id=uuid.uuid4(), - actuator_id=actuator_id, - active_operation_mode_id=operation_mode_id, - operation_mode_factor=0.5, - ) - ) - - def deactivate(self, conn: S2Connection) -> None: - print("The control type FRBC is now deactivated.") - - -class MyNoControlControlType(NoControlControlType): - def activate(self, conn: S2Connection) -> None: - print("The control type NoControl is now activated.") - - def deactivate(self, conn: S2Connection) -> None: - print("The control type NoControl is now deactivated.") - - -if __name__ == "__main__": - # Configuration - # parser = argparse.ArgumentParser(description="S2 pairing example for FRBC RM") - # parser.add_argument("--pairing_endpoint", type=str, required=True) - # parser.add_argument("--pairing_token", type=str, required=True) - - # args = parser.parse_args() - - # pairing_endpoint = args.pairing_endpoint - # pairing_token = args.pairing_token - - # --- Client Setup --- - # Create node description - node_description = S2NodeDescription( - brand="TNO", - logoUri="https://www.tno.nl/publish/pages/5604/tno-logo-1484x835_003_.jpg", - type="demo frbc example", - modelName="S2 pairing example stub", - userDefinedName="TNO S2 pairing example for frbc", - role=S2Role.RM, - deployment=Deployment.LAN, - ) - - # Create a client to perform the pairing - pairing_endpoint = f"{SERVER_URL}/s2" - pairing_token = "1234567890" - client = S2DefaultClient( - pairing_uri=pairing_endpoint, - token=PairingToken(token=pairing_token), - node_description=node_description, - verify_certificate=False, - supported_protocols=[Protocols.WebSocketSecure], - ) - - try: - # # 
Request pairing - # logger.info("Initiating pairing with endpoint: %s", pairing_endpoint) - # pairing_response = client.request_pairing() - # logger.info("Pairing request successful, requesting connection...") - - # # Request connection details - # connection_details = client.request_connection() - # logger.info("Connection request successful") - - # # Solve challenge - # challenge_result = client.solve_challenge() - # logger.info("Challenge solved successfully") - - s2_connection = S2Connection( - url=f"{SERVER_URL}/s2", # type: ignore - role=EnergyManagementRole.RM, - control_types=[MyFRBCControlType(), MyNoControlControlType()], - asset_details=AssetDetails( - resource_id=client.client_node_id, - name="Some asset", - instruction_processing_delay=Duration.from_milliseconds(20), - roles=[ - Role(role=RoleType.ENERGY_CONSUMER, commodity=Commodity.ELECTRICITY) - ], - currency=Currency.EUR, - provides_forecast=False, - provides_power_measurements=[CommodityQuantity.ELECTRIC_POWER_L1], - ), - reconnect=True, - verify_certificate=False, - bearer_token=pairing_token, - ) - - # Start S2 session with the connection details - logger.info("Starting S2 session...") - s2_connection.start_as_rm() - logger.info("S2 session is running. Press Ctrl+C to exit.") - - # Keep the main thread alive to allow the WebSocket connection to run. - event = threading.Event() - event.wait() - - except KeyboardInterrupt: - logger.info("Program interrupted by user.") - except Exception as e: - logger.error("Error during pairing process: %s", e, exc_info=True) - raise e - finally: - client.close_connection() - logger.info("Connection closed.") From 6c0d9e6d68749932cf2addd07425d5da24504275 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 8 Dec 2025 11:55:11 +0100 Subject: [PATCH 169/171] refactor: move s2-python[ws] requirement from flexmeasures to flexmeasures-s2 Signed-off-by: F.N. 
Claessen --- requirements/app.in | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements/app.in b/requirements/app.in index d80428d2fc..b139206346 100644 --- a/requirements/app.in +++ b/requirements/app.in @@ -68,4 +68,3 @@ werkzeug vl-convert-python Pillow>=10.0.1 # https://github.com/FlexMeasures/flexmeasures/security/dependabot/91 flask-sock -s2-python[ws] From 18e8476b668f7409ef69c1217f50d3d9d5caf67d Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 8 Dec 2025 11:56:16 +0100 Subject: [PATCH 170/171] chore: clean up diff Signed-off-by: F.N. Claessen --- .gitignore | 3 +- .python-version | 1 - .vscode/settings.json | 38 +++--- Makefile | 8 +- documentation/changelog.rst | 1 - flexmeasures/app.py | 6 +- notebooks/websocket_analysis.ipynb | 206 ----------------------------- 7 files changed, 25 insertions(+), 238 deletions(-) delete mode 100644 .python-version delete mode 100644 notebooks/websocket_analysis.ipynb diff --git a/.gitignore b/.gitignore index e1dfd2c8b6..8121120351 100644 --- a/.gitignore +++ b/.gitignore @@ -53,5 +53,4 @@ coverage.lcov venv* logs/ *.dump -iframe_figures/ -flexmeasures-env-311/ +iframe_figures/ \ No newline at end of file diff --git a/.python-version b/.python-version deleted file mode 100644 index 3e72aa6986..0000000000 --- a/.python-version +++ /dev/null @@ -1 +0,0 @@ -3.11.10 diff --git a/.vscode/settings.json b/.vscode/settings.json index 8649119566..056ea4f4bb 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,21 +1,21 @@ { - "spellright.language": ["en_US"], - "spellright.documentTypes": [ - "markdown", - "latex", - "plaintext", - "restructuredtext", - "python" - ], - "python.linting.enabled": true, - "python.linting.pylintEnabled": false, - "python.linting.flake8Enabled": true, - "workbench.editor.wrapTabs": true, - "python.formatting.provider": "black", - "python.testing.pytestArgs": ["flexmeasures"], - "python.testing.unittestEnabled": false, - "python.testing.pytestEnabled": true, - 
"python.analysis.autoImportCompletions": true, - "stm32-for-vscode.openOCDPath": false, - "stm32-for-vscode.armToolchainPath": false + "spellright.language": ["en_US"], + "spellright.documentTypes": [ + "markdown", + "latex", + "plaintext", + "restructuredtext", + "python" + ], + "python.linting.enabled": true, + "python.linting.pylintEnabled": false, + "python.linting.flake8Enabled": true, + "workbench.editor.wrapTabs": true, + "python.formatting.provider": "black", + "python.testing.pytestArgs": ["flexmeasures"], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "python.analysis.autoImportCompletions": true, + "stm32-for-vscode.openOCDPath": false, + "stm32-for-vscode.armToolchainPath": false } diff --git a/Makefile b/Makefile index 90ac74a4ce..c1d3359a7c 100644 --- a/Makefile +++ b/Makefile @@ -42,10 +42,10 @@ install-for-dev: make ensure-deps-folder pip-sync requirements/${PYV}/app.txt requirements/${PYV}/dev.txt requirements/${PYV}/test.txt make install-flexmeasures -# # Locally install HiGHS on macOS -# @if [ "$(shell uname)" = "Darwin" ]; then \ -# make install-highs-macos; \ -# fi +# Locally install HiGHS on macOS + @if [ "$(shell uname)" = "Darwin" ]; then \ + make install-highs-macos; \ + fi install-for-test: make install-pip-tools diff --git a/documentation/changelog.rst b/documentation/changelog.rst index 0e4e27c455..43e52b5a5a 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -18,7 +18,6 @@ New features * Add form to upload sensor data to the database [see `PR #1481 `_] * Allow editing users in the UI [see `PR #1502 `_] * Smarter toast notifications [see `PR #1530 `_] -* Sticky replay button for asset and sensor pages [see `PR #1739 `_] * Move various warnings to toast notifications [see `PR #1529 `_] * Document how to set a parent asset when creating an asset through the API, and show parent assets in ``flexmeasures show account`` [see `PR #1533 `_] * Add ``flexmeasures show assets`` CLI 
command for listing public assets and option ``--account `` to list assets owned by a specific account [see `PR #1536 `_] diff --git a/flexmeasures/app.py b/flexmeasures/app.py index d5c88fa71a..891fd51c08 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -10,8 +10,7 @@ from pathlib import Path from datetime import date -from flask import Flask, g, request, Response -from flask_security import current_user +from flask import Flask, g, request from flask.cli import load_dotenv from flask_mail import Mail from flask_sslify import SSLify @@ -21,9 +20,6 @@ from redis import Redis from rq import Queue -from flexmeasures import User -from flexmeasures.data import db -from flexmeasures.data.services.data_sources import get_or_create_source from flexmeasures.data.services.job_cache import JobCache diff --git a/notebooks/websocket_analysis.ipynb b/notebooks/websocket_analysis.ipynb deleted file mode 100644 index 03fbbcf178..0000000000 --- a/notebooks/websocket_analysis.ipynb +++ /dev/null @@ -1,206 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 64, - "id": "2b691e65-0818-438a-b484-9ce439baef44", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "import pandas as pd\n", - "\n", - "import plotly.offline as pyo\n", - "import plotly.graph_objs as go\n", - "import plotly.io as pio\n", - "\n", - "\n", - "import plotly_express as px\n", - "pio.renderers.default = 'iframe'\n", - "\n", - "data = pd.read_csv(\"results-db-get-sensor-1.csv\", names=[\"time\", \"type\", \"id\", \"delta\"])\n", - "data[\"time\"] = data[\"time\"].apply(lambda x: pd.Timestamp.fromtimestamp(x))\n", - "data = data.dropna()\n", - "data[\"id2\"] = data.apply(lambda x: f\"{x['type']}: {x['id']}\", axis=1)\n", - "fig = px.line(data, x=\"time\", y=\"delta\", color=\"type\", labels={\n", - " \"time\" : \"Time\",\n", - " \"delta\": \"Roundtrip Time (s)\",\n", - " \"type\" : 
\"Protocol\"\n", - "}, title=\"Roundtrip Time with 1000 concurrent WS connections @ 1Hz and 1000 concurrent API requests @ 1Hz\")\n", - "fig.show()" - ] - }, - { - "cell_type": "code", - "execution_count": 65, - "id": "3a8f9091-b8bd-4802-af36-bb01a2942f9a", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
id
type
API5
WS623
\n", - "
" - ], - "text/plain": [ - " id\n", - "type \n", - "API 5\n", - "WS 623" - ] - }, - "execution_count": 65, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "data[[\"id\", \"type\"]].drop_duplicates().groupby(\"type\").count()" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "066e04d7-148d-48a9-8de9-2cd9773b687e", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "fig = px.histogram(data[8000:], x=\"delta\", color=\"type\", barmode=\"overlay\", labels={\n", - " \"delta\": \"Roundtrip Time (s)\",\n", - " \"type\" : \"Protocol\"\n", - "})\n", - "fig.update_traces(opacity=.9)\n", - "fig" - ] - }, - { - "cell_type": "code", - "execution_count": 49, - "id": "12555575-8f72-422e-a030-a5ea0df93a56", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0 WS: 0\n", - "1 WS: 1\n", - "2 WS: 2\n", - "3 WS: 3\n", - "4 WS: 4\n", - " ... \n", - "150 WS: 1\n", - "151 WS: 0\n", - "152 WS: 2\n", - "153 WS: 4\n", - "154 WS: 3\n", - "Length: 155, dtype: object" - ] - }, - "execution_count": 49, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bc46bc48-c6c4-4326-9462-d5a89111865f", - "metadata": {}, - "outputs": [], - "source": [ - "data.gr" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "fm", - "language": "python", - "name": "fm" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} From 0a780b035fc54a70267b4fd0b66a355c0cea66e2 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Mon, 8 Dec 2025 12:09:57 +0100 Subject: [PATCH 171/171] refactor: move import to top of module Signed-off-by: F.N. Claessen --- flexmeasures/app.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/flexmeasures/app.py b/flexmeasures/app.py index 891fd51c08..5b25155b87 100644 --- a/flexmeasures/app.py +++ b/flexmeasures/app.py @@ -21,6 +21,7 @@ from rq import Queue from flexmeasures.data.services.job_cache import JobCache +from flexmeasures.ws import sock def create( # noqa C901 @@ -51,9 +52,6 @@ def create( # noqa C901 # as we need to know the ENV now (for it to be recognised by Flask()). load_dotenv() app = Flask("flexmeasures") - - from flexmeasures.ws import sock - sock.init_app(app) if env is not None: # overwrite