Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 24 additions & 1 deletion src/mesido/esdl/esdl_mixin.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import copy
import dataclasses
import logging
import os
import sys
from datetime import timedelta
from pathlib import Path
Expand Down Expand Up @@ -71,6 +72,8 @@ class ESDLMixin(
for example demand profiles.
"""

csv_ensemble_mode: bool = False

esdl_run_info_path: Path = None

esdl_pi_validate_timeseries: bool = False
Expand Down Expand Up @@ -691,6 +694,19 @@ def read(self) -> None:
None
"""
super().read()
ensemble_size = 1
self.__ensemble = None
if self.csv_ensemble_mode:
self.__ensemble = np.genfromtxt(
os.path.join(self._input_folder, "ensemble.csv"),
delimiter=",",
deletechars="",
dtype=None,
names=True,
encoding=None,
)
ensemble_size = len(self.__ensemble)

energy_system_components = self.energy_system_components
esdl_carriers = self.esdl_carriers
self.hot_cold_pipe_relations()
Expand All @@ -701,7 +717,8 @@ def read(self) -> None:
esdl_asset_id_to_name_map=self.esdl_asset_id_to_name_map,
esdl_assets=self.esdl_assets,
carrier_properties=esdl_carriers,
ensemble_size=self.ensemble_size,
ensemble_size=ensemble_size,
ensemble=self.__ensemble,
)

def write(self) -> None:
Expand Down Expand Up @@ -826,3 +843,9 @@ def filter_asset_measures(
filtered_assets[asset_id] = asset_type

return filtered_assets

def ensemble_member_probability(self, ensemble_member):
    """Return the probability weight assigned to one ensemble member.

    When ``csv_ensemble_mode`` is enabled, the weight comes from the
    ``probability`` column of the loaded ``ensemble.csv`` table; otherwise
    there is a single deterministic scenario and the weight is 1.0.

    :param ensemble_member: Index of the ensemble member.
    :return: Probability of the requested ensemble member as a float.
    """
    # Deterministic (non-ensemble) runs: a single member with full weight.
    if not self.csv_ensemble_mode:
        return 1.0
    return self.__ensemble["probability"][ensemble_member]
70 changes: 44 additions & 26 deletions src/mesido/esdl/profile_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ def read_profiles(
esdl_assets: Dict[str, Asset],
carrier_properties: Dict[str, Dict],
ensemble_size: int,
ensemble,
) -> None:
"""
This function takes a datastore and a dictionary of milp network components and loads a
Expand Down Expand Up @@ -91,6 +92,7 @@ def read_profiles(
esdl_asset_id_to_name_map=esdl_asset_id_to_name_map,
carrier_properties=carrier_properties,
ensemble_size=ensemble_size,
ensemble=ensemble,
)

try:
Expand Down Expand Up @@ -181,6 +183,7 @@ def _load_profiles_from_source(
esdl_asset_id_to_name_map: Dict[str, str],
carrier_properties: Dict[str, Dict],
ensemble_size: int,
ensemble: None | np.ndarray,
) -> None:
"""
This function must be implemented by the child. It must load the available
Expand Down Expand Up @@ -240,6 +243,7 @@ def _load_profiles_from_source(
esdl_asset_id_to_name_map: Dict[str, str],
carrier_properties: Dict[str, Dict],
ensemble_size: int,
ensemble: None | np.ndarray,
) -> None:
profiles: Dict[str, np.ndarray] = dict()
logger.info("Reading profiles from InfluxDB")
Expand Down Expand Up @@ -613,6 +617,7 @@ def _load_profiles_from_source(
esdl_asset_id_to_name_map: Dict[str, str],
carrier_properties: Dict[str, Dict],
ensemble_size: int,
ensemble: None | np.ndarray,
) -> None:
if self._file_path.suffix == ".xml":
logger.warning(
Expand All @@ -627,6 +632,7 @@ def _load_profiles_from_source(
energy_system_components=energy_system_components,
carrier_properties=carrier_properties,
ensemble_size=ensemble_size,
ensemble=ensemble,
)
else:
raise _ProfileParserException(
Expand All @@ -638,25 +644,27 @@ def _load_csv(
energy_system_components: Dict[str, Set[str]],
carrier_properties: Dict[str, Dict],
ensemble_size: int,
ensemble,
) -> None:
data = pd.read_csv(self._file_path)

if len(data.filter(like="Unnamed").columns) > 0:
raise Exception(
f"An unnamed column has been found in profile source file: {self._file_path}"
)
data_dict = {}
if ensemble_size > 1:
input_folder = self._file_path.parent
file_name = self._file_path.name
for i, ensemble_name, _ in ensemble:
data_dict[i] = pd.read_csv(Path(input_folder / ensemble_name / file_name))
else:
data_dict[0] = pd.read_csv(self._file_path)

try:
timeseries_import_times = [
datetime.datetime.strptime(entry.replace("Z", ""), "%Y-%m-%d %H:%M:%S").replace(
tzinfo=datetime.timezone.utc
for data in data_dict.values():
if len(data.filter(like="Unnamed").columns) > 0:
raise Exception(
f"An unnamed column has been found in profile source file: {self._file_path}"
)
for entry in data["DateTime"].to_numpy()
]
except ValueError:

try:
timeseries_import_times = [
datetime.datetime.strptime(entry.replace("Z", ""), "%Y-%m-%dT%H:%M:%S").replace(
datetime.datetime.strptime(entry.replace("Z", ""), "%Y-%m-%d %H:%M:%S").replace(
tzinfo=datetime.timezone.utc
)
for entry in data["DateTime"].to_numpy()
Expand All @@ -665,48 +673,58 @@ def _load_csv(
try:
timeseries_import_times = [
datetime.datetime.strptime(
entry.replace("Z", ""), "%d-%m-%Y %H:%M"
entry.replace("Z", ""), "%Y-%m-%dT%H:%M:%S"
).replace(tzinfo=datetime.timezone.utc)
for entry in data["DateTime"].to_numpy()
]
except ValueError:
raise _ProfileParserException("Date time string is not in a supported format")
try:
timeseries_import_times = [
datetime.datetime.strptime(
entry.replace("Z", ""), "%d-%m-%Y %H:%M"
).replace(tzinfo=datetime.timezone.utc)
for entry in data["DateTime"].to_numpy()
]
except ValueError:
raise _ProfileParserException(
"Date time string is not in a supported format"
)

logger.warning("Timezone specification not supported yet: default UTC has been used")
logger.warning("Timezone specification not supported yet: default UTC has been used")

self._reference_datetimes = timeseries_import_times
self._reference_datetimes = timeseries_import_times

for ensemble_member in range(ensemble_size):
for e_m in range(ensemble_size):
data_em = data_dict[e_m]
for component_type, var_name in self.component_type_to_var_name_map.items():
for component_name in energy_system_components.get(component_type, []):
try:
column_name = f"{component_name.replace(' ', '')}"
values = data[column_name].to_numpy()
values = data_em[column_name].to_numpy()
if np.isnan(values).any():
raise Exception(
f"Column name: {column_name}, NaN exists in the profile source"
f" file {self._file_path}."
f" Detials: {data[data[column_name].isnull()]}"
f" Detials: {data_em[data_em[column_name].isnull()]}"
)
except KeyError:
pass
else:
self._profiles[ensemble_member][component_name + var_name] = values
self._profiles[e_m][component_name + var_name] = values
for properties in carrier_properties.values():
carrier_name = properties.get("name")
try:
values = data[carrier_name].to_numpy()
values = data_em[carrier_name].to_numpy()
if np.isnan(values).any():
raise Exception(
f"Carrier name: {carrier_name}, NaN exists in the profile source file"
f" {self._file_path}. Details: {data[data[carrier_name].isnull()]}"
f" {self._file_path}. Details: "
f"{data_em[data_em[carrier_name].isnull()]}"
)
except KeyError:
pass
else:
self._profiles[ensemble_member][
carrier_name + self.carrier_profile_var_name
] = values
self._profiles[e_m][carrier_name + self.carrier_profile_var_name] = values

def _load_xml(self, energy_system_components, esdl_asset_id_to_name_map):
timeseries_import_basename = self._file_path.stem
Expand Down
3 changes: 3 additions & 0 deletions tests/models/unit_cases/case_2a_ensemble/input/ensemble.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
Unnamed: 0,name,probability
0,forecast1,0.7
1,forecast2,0.3
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
DateTime,HeatingDemand_7484,HeatingDemand_c6c8,HeatingDemand_6f99
2013-05-19 22:00:00,350000,350000,350000
2013-05-19 23:00:00,350000,350000,350000
2013-05-20 00:00:00,350000,350000,350000
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
DateTime,HeatingDemand_7484,HeatingDemand_c6c8,HeatingDemand_6f99
2013-05-19 22:00:00,350000,350000,300000
2013-05-19 23:00:00,350000,350000,300000
2013-05-20 00:00:00,350000,350000,300000
Loading
Loading