Commit v2.17.5

Merge pull request #2111 from AntaresSimulatorTeam/release/2.17.5
skamril authored Aug 2, 2024
2 parents caa9cb8 + aa5b313 commit b491c80
Showing 28 changed files with 385 additions and 231 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/main.yml
@@ -106,4 +106,4 @@ jobs:
uses: sonarsource/sonarcloud-github-action@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
4 changes: 2 additions & 2 deletions antarest/__init__.py
@@ -7,9 +7,9 @@

# Standard project metadata

__version__ = "2.17.4"
__version__ = "2.17.5"
__author__ = "RTE, Antares Web Team"
__date__ = "2024-07-29"
__date__ = "2024-08-02"
# noinspection SpellCheckingInspection
__credits__ = "(c) Réseau de Transport de l’Électricité (RTE)"

9 changes: 6 additions & 3 deletions antarest/core/tasks/web.py
@@ -10,6 +10,7 @@
from antarest.core.requests import RequestParameters
from antarest.core.tasks.model import TaskDTO, TaskListFilter
from antarest.core.tasks.service import DEFAULT_AWAIT_MAX_TIMEOUT, TaskJobService
from antarest.core.utils.utils import sanitize_uuid
from antarest.core.utils.web import APITag
from antarest.login.auth import Auth

@@ -61,14 +62,16 @@ def get_task(
Returns:
TaskDTO: Information about the specified task.
"""
sanitized_task_id = sanitize_uuid(task_id)

request_params = RequestParameters(user=current_user)
task_status = service.status_task(task_id, request_params, with_logs)
task_status = service.status_task(sanitized_task_id, request_params, with_logs)

if wait_for_completion and not task_status.status.is_final():
# Ensure 0 <= timeout <= 48 h
timeout = min(max(0, timeout), DEFAULT_AWAIT_MAX_TIMEOUT)
try:
service.await_task(task_id, timeout_sec=timeout)
service.await_task(sanitized_task_id, timeout_sec=timeout)
except concurrent.futures.TimeoutError as exc: # pragma: no cover
# Note that if the task does not complete within the specified time,
# the task will continue running but the user will receive a timeout.
@@ -78,7 +81,7 @@ def get_task(
detail="The request timed out while waiting for task completion.",
) from exc

return service.status_task(task_id, request_params, with_logs)
return service.status_task(sanitized_task_id, request_params, with_logs)

@bp.put("/tasks/{task_id}/cancel", tags=[APITag.tasks])
def cancel_task(
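As context for the change above, a minimal sketch of what the new sanitization step does at the endpoint boundary; the implementation of sanitize_uuid is in the antarest/core/utils/utils.py diff below, and the task IDs here are purely hypothetical.

from fastapi import HTTPException

from antarest.core.utils.utils import sanitize_uuid

# A canonical lowercase UUID passes through unchanged and reaches the task service.
sanitize_uuid("0a1b2c3d-4e5f-6071-8293-a4b5c6d7e8f9")

# Anything else is rejected with a 400 error before service.status_task is called.
try:
    sanitize_uuid("not-a-task-id")
except HTTPException as exc:
    print(exc.status_code)  # 400
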
15 changes: 14 additions & 1 deletion antarest/core/utils/utils.py
@@ -1,6 +1,9 @@
import base64
import glob
import http
import logging
import os
import re
import shutil
import tempfile
import time
@@ -10,12 +13,15 @@

import py7zr
import redis
from fastapi import HTTPException

from antarest.core.config import RedisConfig
from antarest.core.exceptions import ShouldNotHappenException

logger = logging.getLogger(__name__)

UUID_PATTERN = re.compile("^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")


class DTO:
"""
@@ -39,7 +45,14 @@ def __repr__(self) -> str:


def sanitize_uuid(uuid: str) -> str:
return str(glob.escape(uuid))
if not UUID_PATTERN.match(uuid):
sanitized_id = base64.b64encode(uuid.encode("utf-8")).decode("utf-8")
raise HTTPException(status_code=http.HTTPStatus.BAD_REQUEST, detail=f"uuid {sanitized_id} is not a valid UUID")
return uuid


def sanitize_string(string: str) -> str:
return str(glob.escape(string))


class BadArchiveContent(Exception):
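To illustrate how the helpers now divide the work, a short sketch: sanitize_uuid only validates its input against UUID_PATTERN, while the new sanitize_string keeps the previous glob-escaping behaviour. The inputs are illustrative and assume the module is importable as shown above.

import glob

from antarest.core.utils.utils import UUID_PATTERN, sanitize_string

# UUID_PATTERN only accepts the canonical lowercase hyphenated form.
assert UUID_PATTERN.match("0a1b2c3d-0000-0000-0000-000000000000")
assert UUID_PATTERN.match("0A1B2C3D-0000-0000-0000-000000000000") is None  # uppercase is rejected

# sanitize_string carries over the old glob.escape behaviour for non-UUID strings.
assert sanitize_string("matrix-*.txt") == glob.escape("matrix-*.txt")  # "matrix-[*].txt"
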
6 changes: 3 additions & 3 deletions antarest/matrixstore/service.py
@@ -447,12 +447,12 @@ def create_matrix_files(self, matrix_ids: t.Sequence[str], export_path: Path) ->
if not mtx:
continue
name = f"matrix-{mtx.id}.txt"
filepath = f"{tmpdir}/{name}"
filepath = Path(tmpdir).joinpath(name)
array = np.array(mtx.data, dtype=np.float64)
if array.size == 0:
# If the array or dataframe is empty, create an empty file instead of
# traditional saving to avoid unwanted line breaks.
open(filepath, mode="wb").close()
filepath.touch()
else:
# noinspection PyTypeChecker
np.savetxt(filepath, array, delimiter="\t", fmt="%.18f")
@@ -544,7 +544,7 @@ def download_matrix(
if array.size == 0:
# If the array or dataframe is empty, create an empty file instead of
# traditional saving to avoid unwanted line breaks.
open(filepath, mode="wb").close()
filepath.touch()
else:
# noinspection PyTypeChecker
np.savetxt(filepath, array, delimiter="\t", fmt="%.18f")
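The switch to filepath.touch() relies on filepath being a Path (the first hunk replaces the f-string path with Path(tmpdir).joinpath(name)); per the comment in the diff, an empty matrix must produce a truly empty file rather than go through np.savetxt, which would emit an unwanted line break. A self-contained sketch of the pattern, with an illustrative file name:

import tempfile
from pathlib import Path

import numpy as np

with tempfile.TemporaryDirectory() as tmpdir:
    filepath = Path(tmpdir).joinpath("matrix-empty.txt")
    array = np.array([], dtype=np.float64)
    if array.size == 0:
        filepath.touch()  # empty file, no trailing newline
    else:
        np.savetxt(filepath, array, delimiter="\t", fmt="%.18f")
    assert filepath.read_bytes() == b""
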
9 changes: 4 additions & 5 deletions antarest/study/business/advanced_parameters_management.py
@@ -1,4 +1,3 @@
import re
from typing import Any, Dict, List, Optional

from pydantic import validator
@@ -89,15 +88,15 @@ class AdvancedParamsFormFields(FormFieldsBaseModel):

@validator("accuracy_on_correlation")
def check_accuracy_on_correlation(cls, v: str) -> str:
if len(v.strip()) == 0:
sanitized_v = v.strip().replace(" ", "")
if not sanitized_v:
return ""

allowed_values = ["wind", "load", "solar"]
values_list = re.split(r"\s*,\s*", v.strip())

values_list = sanitized_v.split(",")
if len(values_list) != len(set(values_list)):
raise ValueError("Duplicate value")

allowed_values = ["wind", "load", "solar"]
for value in values_list:
if value not in allowed_values:
raise ValueError(f"Invalid value: {value}")
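A standalone sketch of the new validation logic for accuracy_on_correlation; the diff truncates the tail of the validator, so the return value for accepted input is assumed here to be the original string.

def check_accuracy_on_correlation(v: str) -> str:
    sanitized_v = v.strip().replace(" ", "")
    if not sanitized_v:
        return ""
    values_list = sanitized_v.split(",")
    if len(values_list) != len(set(values_list)):
        raise ValueError("Duplicate value")
    allowed_values = ["wind", "load", "solar"]
    for value in values_list:
        if value not in allowed_values:
            raise ValueError(f"Invalid value: {value}")
    return v  # assumed; the actual return is outside the shown hunk

check_accuracy_on_correlation("wind, load")   # accepted: spaces are stripped before splitting
# check_accuracy_on_correlation("wind,wind")  -> ValueError: Duplicate value
# check_accuracy_on_correlation("hydro")      -> ValueError: Invalid value: hydro
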
8 changes: 4 additions & 4 deletions antarest/study/storage/rawstudy/ini_reader.py
@@ -63,13 +63,13 @@ def from_kwargs(
The newly created instance
"""
if section:
section_regex = re.compile(re.escape(section))
section_regex = re.compile(re.escape(section), re.IGNORECASE)
if option:
option_regex = re.compile(re.escape(option))
option_regex = re.compile(re.escape(option), re.IGNORECASE)
if isinstance(section_regex, str):
section_regex = re.compile(section_regex) if section_regex else None
section_regex = re.compile(section_regex, re.IGNORECASE) if section_regex else None
if isinstance(option_regex, str):
option_regex = re.compile(option_regex) if option_regex else None
option_regex = re.compile(option_regex, re.IGNORECASE) if option_regex else None
return cls(section_regex=section_regex, option_regex=option_regex)

def select_section_option(self, section: str, option: str = "") -> bool:
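A minimal illustration of what the added re.IGNORECASE flag changes: a filter built from a literal section or option name now matches regardless of the casing found in the INI file. The section name below is illustrative.

import re

section_regex = re.compile(re.escape("general"), re.IGNORECASE)
assert section_regex.match("General")
assert section_regex.match("GENERAL")
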
@@ -148,13 +148,13 @@ def _extract_data_from_file(

is_zip_file: bool = root.suffix.lower() == ".zip"
posix_path: str = inside_root_path.as_posix()
output_data_path = root / inside_root_path

if file_type == FileType.TXT:
# Parse the file as a list of lines, return an empty list if missing.
if is_zip_file:
return _extract_text_from_zip(root, posix_path)
else:
output_data_path = root / inside_root_path
try:
return output_data_path.read_text(encoding="utf-8").splitlines(keepends=False)
except FileNotFoundError:
@@ -165,7 +165,6 @@
if is_zip_file:
return _extract_ini_from_zip(root, posix_path, multi_ini_keys=multi_ini_keys)
else:
output_data_path = root / inside_root_path
try:
reader = IniReader(multi_ini_keys)
return reader.read(output_data_path)
23 changes: 19 additions & 4 deletions antarest/study/storage/rawstudy/model/filesystem/ini_file_node.py
@@ -101,14 +101,29 @@ def _get(
else:
data = self.reader.read(self.path, **kwargs)

data = self._handle_urls(data, depth, url)
return t.cast(SUB_JSON, data)

@staticmethod
def _handle_urls(data: t.Dict[str, t.Any], depth: int, url: t.List[str]) -> t.Dict[str, t.Any]:
if len(url) == 2:
data = data[url[0]][url[1]]
if url[0] in data and url[1] in data[url[0]]:
data = data[url[0]][url[1]]
else:
# lower keys to find a match
data = {k.lower(): v for k, v in {k.lower(): v for k, v in data.items()}[url[0].lower()].items()}[
url[1].lower()
]
elif len(url) == 1:
data = data[url[0]]
if url[0] in data:
data = data[url[0]]
else:
# lower keys to find a match
data = {k.lower(): v for k, v in data.items()}[url[0].lower()]

else:
data = {k: {} for k in data} if depth == 1 else data

return t.cast(SUB_JSON, data)
return data

# noinspection PyMethodMayBeStatic
def _get_filtering_kwargs(self, url: t.List[str]) -> t.Dict[str, str]:
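The two-segment branch of the new _handle_urls can be exercised in isolation: when the exact keys are missing, both the data keys and the URL parts are lowered before the lookup. The INI content below is illustrative.

data = {"General": {"Mode": "Economy"}}
url = ["general", "mode"]

if url[0] in data and url[1] in data[url[0]]:
    value = data[url[0]][url[1]]
else:
    # lower keys to find a match, as in the diff above
    value = {k.lower(): v for k, v in {k.lower(): v for k, v in data.items()}[url[0].lower()].items()}[
        url[1].lower()
    ]

assert value == "Economy"
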
104 changes: 56 additions & 48 deletions antarest/study/storage/study_download_utils.py
@@ -18,10 +18,12 @@
ExportFormat,
MatrixAggregationResult,
MatrixAggregationResultDTO,
MatrixIndex,
StudyDownloadDTO,
StudyDownloadLevelDTO,
StudyDownloadType,
TimeSerie,
TimeSeriesData,
)
from antarest.study.storage.rawstudy.model.filesystem.config.model import Area, EnrModelling, FileStudyTreeConfig
from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy
@@ -329,7 +331,6 @@ def export(
target_file: Path,
) -> None:
if filetype == ExportFormat.JSON:
# 1- JSON
with open(target_file, "w") as fh:
json.dump(
matrix.dict(),
@@ -340,46 +341,51 @@
separators=(",", ":"),
)
else:
# 1- Zip/tar+gz container
with (
ZipFile(target_file, "w", ZIP_DEFLATED) # type: ignore
if filetype == ExportFormat.ZIP
else tarfile.open(target_file, mode="w:gz")
) as output_data:
# 2 - Create CSV files
StudyDownloader.write_inside_archive(target_file, filetype, matrix)

@staticmethod
def write_inside_archive(path: Path, file_type: ExportFormat, matrix: MatrixAggregationResultDTO) -> None:
if file_type == ExportFormat.ZIP:
with ZipFile(path, "w", ZIP_DEFLATED) as f:
for ts_data in matrix.data:
output = StringIO()
writer = csv.writer(output, quoting=csv.QUOTE_NONE)
nb_rows, csv_titles = StudyDownloader.export_infos(ts_data.data)
if nb_rows == -1:
raise ExportException(f"Outputs export: No rows for {ts_data.name} CSV")
writer.writerow(csv_titles)
row_date = datetime.strptime(matrix.index.start_date, "%Y-%m-%d %H:%M:%S")
for year in ts_data.data:
for i in range(0, nb_rows):
columns = ts_data.data[year]
csv_row: List[Optional[Union[int, float, str]]] = [
str(row_date),
int(year),
]
csv_row.extend([column_data.data[i] for column_data in columns])
writer.writerow(csv_row)
if matrix.index.level == StudyDownloadLevelDTO.WEEKLY and i == 0:
row_date = row_date + timedelta(days=matrix.index.first_week_size)
else:
row_date = matrix.index.level.inc_date(row_date)

bytes_data = str.encode(output.getvalue(), "utf-8")
if isinstance(output_data, ZipFile):
output_data.writestr(f"{ts_data.name}.csv", bytes_data)
else:
data_file = BytesIO(bytes_data)
data_file.seek(0, os.SEEK_END)
file_size = data_file.tell()
data_file.seek(0)
info = tarfile.TarInfo(name=f"{ts_data.name}.csv")
info.size = file_size
output_data.addfile(tarinfo=info, fileobj=data_file)
bytes_to_writes = StudyDownloader.create_csv_file(ts_data, matrix.index)
f.writestr(f"{ts_data.name}.csv", bytes_to_writes)
else:
with tarfile.open(path, mode="w:gz") as f:
for ts_data in matrix.data:
bytes_to_writes = StudyDownloader.create_csv_file(ts_data, matrix.index)
data_file = BytesIO(bytes_to_writes)
data_file.seek(0, os.SEEK_END)
file_size = data_file.tell()
data_file.seek(0)
info = tarfile.TarInfo(name=f"{ts_data.name}.csv")
info.size = file_size
f.addfile(tarinfo=info, fileobj=data_file)

@staticmethod
def create_csv_file(ts_data: TimeSeriesData, index: MatrixIndex) -> bytes:
output = StringIO()
writer = csv.writer(output, quoting=csv.QUOTE_NONE)
nb_rows, csv_titles = StudyDownloader.export_infos(ts_data.data)
if nb_rows == -1:
raise ExportException(f"Outputs export: No rows for {ts_data.name} CSV")
writer.writerow(csv_titles)
row_date = datetime.strptime(index.start_date, "%Y-%m-%d %H:%M:%S")
for year in ts_data.data:
for i in range(0, nb_rows):
columns = ts_data.data[year]
csv_row: List[Optional[Union[int, float, str]]] = [
str(row_date),
int(year),
]
csv_row.extend([column_data.data[i] for column_data in columns])
writer.writerow(csv_row)
if index.level == StudyDownloadLevelDTO.WEEKLY and i == 0:
row_date = row_date + timedelta(days=index.first_week_size)
else:
row_date = index.level.inc_date(row_date)

return str.encode(output.getvalue(), "utf-8")


class BadOutputFormat(HTTPException):
@@ -463,21 +469,23 @@ def get_output_variables_information(study: FileStudy, output_name: str) -> Dict
"link": [],
}

try:
output_variables["area"] = get_output_variables(first_year_result["areas"], 1)
except BadOutputFormat:
logger.warning(f"Failed to retrieve output variables in {study.config.study_id} ({output_name}) for areas")
if "areas" in first_year_result:
try:
output_variables["area"] = get_output_variables(first_year_result["areas"], 1)
except BadOutputFormat:
logger.warning(f"Failed to retrieve output variables in {study.config.study_id} ({output_name}) for areas")

if len(output_variables["area"]) == 0 and "areas" in mc_all_result:
try:
output_variables["area"] = get_output_variables(mc_all_result["areas"], 1)
except BadOutputFormat:
logger.warning(f"Failed to retrieve output variables in {study.config.study_id} ({output_name}) for areas")

try:
output_variables["link"] = get_output_variables(first_year_result["links"], 2)
except BadOutputFormat:
logger.warning(f"Failed to retrieve output variables in {study.config.study_id} ({output_name}) for links")
if "links" in first_year_result:
try:
output_variables["link"] = get_output_variables(first_year_result["links"], 2)
except BadOutputFormat:
logger.warning(f"Failed to retrieve output variables in {study.config.study_id} ({output_name}) for links")

if len(output_variables["link"]) == 0 and "links" in mc_all_result:
try:
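The export refactor above splits CSV generation (create_csv_file) from archive writing (write_inside_archive). A condensed sketch of the archive side, assuming the CSV bytes are already built; the function names, entry name, and paths here are illustrative and not part of the diff.

import tarfile
from io import BytesIO
from pathlib import Path
from zipfile import ZIP_DEFLATED, ZipFile

def write_zip_entry(path: Path, name: str, data: bytes) -> None:
    # Zip archives accept the bytes directly.
    with ZipFile(path, "w", ZIP_DEFLATED) as f:
        f.writestr(f"{name}.csv", data)

def write_targz_entry(path: Path, name: str, data: bytes) -> None:
    # tar.gz archives need a TarInfo with an explicit size and a file-like object.
    with tarfile.open(path, mode="w:gz") as f:
        info = tarfile.TarInfo(name=f"{name}.csv")
        info.size = len(data)
        f.addfile(tarinfo=info, fileobj=BytesIO(data))

write_zip_entry(Path("example.zip"), "de-fr", b"date,year\n")
write_targz_entry(Path("example.tar.gz"), "de-fr", b"date,year\n")
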
9 changes: 5 additions & 4 deletions antarest/study/storage/variantstudy/variant_study_service.py
@@ -569,8 +569,9 @@ def generate_task(
denormalize: bool = False,
from_scratch: bool = False,
) -> str:
with FileLock(str(self.config.storage.tmp_dir / f"study-generation-{metadata.id}.lock")):
logger.info(f"Starting variant study {metadata.id} generation")
study_id = metadata.id
with FileLock(str(self.config.storage.tmp_dir / f"study-generation-{study_id}.lock")):
logger.info(f"Starting variant study {study_id} generation")
self.repository.refresh(metadata)
if metadata.generation_task:
try:
Expand All @@ -579,11 +580,11 @@ def generate_task(
RequestParameters(DEFAULT_ADMIN_USER),
)
if not previous_task.status.is_final():
logger.info(f"Returning already existing variant study {metadata.id} generation")
logger.info(f"Returning already existing variant study {study_id} generation")
return str(metadata.generation_task)
except HTTPException as e:
logger.warning(
f"Failed to retrieve generation task for study {metadata.id}",
f"Failed to retrieve generation task for study {study_id}",
exc_info=e,
)

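A sketch of the per-study locking pattern shown above, where tmp_dir and study_id stand in for self.config.storage.tmp_dir and metadata.id; it assumes the filelock package that FileLock is imported from.

from pathlib import Path

from filelock import FileLock

tmp_dir = Path("/tmp/antarest")  # placeholder for self.config.storage.tmp_dir
study_id = "0a1b2c3d-0000-0000-0000-000000000000"  # hypothetical study id

tmp_dir.mkdir(parents=True, exist_ok=True)
with FileLock(str(tmp_dir / f"study-generation-{study_id}.lock")):
    # Only one process generates a snapshot for a given study at a time;
    # concurrent callers block here until the lock is released.
    pass
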
