text
stringlengths 0
24.9k
|
|---|
import json  # type: ignore

from .._config import _Config
from ..exceptions.exceptions import LoadingError
from ._base_serializer import _BaseSerializer


class _JsonSerializer(_BaseSerializer):
    """Convert configuration from JSON representation to Python Dict and reciprocally."""

    @classmethod
    def _write(cls, configuration: _Config, filename: str):
        """Dump the stringified configuration to *filename* as JSON."""
        with open(filename, "w") as output_file:
            json.dump(cls._str(configuration), output_file, ensure_ascii=False, indent=0, check_circular=False)

    @classmethod
    def _read(cls, filename: str) -> _Config:
        """Load a configuration from the JSON file *filename*.

        Raises:
            LoadingError: If the file content is not valid JSON.
        """
        try:
            with open(filename) as input_file:
                raw = json.load(input_file)
            return cls._from_dict(cls._pythonify(raw))
        except json.JSONDecodeError as e:
            error_msg = f"Can not load configuration {e}"
            raise LoadingError(error_msg)

    @classmethod
    def _serialize(cls, configuration: _Config) -> str:
        """Return the configuration as a JSON string."""
        return json.dumps(cls._str(configuration), ensure_ascii=False, indent=0, check_circular=False)

    @classmethod
    def _deserialize(cls, config_as_string: str) -> _Config:
        """Build a _Config from a JSON string."""
        raw = dict(json.loads(config_as_string))
        return cls._from_dict(cls._pythonify(raw))
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from typing import Any, List

from .issue import Issue


class IssueCollector:
    """
    A collection of issues (instances of class `Issue^`).

    Attributes:
        errors (List[Issue^]): List of ERROR issues collected.
        warnings (List[Issue^]): List of WARNING issues collected.
        infos (List[Issue^]): List of INFO issues collected.
        all (List[Issue^]): List of all issues collected ordered by decreasing level (ERROR, WARNING and INFO).
    """

    _ERROR_LEVEL = "ERROR"
    _WARNING_LEVEL = "WARNING"
    _INFO_LEVEL = "INFO"

    def __init__(self):
        self._errors: List[Issue] = []
        self._warnings: List[Issue] = []
        self._infos: List[Issue] = []

    @property
    def all(self) -> List[Issue]:
        # Ordered by decreasing severity.
        return self._errors + self._warnings + self._infos

    @property
    def infos(self) -> List[Issue]:
        return self._infos

    @property
    def warnings(self) -> List[Issue]:
        return self._warnings

    @property
    def errors(self) -> List[Issue]:
        return self._errors

    def _add_error(self, field: str, value: Any, message: str, checker_name: str):
        self.__collect(self._errors, self._ERROR_LEVEL, field, value, message, checker_name)

    def _add_warning(self, field: str, value: Any, message: str, checker_name: str):
        self.__collect(self._warnings, self._WARNING_LEVEL, field, value, message, checker_name)

    def _add_info(self, field: str, value: Any, message: str, checker_name: str):
        self.__collect(self._infos, self._INFO_LEVEL, field, value, message, checker_name)

    @staticmethod
    def __collect(bucket: List[Issue], level: str, field: str, value: Any, message: str, checker_name: str):
        # Single construction point for Issue objects appended to a bucket.
        bucket.append(Issue(level, field, value, message, checker_name))
|
from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class Issue:
    """
    An issue detected in the configuration.

    Attributes:
        level (str): Level of the issue among ERROR, WARNING, INFO.
        field (str): Configuration field on which the issue has been detected.
        value (Any): Value of the field on which the issue has been detected.
        message (str): Human readable message to help the user fix the issue.
        tag (Optional[str]): Optional tag to be used to filter issues.
    """

    level: str
    field: str
    value: Any
    message: str
    tag: Optional[str]

    def __str__(self) -> str:
        text = self.message
        # Only truthy values are echoed back; string values are quoted.
        if self.value:
            if isinstance(self.value, str):
                shown_value = f'"{self.value}"'
            else:
                shown_value = f"{self.value}"
            text += f" Current value of property `{self.field}` is {shown_value}."
        return text
|
from typing import List, Type

from ._checkers._config_checker import _ConfigChecker
from .issue_collector import IssueCollector


class _Checker:
    """Holds the various checkers to perform on the config."""

    # Registered checker *classes* (not instances); each is instantiated per run.
    # Fix: annotated as Type[_ConfigChecker] — the previous _ConfigChecker
    # annotation described an instance, but _check() calls checker(...) to
    # construct one, so the list holds classes.
    _checkers: List[Type[_ConfigChecker]] = []

    @classmethod
    def _check(cls, _applied_config):
        """Run every registered checker against *_applied_config*.

        Returns:
            IssueCollector: The collector populated by all checkers.
        """
        collector = IssueCollector()
        for checker in cls._checkers:
            checker(_applied_config, collector)._check()
        return collector

    @classmethod
    def add_checker(cls, checker_class: Type[_ConfigChecker]):
        """Register a checker class to be run by `_check`."""
        cls._checkers.append(checker_class)
|
import abc
from typing import Any, List, Optional, Set

from ..._config import _Config
from ..issue_collector import IssueCollector


class _ConfigChecker:
    """Base class for configuration checkers that report issues into a collector."""

    _PREDEFINED_PROPERTIES_KEYS = ["_entity_owner"]

    def __init__(self, config: _Config, collector):
        self._collector = collector
        self._config = config

    @abc.abstractmethod
    def _check(self) -> IssueCollector:
        raise NotImplementedError

    def _error(self, field: str, value: Any, message: str):
        # Tag every issue with the concrete checker's class name.
        self._collector._add_error(field, value, message, self.__class__.__name__)

    def _warning(self, field: str, value: Any, message: str):
        self._collector._add_warning(field, value, message, self.__class__.__name__)

    def _info(self, field: str, value: Any, message: str):
        self._collector._add_info(field, value, message, self.__class__.__name__)

    def _check_children(
        self,
        parent_config_class,
        config_id: str,
        config_key: str,
        config_value,
        child_config_class,
        can_be_empty: Optional[bool] = False,
    ):
        """Verify that *config_value* is a list/set of *child_config_class* instances."""
        if not config_value and not can_be_empty:
            self._warning(
                config_key,
                config_value,
                f"{config_key} field of {parent_config_class.__name__} `{config_id}` is empty.",
            )
            return
        if not config_value and can_be_empty:
            # An empty collection is acceptable here; nothing else to verify.
            pass
        is_valid_collection = isinstance(config_value, (List, Set)) and all(
            isinstance(child, child_config_class) for child in config_value
        )
        if not is_valid_collection:
            self._error(
                config_key,
                config_value,
                f"{config_key} field of {parent_config_class.__name__} `{config_id}` must be populated with a list "
                f"of {child_config_class.__name__} objects.",
            )

    def _check_existing_config_id(self, config):
        """Report an error when the config id is missing or empty."""
        if not config.id:
            self._error(
                "config_id",
                config.id,
                f"config_id of {config.__class__.__name__} `{config.id}` is empty.",
            )

    def _check_if_entity_property_key_used_is_predefined(self, config):
        """Report an error for every property key reserved for internal use."""
        for key, value in config._properties.items():
            if key in self._PREDEFINED_PROPERTIES_KEYS:
                self._error(
                    key,
                    value,
                    f"Properties of {config.__class__.__name__} `{config.id}` cannot have `{key}` as its property.",
                )
|
from ..._config import _Config
from ..issue_collector import IssueCollector
from ._config_checker import _ConfigChecker


class _AuthConfigChecker(_ConfigChecker):
    """Checks that the authentication configuration matches the selected protocol."""

    def __init__(self, config: _Config, collector: IssueCollector):
        super().__init__(config, collector)

    def _check(self) -> IssueCollector:
        auth_config = self._config._auth_config
        self._check_predefined_protocol(auth_config)
        return self._collector

    def _check_predefined_protocol(self, auth_config):
        # Dispatch to the protocol-specific checks.
        if auth_config.protocol == auth_config._PROTOCOL_LDAP:
            self.__check_ldap(auth_config)
        if auth_config.protocol == auth_config._PROTOCOL_TAIPY:
            self.__check_taipy(auth_config)

    def __check_taipy(self, auth_config):
        # Bug fix: this check previously reported _LDAP_SERVER and the LDAP
        # protocol although it tests for the missing _TAIPY_ROLES property.
        if auth_config._TAIPY_ROLES not in auth_config.properties:
            self._error(
                "properties",
                auth_config._TAIPY_ROLES,
                f"`{auth_config._TAIPY_ROLES}` property must be populated when {auth_config._PROTOCOL_TAIPY} is used.",
            )
        if auth_config._TAIPY_PWD not in auth_config.properties:
            # Message fix: the backticks previously wrapped the whole sentence
            # instead of the property name.
            self._warning(
                "properties",
                auth_config._TAIPY_PWD,
                f"In order to protect authentication with passwords using {auth_config._PROTOCOL_TAIPY} protocol,"
                f" `{auth_config._TAIPY_PWD}` property can be populated.",
            )

    def __check_ldap(self, auth_config):
        if auth_config._LDAP_SERVER not in auth_config.properties:
            self._error(
                "properties",
                auth_config._LDAP_SERVER,
                f"`{auth_config._LDAP_SERVER}` attribute must be populated when {auth_config._PROTOCOL_LDAP} is used.",
            )
        if auth_config._LDAP_BASE_DN not in auth_config.properties:
            self._error(
                "properties",
                auth_config._LDAP_BASE_DN,
                f"`{auth_config._LDAP_BASE_DN}` field must be populated when {auth_config._PROTOCOL_LDAP} is used.",
            )
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from .exceptions import *
|
class LoadingError(Exception):
    """Raised if an error occurs while loading the configuration file."""


class InconsistentEnvVariableError(Exception):
    """Raised when an inconsistent value is detected in an environment variable referenced by the configuration."""


class MissingEnvVariableError(Exception):
    """Raised when an environment variable referenced in the configuration is missing."""


class InvalidConfigurationId(Exception):
    """Raised when a configuration id is not valid."""


class ConfigurationUpdateBlocked(Exception):
    """Raised when the configuration is being blocked from update by other Taipy services."""
|
from ..common._repr_enum import _ReprEnum


class Frequency(_ReprEnum):
    """Frequency of the recurrence of `Cycle^` and `Scenario^` objects.

    The frequency must be provided in the `ScenarioConfig^`.

    Each recurrent scenario is attached to the cycle corresponding to the creation date and the
    frequency. In other words, each cycle represents an iteration and contains the various
    scenarios created during this iteration. For instance, when scenarios have a _MONTHLY_
    frequency, one cycle will be created for each month (January, February, March, etc.). A new
    scenario created on February 10th, gets attached to the _February_ cycle.

    The frequency is implemented as an enumeration with the following possible values:

    - With a _DAILY_ frequency, a new cycle is created for each day.

    - With a _WEEKLY_ frequency, a new cycle is created for each week (from Monday to Sunday).

    - With a _MONTHLY_ frequency, a new cycle is created for each month.

    - With a _QUARTERLY_ frequency, a new cycle is created for each quarter.

    - With a _YEARLY_ frequency, a new cycle is created for each year.
    """

    DAILY = 1
    WEEKLY = 2
    MONTHLY = 3
    QUARTERLY = 4
    YEARLY = 5
|
class _Classproperty(object): def __init__(self, f): self.f = f def __get__(self, obj, owner): return self.f(owner)
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import functools from enum import Enum class _ReprEnum(Enum): @classmethod @functools.lru_cache def _from_repr(cls, repr_: str): return next(filter(lambda e: repr(e) == repr_, cls)) # type: ignore
|
import keyword

from ..exceptions.exceptions import InvalidConfigurationId

# Terms reserved by Taipy that must not appear anywhere inside an id.
__INVALID_TAIPY_ID_TERMS = ["CYCLE", "SCENARIO", "SEQUENCE", "TASK", "DATANODE"]


def _validate_id(name: str):
    """Return *name* unchanged if it is a valid configuration identifier.

    Raises:
        InvalidConfigurationId: If *name* contains a restricted Taipy term,
            is not a valid Python identifier, or is a Python keyword.
    """
    for restricted_term in __INVALID_TAIPY_ID_TERMS:
        if restricted_term in name:
            raise InvalidConfigurationId(f"{name} is not a valid identifier. {restricted_term} is restricted.")
    if not name.isidentifier() or keyword.iskeyword(name):
        raise InvalidConfigurationId(f"{name} is not a valid identifier.")
    return name
|
import functools

from ...logger._taipy_logger import _TaipyLogger
from ..exceptions.exceptions import ConfigurationUpdateBlocked


class _ConfigBlocker:
    """Configuration blocker singleton."""

    __logger = _TaipyLogger._get_logger()
    __block_config_update = False

    @classmethod
    def _block(cls):
        """Forbid configuration updates until `_unblock` is called."""
        cls.__block_config_update = True

    @classmethod
    def _unblock(cls):
        """Allow configuration updates again."""
        cls.__block_config_update = False

    @classmethod
    def _check(cls):
        """Return a decorator that raises when updates are currently blocked."""

        def inner(f):
            @functools.wraps(f)
            def _check_if_is_blocking(*args, **kwargs):
                # Fast path: updates allowed, delegate immediately.
                if not cls.__block_config_update:
                    return f(*args, **kwargs)
                error_message = (
                    "The Core service should be stopped by running core.stop() before"
                    " modifying the Configuration. For more information, please refer to:"
                    " https://docs.taipy.io/en/latest/manuals/running_services/#running-core."
                )
                cls.__logger.error("ConfigurationUpdateBlocked: " + error_message)
                raise ConfigurationUpdateBlocked(error_message)

            return _check_if_is_blocking

        return inner
|
from ..common._repr_enum import _ReprEnum


class _OrderedEnum(_ReprEnum):
    """Enum base supporting ordering comparisons between members of the same class."""

    def __ge__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value >= other.value

    def __gt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value > other.value

    def __le__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value <= other.value

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value < other.value


class Scope(_OrderedEnum):
    """Scope of a `DataNode^`.

    This enumeration can have the following values:

    - `GLOBAL`
    - `CYCLE`
    - `SCENARIO`
    """

    GLOBAL = 3
    CYCLE = 2
    SCENARIO = 1
|
import os
import re
from collections import UserDict
from datetime import datetime, timedelta
from importlib import import_module
from operator import attrgetter
from pydoc import locate

from ..exceptions.exceptions import InconsistentEnvVariableError, MissingEnvVariableError
from .frequency import Frequency
from .scope import Scope


class _TemplateHandler:
    """Factory to handle actions related to config value templating."""

    # Matches "ENV[VAR_NAME]" with an optional ":type" suffix among bool/str/float/int.
    _PATTERN = r"^ENV\[([a-zA-Z_]\w*)\](:(\bbool\b|\bstr\b|\bfloat\b|\bint\b))?$"

    @classmethod
    def _replace_templates(cls, template, type=str, required=True, default=None):
        """Recursively resolve ENV[...] templates in *template*.

        Tuples and lists are traversed item by item and rebuilt; dicts and
        UserDicts are rebuilt as plain dicts with stringified keys. Any other
        value is resolved directly.
        """
        if isinstance(template, tuple):
            return tuple(cls._replace_template(item, type, required, default) for item in template)
        if isinstance(template, list):
            return [cls._replace_template(item, type, required, default) for item in template]
        if isinstance(template, (dict, UserDict)):
            return {str(k): cls._replace_template(v, type, required, default) for k, v in template.items()}
        return cls._replace_template(template, type, required, default)

    @classmethod
    def _replace_template(cls, template, type, required, default):
        """Resolve a single value, returning it unchanged when it is not an ENV[...] expression.

        Raises:
            MissingEnvVariableError: If the variable is required but not set.
        """
        if "ENV" not in str(template):
            return template
        match = re.fullmatch(cls._PATTERN, str(template))
        if not match:
            return template
        var = match.group(1)
        dynamic_type = match.group(3)
        val = os.environ.get(var)
        if val is None:
            if required:
                raise MissingEnvVariableError(f"Environment variable {var} is not set.")
            return default
        # The Python type requested by the caller takes precedence over the
        # ":type" suffix embedded in the template string.
        if type == bool:
            return cls._to_bool(val)
        elif type == int:
            return cls._to_int(val)
        elif type == float:
            return cls._to_float(val)
        elif type == Scope:
            return cls._to_scope(val)
        elif type == Frequency:
            return cls._to_frequency(val)
        if dynamic_type == "bool":
            return cls._to_bool(val)
        elif dynamic_type == "int":
            return cls._to_int(val)
        elif dynamic_type == "float":
            return cls._to_float(val)
        return val

    @staticmethod
    def _to_bool(val: str) -> bool:
        """Convert 'true'/'false' (case-insensitive) to a bool.

        Raises:
            InconsistentEnvVariableError: If *val* is neither 'true' nor 'false'.
        """
        possible_values = ["true", "false"]
        if val.lower() not in possible_values:
            # Bug fix: the message was a plain string, so the literal text
            # "{val}" was raised instead of the offending value.
            raise InconsistentEnvVariableError(f"{val} is not a Boolean.")
        # Validation above guarantees val is exactly 'true' or 'false'.
        return val.lower() == "true"

    @staticmethod
    def _to_int(val: str) -> int:
        try:
            return int(val)
        except ValueError:
            raise InconsistentEnvVariableError(f"{val} is not an integer.")

    @staticmethod
    def _to_float(val: str) -> float:
        try:
            return float(val)
        except ValueError:
            raise InconsistentEnvVariableError(f"{val} is not a float.")

    @staticmethod
    def _to_datetime(val: str) -> datetime:
        try:
            return datetime.fromisoformat(val)
        except ValueError:
            raise InconsistentEnvVariableError(f"{val} is not a valid datetime.")

    @staticmethod
    def _to_timedelta(val: str) -> timedelta:
        """
        Parse a time string e.g. (2h13m) into a timedelta object.

        :param timedelta_str: A string identifying a duration. (eg. 2h13m)
        :return datetime.timedelta: A datetime.timedelta object
        """
        regex = re.compile(
            r"^((?P<days>[\.\d]+?)d)? *"
            r"((?P<hours>[\.\d]+?)h)? *"
            r"((?P<minutes>[\.\d]+?)m)? *"
            r"((?P<seconds>[\.\d]+?)s)?$"
        )
        parts = regex.match(val)
        if not parts:
            raise InconsistentEnvVariableError(f"{val} is not a valid timedelta.")
        time_params = {name: float(param) for name, param in parts.groupdict().items() if param}
        return timedelta(**time_params)  # type: ignore

    @staticmethod
    def _to_scope(val: str) -> Scope:
        try:
            return Scope[str.upper(val)]
        except Exception:
            raise InconsistentEnvVariableError(f"{val} is not a valid scope.")

    @staticmethod
    def _to_frequency(val: str) -> Frequency:
        try:
            return Frequency[str.upper(val)]
        except Exception:
            raise InconsistentEnvVariableError(f"{val} is not a valid frequency.")

    @staticmethod
    def _to_function(val: str):
        """Import and return the function named by the dotted path *val*."""
        module_name, fct_name = val.rsplit(".", 1)
        try:
            module = import_module(module_name)
            return attrgetter(fct_name)(module)
        except Exception:
            raise InconsistentEnvVariableError(f"{val} is not a valid function.")

    @staticmethod
    def _to_class(val: str):
        """Locate and return the class named by the dotted path *val*."""
        try:
            return locate(val)
        except Exception:
            raise InconsistentEnvVariableError(f"{val} is not a valid class.")
|
from __future__ import annotations from typing import Any, Dict, Optional, Union from ..common._config_blocker import _ConfigBlocker from ..common._template_handler import _TemplateHandler as _tpl class GlobalAppConfig: """ Configuration fields related to the global application. Attributes: **properties (Dict[str, Any]): A dictionary of additional properties. """ def __init__(self, **properties): self._properties = properties @property def properties(self): return {k: _tpl._replace_templates(v) for k, v in self._properties.items()} @properties.setter # type: ignore @_ConfigBlocker._check() def properties(self, val): self._properties = val def __getattr__(self, item: str) -> Optional[Any]: return _tpl._replace_templates(self._properties.get(item)) @classmethod def default_config(cls) -> GlobalAppConfig: return GlobalAppConfig() def _clean(self): self._properties.clear() def _to_dict(self): as_dict = {} as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, config_as_dict: Dict[str, Any]): config = GlobalAppConfig() config._properties = config_as_dict return config def _update(self, config_as_dict): self._properties.update(config_as_dict)
|
import re
from typing import Dict, List, Set

from .._serializer._json_serializer import _JsonSerializer


class _ComparatorResult(dict):
    """Dict holding the differences between two serialized configurations.

    Top-level keys split the diff into conflicted vs unconflicted sections
    (based on the section names passed at construction). Each section dict maps
    an item-kind key (added/removed/modified) to a list of
    ((section_name, config_id, attribute), value-or-(old, new)) tuples.
    """

    ADDED_ITEMS_KEY = "added_items"
    REMOVED_ITEMS_KEY = "removed_items"
    MODIFIED_ITEMS_KEY = "modified_items"
    CONFLICTED_SECTION_KEY = "conflicted_sections"
    UNCONFLICTED_SECTION_KEY = "unconflicted_sections"

    def __init__(self, unconflicted_sections: Set[str]):
        # Section names whose changes should not be treated as conflicts.
        super().__init__()
        self._unconflicted_sections = unconflicted_sections

    def _sort_by_section(self):
        """Sort every diff list in place by section name (first element of the key tuple)."""
        if self.get(self.CONFLICTED_SECTION_KEY):
            for key in self[self.CONFLICTED_SECTION_KEY].keys():
                self[self.CONFLICTED_SECTION_KEY][key].sort(key=lambda x: x[0][0])
        if self.get(self.UNCONFLICTED_SECTION_KEY):
            for key in self[self.UNCONFLICTED_SECTION_KEY].keys():
                self[self.UNCONFLICTED_SECTION_KEY][key].sort(key=lambda x: x[0][0])

    def _check_added_items(self, config_deepdiff, new_json_config):
        """Record items present in the new config but not the old one.

        `config_deepdiff` is assumed to be a DeepDiff result over the two
        JSON configs (keys like "dictionary_item_added" — confirmed by the
        caller in _ConfigComparator).
        """
        if dictionary_item_added := config_deepdiff.get("dictionary_item_added"):
            for item_added in dictionary_item_added:
                section_name, config_id, attribute = self.__get_changed_entity_attribute(item_added)
                diff_sections = self.__get_section(section_name)
                # Look up the added value at whichever depth the diff points to.
                if attribute:
                    value_added = new_json_config[section_name][config_id][attribute]
                elif config_id:
                    value_added = new_json_config[section_name][config_id]
                else:
                    value_added = new_json_config[section_name]
                section_name = self.__rename_global_node_name(section_name)
                self.__create_or_append_list(
                    diff_sections,
                    self.ADDED_ITEMS_KEY,
                    ((section_name, config_id, attribute), (value_added)),
                )

    def _check_removed_items(self, config_deepdiff, old_json_config):
        """Record items present in the old config but not the new one."""
        if dictionary_item_removed := config_deepdiff.get("dictionary_item_removed"):
            for item_removed in dictionary_item_removed:
                section_name, config_id, attribute = self.__get_changed_entity_attribute(item_removed)
                diff_sections = self.__get_section(section_name)
                if attribute:
                    value_removed = old_json_config[section_name][config_id][attribute]
                elif config_id:
                    value_removed = old_json_config[section_name][config_id]
                else:
                    value_removed = old_json_config[section_name]
                section_name = self.__rename_global_node_name(section_name)
                self.__create_or_append_list(
                    diff_sections,
                    self.REMOVED_ITEMS_KEY,
                    ((section_name, config_id, attribute), (value_removed)),
                )

    def _check_modified_items(self, config_deepdiff, old_json_config, new_json_config):
        """Record items whose value changed, including iterable add/remove diffs."""
        if values_changed := config_deepdiff.get("values_changed"):
            for item_changed, value_changed in values_changed.items():
                section_name, config_id, attribute = self.__get_changed_entity_attribute(item_changed)
                diff_sections = self.__get_section(section_name)
                section_name = self.__rename_global_node_name(section_name)
                self.__create_or_append_list(
                    diff_sections,
                    self.MODIFIED_ITEMS_KEY,
                    ((section_name, config_id, attribute), (value_changed["old_value"], value_changed["new_value"])),
                )
        # Iterable item added will be considered a modified item
        if iterable_item_added := config_deepdiff.get("iterable_item_added"):
            self.__check_modified_iterable(iterable_item_added, old_json_config, new_json_config)
        # Iterable item removed will be considered a modified item
        if iterable_item_removed := config_deepdiff.get("iterable_item_removed"):
            self.__check_modified_iterable(iterable_item_removed, old_json_config, new_json_config)

    def __check_modified_iterable(self, iterable_items, old_json_config, new_json_config):
        """Record the whole old/new value of an iterable whose items were added or removed."""
        for item in iterable_items:
            section_name, config_id, attribute = self.__get_changed_entity_attribute(item)
            diff_sections = self.__get_section(section_name)
            if attribute:
                new_value = new_json_config[section_name][config_id][attribute]
                old_value = old_json_config[section_name][config_id][attribute]
            else:
                new_value = new_json_config[section_name][config_id]
                old_value = old_json_config[section_name][config_id]
            section_name = self.__rename_global_node_name(section_name)
            modified_value = ((section_name, config_id, attribute), (old_value, new_value))
            # Several iterable diffs can point at the same container; avoid duplicates.
            if (
                not diff_sections.get(self.MODIFIED_ITEMS_KEY)
                or modified_value not in diff_sections[self.MODIFIED_ITEMS_KEY]
            ):
                self.__create_or_append_list(
                    diff_sections,
                    self.MODIFIED_ITEMS_KEY,
                    modified_value,
                )

    def __get_section(self, section_name: str) -> Dict[str, List]:
        """Return (creating if needed) the conflicted or unconflicted bucket for *section_name*."""
        if section_name in self._unconflicted_sections:
            if not self.get(self.UNCONFLICTED_SECTION_KEY):
                self[self.UNCONFLICTED_SECTION_KEY] = {}
            return self[self.UNCONFLICTED_SECTION_KEY]

        if not self.get(self.CONFLICTED_SECTION_KEY):
            self[self.CONFLICTED_SECTION_KEY] = {}
        return self[self.CONFLICTED_SECTION_KEY]

    def __create_or_append_list(self, diff_dict, key, value):
        # Append to an existing list or start a new one.
        if diff_dict.get(key):
            diff_dict[key].append(value)
        else:
            diff_dict[key] = [value]

    def __get_changed_entity_attribute(self, attribute_bracket_notation):
        """Split the section name, the config id (if exists), and the attribute name (if exists)
        from JSON bracket notation.
        """
        # DeepDiff paths look like root['section']['id']['attr']; try the
        # deepest shape first and fall back to shallower ones.
        try:
            section_name, config_id, attribute = re.findall(r"\[\'(.*?)\'\]", attribute_bracket_notation)
        except ValueError:
            try:
                section_name, config_id = re.findall(r"\[\'(.*?)\'\]", attribute_bracket_notation)
                attribute = None
            except ValueError:
                section_name = re.findall(r"\[\'(.*?)\'\]", attribute_bracket_notation)[0]
                config_id = None
                attribute = None
        return section_name, config_id, attribute

    def __rename_global_node_name(self, node_name):
        # Present the serializer's internal global node name in a user-friendly form.
        if node_name == _JsonSerializer._GLOBAL_NODE_NAME:
            return "Global Configuration"
        return node_name
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import json
from copy import copy
from typing import Optional, Set, Union

from deepdiff import DeepDiff

from ...logger._taipy_logger import _TaipyLogger
from .._config import _Config
from .._serializer._json_serializer import _JsonSerializer
from ._comparator_result import _ComparatorResult


class _ConfigComparator:
    """Compares two _Config objects and logs their differences."""

    def __init__(self):
        self._unconflicted_sections: Set[str] = set()
        self.__logger = _TaipyLogger._get_logger()

    def _add_unconflicted_section(self, section_name: Union[str, Set[str]]):
        """Mark one or more section names as non-conflicting when they differ."""
        if isinstance(section_name, str):
            section_name = {section_name}
        self._unconflicted_sections.update(section_name)

    def _find_conflict_config(
        self,
        old_config: _Config,
        new_config: _Config,
        old_version_number: Optional[str] = None,
        new_version_number: Optional[str] = None,
    ):
        """Compare between 2 _Config object to check for compatibility.

        Args:
            old_config (_Config): The old _Config.
            new_config (_Config): The new _Config.
            old_version_number (str, optional): The old version number for logging. Defaults to None.
            new_version_number (str, optional): The new version number for logging. Defaults to None.

        Returns:
            _ComparatorResult: Return a _ComparatorResult dictionary with the following format:
            ```python
            {
                "added_items": [
                    ((section_name_1, config_id_1, attribute_1), added_object_1),
                    ((section_name_2, config_id_2, attribute_2), added_object_2),
                ],
                "removed_items": [
                    ((section_name_1, config_id_1, attribute_1), removed_object_1),
                    ((section_name_2, config_id_2, attribute_2), removed_object_2),
                ],
                "modified_items": [
                    ((section_name_1, config_id_1, attribute_1), (old_value_1, new_value_1)),
                    ((section_name_2, config_id_2, attribute_2), (old_value_2, new_value_2)),
                ],
            }
            ```
        """
        comparator_result = self.__get_config_diff(old_config, new_config)
        self.__log_find_conflict_message(comparator_result, old_version_number, new_version_number)
        return comparator_result

    def _compare(
        self,
        config_1: _Config,
        config_2: _Config,
        version_number_1: str,
        version_number_2: str,
    ):
        """Compare between 2 _Config object to check for compatibility.

        Args:
            config_1 (_Config): The old _Config.
            config_2 (_Config): The new _Config.
            version_number_1 (str): The old version number for logging.
            version_number_2 (str): The new version number for logging.
        """
        comparator_result = self.__get_config_diff(config_1, config_2)
        self.__log_comparison_message(comparator_result, version_number_1, version_number_2)
        return comparator_result

    def __get_config_diff(self, config_1, config_2):
        """Serialize both configs to JSON, diff them, and bucket the result."""
        json_config_1 = json.loads(_JsonSerializer._serialize(config_1))
        json_config_2 = json.loads(_JsonSerializer._serialize(config_2))

        config_deepdiff = DeepDiff(json_config_1, json_config_2, ignore_order=True)

        comparator_result = _ComparatorResult(copy(self._unconflicted_sections))
        comparator_result._check_added_items(config_deepdiff, json_config_2)
        comparator_result._check_removed_items(config_deepdiff, json_config_1)
        comparator_result._check_modified_items(config_deepdiff, json_config_1, json_config_2)
        comparator_result._sort_by_section()
        return comparator_result

    def __log_comparison_message(
        self,
        comparator_result: _ComparatorResult,
        version_number_1: str,
        version_number_2: str,
    ):
        config_str_1 = f"version {version_number_1} Configuration"
        config_str_2 = f"version {version_number_2} Configuration"

        diff_messages = []
        for sections in comparator_result.values():
            # Bug fix: messages were previously reassigned on each iteration,
            # so only the last section's differences were logged. Accumulate
            # across all sections instead.
            diff_messages.extend(self.__get_messages(sections))

        if diff_messages:
            self.__logger.info(
                f"Differences between {config_str_1} and {config_str_2}:\n\t" + "\n\t".join(diff_messages)
            )
        else:
            self.__logger.info(f"There is no difference between {config_str_1} and {config_str_2}.")

    def __log_find_conflict_message(
        self,
        comparator_result: _ComparatorResult,
        old_version_number: Optional[str] = None,
        new_version_number: Optional[str] = None,
    ):
        old_config_str = (
            f"configuration for version {old_version_number}" if old_version_number else "current configuration"
        )
        new_config_str = (
            f"configuration for version {new_version_number}" if new_version_number else "current configuration"
        )

        if unconflicted_sections := comparator_result.get(_ComparatorResult.UNCONFLICTED_SECTION_KEY):
            unconflicted_messages = self.__get_messages(unconflicted_sections)
            self.__logger.info(
                f"There are non-conflicting changes between the {old_config_str}"
                f" and the {new_config_str}:\n\t" + "\n\t".join(unconflicted_messages)
            )

        if conflicted_sections := comparator_result.get(_ComparatorResult.CONFLICTED_SECTION_KEY):
            conflicted_messages = self.__get_messages(conflicted_sections)
            self.__logger.error(
                f"The {old_config_str} conflicts with the {new_config_str}:\n\t" + "\n\t".join(conflicted_messages)
            )

    def __get_messages(self, diff_sections):
        """Format the added/removed/modified entries of a section bucket as log lines."""
        dq = '"'
        messages = []

        if added_items := diff_sections.get(_ComparatorResult.ADDED_ITEMS_KEY):
            for diff in added_items:
                ((section_name, config_id, attribute), added_object) = diff
                messages.append(
                    f"{section_name} {dq}{config_id}{dq} "
                    f"{f'has attribute {dq}{attribute}{dq}' if attribute else 'was'} added: {added_object}"
                )

        if removed_items := diff_sections.get(_ComparatorResult.REMOVED_ITEMS_KEY):
            for diff in removed_items:
                ((section_name, config_id, attribute), removed_object) = diff
                messages.append(
                    f"{section_name} {dq}{config_id}{dq} "
                    f"{f'has attribute {dq}{attribute}{dq}' if attribute else 'was'} removed"
                )

        if modified_items := diff_sections.get(_ComparatorResult.MODIFIED_ITEMS_KEY):
            for diff in modified_items:
                ((section_name, config_id, attribute), (old_value, new_value)) = diff
                messages.append(
                    f"{section_name} {dq}{config_id}{dq} "
                    f"{f'has attribute {dq}{attribute}{dq}' if attribute else 'was'} modified: "
                    f"{old_value} -> {new_value}"
                )

        return messages
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import argparse from typing import Dict class _CLI: """Argument parser for Taipy application.""" # The conflict_handler is set to "resolve" to override conflict arguments _subparser_action = None _parser = argparse.ArgumentParser(conflict_handler="resolve") _sub_taipyparsers: Dict[str, argparse.ArgumentParser] = {} _arg_groups: Dict[str, argparse._ArgumentGroup] = {} @classmethod def _add_subparser(cls, name: str, **kwargs) -> argparse.ArgumentParser: """Create a new subparser and return a argparse handler.""" if subparser := cls._sub_taipyparsers.get(name): return subparser if not cls._subparser_action: cls._subparser_action = cls._parser.add_subparsers() subparser = cls._subparser_action.add_parser( name=name, conflict_handler="resolve", **kwargs, ) cls._sub_taipyparsers[name] = subparser subparser.set_defaults(which=name) return subparser @classmethod def _add_groupparser(cls, title: str, description: str = "") -> argparse._ArgumentGroup: """Create a new group for arguments and return a argparser handler.""" if groupparser := cls._arg_groups.get(title): return groupparser groupparser = cls._parser.add_argument_group(title=title, description=description) cls._arg_groups[title] = groupparser return groupparser @classmethod def _parse(cls): """Parse and return only known arguments.""" args, _ = cls._parser.parse_known_args() return args @classmethod def _remove_argument(cls, arg: str): """Remove an argument from the parser. Note that the `arg` must be without --. Source: https://stackoverflow.com/questions/32807319/disable-remove-argument-in-argparse """ for action in cls._parser._actions: opts = action.option_strings if (opts and opts[0] == arg) or action.dest == arg: cls._parser._remove_action(action) break for argument_group in cls._parser._action_groups: for group_action in argument_group._group_actions: opts = group_action.option_strings if (opts and opts[0] == arg) or group_action.dest == arg: argument_group._group_actions.remove(group_action) return
|
from ._cli import _CLI
|
"""The setup script.""" import json import os from setuptools import find_namespace_packages, find_packages, setup with open("README.md", "rb") as readme_file: readme = readme_file.read().decode("UTF-8") with open(f"src{os.sep}taipy{os.sep}templates{os.sep}version.json") as version_file: version = json.load(version_file) version_string = f'{version.get("major", 0)}.{version.get("minor", 0)}.{version.get("patch", 0)}' if vext := version.get("ext"): version_string = f"{version_string}.{vext}" test_requirements = ["pytest>=3.8"] setup( author="Avaiga", author_email="dev@taipy.io", python_requires=">=3.8", classifiers=[ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", ], description="An open-source package holding Taipy application templates.", license="Apache License 2.0", long_description=readme, long_description_content_type="text/markdown", keywords="taipy-templates", name="taipy-templates", package_dir={"": "src"}, packages=find_namespace_packages(where="src") + find_packages(include=["taipy"]), include_package_data=True, test_suite="tests", url="https://github.com/avaiga/taipy-templates", version=version_string, zip_safe=False, )
|
import os

import pytest
from cookiecutter.exceptions import FailedHookException
from cookiecutter.main import cookiecutter

from .utils import _run_template


def test_default_answer(tmpdir):
    """Generating with all defaults yields a runnable single-service app."""
    cookiecutter(
        template="src/taipy/templates/default",
        output_dir=str(tmpdir),
        no_input=True,
    )

    assert os.listdir(tmpdir) == ["taipy_application"]
    # BUGFIX: the original compared `list.sort()` results, which are both None,
    # so the assertion always passed. `sorted()` returns the sorted lists.
    assert sorted(os.listdir(os.path.join(tmpdir, "taipy_application"))) == sorted(
        ["requirements.txt", "main.py", "images"]
    )
    with open(os.path.join(tmpdir, "taipy_application", "requirements.txt")) as requirements_file:
        # Assert post_gen_project hook is successful
        assert "taipy==" in requirements_file.read()

    oldpwd = os.getcwd()
    os.chdir(os.path.join(tmpdir, "taipy_application"))
    stdout = _run_template("main.py")
    os.chdir(oldpwd)

    # Assert the message when the application is run successfully is in the stdout
    assert "[Taipy][INFO] * Server starting on" in str(stdout, "utf-8")


def test_main_file_with_and_without_extension(tmpdir):
    """The main-file answer works both with and without the ".py" extension."""
    cookiecutter(
        template="src/taipy/templates/default",
        output_dir=str(tmpdir),
        no_input=True,
        extra_context={
            "Application main Python file": "app.py",
        },
    )
    # BUGFIX: compare sorted lists instead of the None results of list.sort().
    assert sorted(os.listdir(os.path.join(tmpdir, "taipy_application"))) == sorted(
        ["requirements.txt", "app.py", "images"]
    )

    cookiecutter(
        template="src/taipy/templates/default",
        output_dir=str(tmpdir),
        no_input=True,
        extra_context={
            "Application root folder name": "foo_app",
            "Application main Python file": "app",
        },
    )
    assert sorted(os.listdir(os.path.join(tmpdir, "foo_app"))) == sorted(["requirements.txt", "app.py", "images"])


def test_with_core_service(tmpdir):
    """Opting into scenario management adds Core wiring and config folders."""
    cookiecutter(
        template="src/taipy/templates/default",
        output_dir=str(tmpdir),
        no_input=True,
        extra_context={
            "Does the application use scenario management or version management?": "y",
            "Does the application use Rest API?": "no",
        },
    )

    # BUGFIX: compare sorted lists instead of the None results of list.sort().
    assert sorted(os.listdir(os.path.join(tmpdir, "taipy_application"))) == sorted(
        ["requirements.txt", "main.py", "images", "configuration", "algorithms"]
    )
    with open(os.path.join(tmpdir, "taipy_application", "main.py")) as main_file:
        assert "core = Core()" in main_file.read()

    oldpwd = os.getcwd()
    os.chdir(os.path.join(tmpdir, "taipy_application"))
    stdout = _run_template("main.py")
    os.chdir(oldpwd)

    # Assert the message when the application is run successfully is in the stdout
    assert "[Taipy][INFO] * Server starting on" in str(stdout, "utf-8")
    assert "[Taipy][INFO] Development mode: " in str(stdout, "utf-8")


def test_with_rest_service(tmpdir):
    """Opting into the Rest API adds Rest wiring to the generated main file."""
    cookiecutter(
        template="src/taipy/templates/default",
        output_dir=str(tmpdir),
        no_input=True,
        extra_context={
            "Does the application use scenario management or version management?": "n",
            "Does the application use Rest API?": "yes",
        },
    )

    # BUGFIX: compare sorted lists instead of the None results of list.sort().
    assert sorted(os.listdir(os.path.join(tmpdir, "taipy_application"))) == sorted(
        ["requirements.txt", "main.py", "images"]
    )
    with open(os.path.join(tmpdir, "taipy_application", "main.py")) as main_file:
        assert "rest = Rest()" in main_file.read()

    oldpwd = os.getcwd()
    os.chdir(os.path.join(tmpdir, "taipy_application"))
    stdout = _run_template("main.py")
    os.chdir(oldpwd)

    # Assert the message when the application is run successfully is in the stdout
    assert "[Taipy][INFO] * Server starting on" in str(stdout, "utf-8")
    assert "[Taipy][INFO] Development mode: " in str(stdout, "utf-8")


def test_with_both_core_rest_services(tmpdir):
    """Opting into both services wires Core and Rest together."""
    cookiecutter(
        template="src/taipy/templates/default",
        output_dir=str(tmpdir),
        no_input=True,
        extra_context={
            # BUGFIX: despite this test's name, the original answered "n" to the
            # scenario-management question and then asserted "core = Core()" was
            # absent; both core and rest must be enabled here.
            "Does the application use scenario management or version management?": "y",
            "Does the application use Rest API?": "yes",
        },
    )

    # BUGFIX: compare sorted lists instead of the None results of list.sort().
    assert sorted(os.listdir(os.path.join(tmpdir, "taipy_application"))) == sorted(
        ["requirements.txt", "main.py", "images", "configuration", "algorithms"]
    )
    # BUGFIX: the original called main_file.read() twice; the second call
    # returned "" so its assertion was vacuous. Read once into a variable.
    with open(os.path.join(tmpdir, "taipy_application", "main.py")) as main_file:
        main_content = main_file.read()
    assert "rest = Rest()" in main_content
    assert "core = Core()" in main_content

    oldpwd = os.getcwd()
    os.chdir(os.path.join(tmpdir, "taipy_application"))
    stdout = _run_template("main.py")
    os.chdir(oldpwd)

    # Assert the message when the application is run successfully is in the stdout
    assert "[Taipy][INFO] * Server starting on" in str(stdout, "utf-8")
    assert "[Taipy][INFO] Development mode: " in str(stdout, "utf-8")


def test_multipage_gui_template(tmpdir):
    """Space-separated page names each become a page package."""
    cookiecutter(
        template="src/taipy/templates/default",
        output_dir=str(tmpdir),
        no_input=True,
        extra_context={
            "Application root folder name": "foo_app",
            "Page names in multi-page application?": "name_1 name_2 name_3",
        },
    )

    # BUGFIX: compare sorted lists instead of the None results of list.sort().
    assert sorted(os.listdir(os.path.join(tmpdir, "foo_app"))) == sorted(
        ["requirements.txt", "main.py", "pages", "images"]
    )
    assert sorted(os.listdir(os.path.join(tmpdir, "foo_app", "pages"))) == sorted(
        ["name_1", "name_2", "name_3", "root.md", "root.py", "__init__.py"]
    )

    oldpwd = os.getcwd()
    os.chdir(os.path.join(tmpdir, "foo_app"))
    stdout = _run_template("main.py")
    os.chdir(oldpwd)

    assert "[Taipy][INFO] * Server starting on" in str(stdout, "utf-8")


def test_multipage_gui_template_with_invalid_page_name(tmpdir, capfd):
    """An invalid Python identifier as a page name aborts generation."""
    with pytest.raises(FailedHookException):
        cookiecutter(
            template="src/taipy/templates/default",
            output_dir=str(tmpdir),
            no_input=True,
            extra_context={
                "Application root folder name": "foo_app",
                "Page names in multi-page application?": "valid_var_name 1_invalid_var_name",
            },
        )

    _, stderr = capfd.readouterr()
    assert 'Page name "1_invalid_var_name" is not a valid Python identifier' in stderr

    assert not os.path.exists(os.path.join(tmpdir, "foo_app"))
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import subprocess import sys def _run_template(main_path, time_out=30): """Run the templates on a subprocess and get stdout after timeout""" with subprocess.Popen([sys.executable, main_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: try: stdout, stderr = proc.communicate(timeout=time_out) except subprocess.TimeoutExpired: proc.kill() stdout, stderr = proc.communicate() # Print the eror if there is any (for debugging) if stderr := str(stderr, "utf-8"): print(stderr) return stdout
|
import os

from cookiecutter.main import cookiecutter

from .utils import _run_template


def test_scenario_management_with_toml_config(tmpdir):
    """With TOML enabled, config.py loads config.toml and the app runs."""
    cookiecutter(
        template="src/taipy/templates/scenario-management",
        output_dir=tmpdir,
        no_input=True,
        extra_context={
            "Application root folder name": "foo_app",
            "Application main Python file": "main.py",
            "Application title": "bar",
            "Does the application use TOML Config?": "yes",
        },
    )

    assert os.listdir(tmpdir) == ["foo_app"]
    # BUGFIX: the original compared `list.sort()` results, which are both None,
    # so the assertion always passed. `sorted()` returns the sorted lists.
    assert sorted(os.listdir(os.path.join(tmpdir, "foo_app"))) == sorted(
        ["requirements.txt", "main.py", "algos", "config", "pages"]
    )

    # Assert post_gen_project hook is successful
    with open(os.path.join(tmpdir, "foo_app", "requirements.txt")) as requirements_file:
        assert "taipy==" in requirements_file.read()

    assert sorted(os.listdir(os.path.join(tmpdir, "foo_app", "config"))) == sorted(
        ["__init__.py", "config.py", "config.toml"]
    )
    with open(os.path.join(tmpdir, "foo_app", "config", "config.py")) as config_file:
        assert 'Config.load("config/config.toml")' in config_file.read()

    oldpwd = os.getcwd()
    os.chdir(os.path.join(tmpdir, "foo_app"))
    stdout = _run_template("main.py")
    os.chdir(oldpwd)

    # Assert the message when the application is run successfully is in the stdout
    assert "[Taipy][INFO] Configuration 'config/config.toml' successfully loaded." in str(stdout, "utf-8")
    assert "[Taipy][INFO] * Server starting on" in str(stdout, "utf-8")


def test_scenario_management_without_toml_config(tmpdir):
    """Without TOML, the configuration is pure Python and the TOML file is gone."""
    cookiecutter(
        template="src/taipy/templates/scenario-management",
        output_dir=tmpdir,
        no_input=True,
        extra_context={
            "Application root folder name": "foo_app",
            "Application main Python file": "main.py",
            "Application title": "bar",
            "Does the application use TOML Config?": "no",
        },
    )

    assert os.listdir(tmpdir) == ["foo_app"]
    # BUGFIX: compare sorted lists instead of the None results of list.sort().
    assert sorted(os.listdir(os.path.join(tmpdir, "foo_app"))) == sorted(
        ["requirements.txt", "main.py", "algos", "config", "pages"]
    )

    # Assert post_gen_project hook is successful
    with open(os.path.join(tmpdir, "foo_app", "requirements.txt")) as requirements_file:
        assert "taipy==" in requirements_file.read()

    assert sorted(os.listdir(os.path.join(tmpdir, "foo_app", "config"))) == sorted(["__init__.py", "config.py"])
    with open(os.path.join(tmpdir, "foo_app", "config", "config.py")) as config_file:
        config_content = config_file.read()
    assert 'Config.load("config/config.toml")' not in config_content
    assert all(x in config_content for x in ["Config.configure_csv_data_node", "Config.configure_task"])

    oldpwd = os.getcwd()
    os.chdir(os.path.join(tmpdir, "foo_app"))
    stdout = _run_template("main.py")
    os.chdir(oldpwd)

    # Assert the message when the application is run successfully is in the stdout
    assert "[Taipy][INFO] * Server starting on" in str(stdout, "utf-8")
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from config.config import configure
from pages import job_page, scenario_page
from pages.root import content, root, selected_data_node, selected_scenario

import taipy as tp
from taipy import Core, Gui


def on_init(state):
    # Application start-up hook; intentionally left empty as a placeholder.
    ...


def on_change(state, var, val):
    # When the user selects a data node, rebuild the partial that displays it.
    # NOTE(review): state["scenario"] looks like the scenario page's bound
    # context exposing manage_data_node_partial — confirm against the GUI API.
    if var == "selected_data_node" and val:
        state["scenario"].manage_data_node_partial(state)


# Route -> page mapping consumed by the Gui service below.
pages = {
    "/": root,
    "scenario": scenario_page,
    "jobs": job_page,
}

if __name__ == "__main__":
    # Instantiate, configure and run the Core
    core = Core()
    default_scenario_cfg = configure()
    core.run()

    # ##################################################################################################################
    # PLACEHOLDER: Initialize your data application here                                                               #
    #                                                                                                                  #
    # Example:                                                                                                         #
    if len(tp.get_scenarios()) == 0:
        tp.create_scenario(default_scenario_cfg, name="Default Scenario")
    # Comment, remove or replace the previous lines with your own use case                                             #
    # ##################################################################################################################

    # Instantiate, configure and run the GUI
    gui = Gui(pages=pages)
    # Empty partial whose content is filled at runtime (see data_node_management).
    data_node_partial = gui.add_partial("")
    gui.run(title="{{cookiecutter.__application_title}}", margin="0em")
|
from algos import clean_data

from taipy import Config, Frequency, Scope


def configure():
    """Build and return the default scenario configuration.

    The example pipeline reads a CSV dataset, fills its missing values with
    the value held by the "replacement_type" data node, and writes the result
    to a second CSV data node.
    """
    # ##################################################################################################################
    # PLACEHOLDER: Add your scenario configurations here                                                               #
    #                                                                                                                  #
    # Example:                                                                                                         #
    # CYCLE scope: the same initial dataset is shared by all scenarios of a cycle.
    initial_dataset_cfg = Config.configure_csv_data_node("initial_dataset", scope=Scope.CYCLE)
    replacement_type_cfg = Config.configure_data_node("replacement_type", default_data="NO VALUE")
    cleaned_dataset_cfg = Config.configure_csv_data_node("cleaned_dataset")
    clean_data_cfg = Config.configure_task(
        "clean_data",
        function=clean_data,
        input=[initial_dataset_cfg, replacement_type_cfg],
        output=cleaned_dataset_cfg,
    )
    scenario_cfg = Config.configure_scenario(
        "scenario_configuration", task_configs=[clean_data_cfg], frequency=Frequency.DAILY
    )
    return scenario_cfg
    # Comment, remove or replace the previous lines with your own use case                                             #
    # ##################################################################################################################
|
from taipy import Config


def configure():
    """Load the TOML configuration file and return the default scenario configuration."""
    Config.load("config/config.toml")
    scenario_configurations = Config.scenarios
    return scenario_configurations["scenario_configuration"]
|
def clean_data(df, replacement_type):
    """Return a copy of `df` where every missing value is replaced by `replacement_type`."""
    return df.fillna(replacement_type)
|
from .algos import clean_data
|
from .scenario_page import scenario_page from .job_page import job_page
|
from taipy.gui import Markdown

# Module-level state bound by name inside the Markdown templates:
# the scenario currently selected in the GUI (None until the user picks one).
selected_scenario = None
# The data node currently selected in the GUI (None until the user picks one).
selected_data_node = None
# Shared content placeholder referenced by the pages.
content = ""

# The root page rendered from its Markdown template file.
root = Markdown("pages/root.md")
|
from .job_page import job_page
|
from taipy.gui import Markdown

# Page showing submitted jobs; its content lives in the Markdown template file.
job_page = Markdown("pages/job_page/job_page.md")
|
from taipy.gui import Markdown, notify

from .data_node_management import manage_partial


def notify_on_submission(state, submitable, details):
    """Toast the submission status of a scenario to the user.

    Arguments:
        state: The GUI state of the client triggering the notification.
        submitable: The submitted entity (scenario or sequence).
        details: Dict carrying the submission details; only
            "submission_status" is used here.
    """
    if details["submission_status"] == "COMPLETED":
        # BUGFIX: user-facing message read "Submision completed!".
        notify(state, "success", "Submission completed!")
    elif details["submission_status"] == "FAILED":
        notify(state, "error", "Submission failed!")
    else:
        notify(state, "info", "In progress...")


def manage_data_node_partial(state):
    """Rebuild the data-node partial for the currently selected data node."""
    manage_partial(state)


# The scenario page rendered from its Markdown template file.
scenario_page = Markdown("pages/scenario_page/scenario_page.md")
|
from .scenario_page import scenario_page
|
# Build the partial content shown for one specific data node.
def build_dn_partial(dn, dn_label):
    """Return the Markdown partial describing the data node `dn` labeled `dn_label`."""
    sections = ["<|part|render={selected_scenario}|\n\n"]

    # ##################################################################################################################
    # PLACEHOLDER: data node specific content before automatic content                                                 #
    #                                                                                                                  #
    # Example:                                                                                                         #
    if dn_label == "replacement_type":
        sections.append("All missing values will be replaced by the data node value.")
    # Comment, remove or replace the previous lines with your own use case                                             #
    # ##################################################################################################################

    # Generic data node viewer bound to the selected scenario.
    sections.append(
        "<|{selected_scenario.data_nodes['" + dn.config_id + "']}|data_node|scenario={selected_scenario}|>\n\n"
    )

    # ##################################################################################################################
    # PLACEHOLDER: data node specific content after automatic content                                                  #
    #                                                                                                                  #
    # Example:                                                                                                         #
    if dn_label == "initial_dataset":
        sections.append(
            "Select your CSV file: <|{selected_data_node.path}|file_selector|extensions=.csv|on_action={lambda s: s.refresh('selected_scenario')}|>\n\n"
        )
    # Comment, remove or replace the previous lines with your own use case                                             #
    # ##################################################################################################################

    sections.append("|>\n\n")
    return "".join(sections)


def manage_partial(state):
    """Refresh the GUI partial with content built from the selected data node."""
    selected = state.selected_data_node
    partial_markdown = build_dn_partial(selected, selected.get_simple_label())
    state.data_node_partial.update_content(state, partial_markdown)
|
import os

import taipy

# Add taipy version to requirements.txt so the generated app pins the same release.
with open(os.path.join(os.getcwd(), "requirements.txt"), "a") as requirement_file:
    requirement_file.write(f"taipy=={taipy.version._get_version()}\n")

# Use TOML config file or not (cookiecutter answer, normalized to upper case).
use_toml_config = "{{ cookiecutter.__use_toml_config }}".upper()

if use_toml_config == "YES" or use_toml_config == "Y":
    # Keep the TOML-based variant: it replaces config.py.
    os.remove(os.path.join(os.getcwd(), "config", "config.py"))
    os.rename(
        os.path.join(os.getcwd(), "config", "config_with_toml.py"), os.path.join(os.getcwd(), "config", "config.py")
    )
else:
    # Pure-Python configuration: drop the TOML variant and the TOML file.
    os.remove(os.path.join(os.getcwd(), "config", "config_with_toml.py"))
    os.remove(os.path.join(os.getcwd(), "config", "config.toml"))

main_file_name = "{{cookiecutter.__main_file}}.py"

# Final instructions printed at the end of project generation.
print(
    f"New Taipy application has been created at {os.path.join(os.getcwd())}"
    f"\n\nTo start the application, change directory to the newly created folder:"
    f"\n\tcd {os.path.join(os.getcwd())}"
    f"\nand run the application as follows:"
    f"\n\ttaipy run {main_file_name}"
)
|
""" Contain the application's configuration including the scenario configurations. The configuration is run by the Core service. """ from algorithms import * from taipy import Config # ############################################################################# # PLACEHOLDER: Put your application's configurations here # # # # Example: # # scenario_config = Config.configure_scenario("placeholder_scenario", []) # # Comment, remove or replace the previous lines with your own use case # # #############################################################################
|
from .config import *
|
""" This file is designed to contain the various Python functions used to configure tasks. The functions will be imported by the __init__.py file in this folder. """ # ################################################################################################################## # PLACEHOLDER: Put your Python functions here # # # # Example: # # def place_holder_algorithm(): # # pass # # Comment, remove or replace the previous lines with your own use case # # ##################################################################################################################
|
from algorithms import *
|
from .root import root_page
|
""" The root page of the application. Page content is imported from the root.md file. Please refer to https://docs.taipy.io/en/latest/manuals/gui/pages for more details. """ from taipy.gui import Markdown root_page = Markdown("pages/root.md")
|
""" A page of the application. Page content is imported from the page_example.md file. Please refer to https://docs.taipy.io/en/latest/manuals/gui/pages for more details. """ from taipy.gui import Markdown page_example = Markdown("pages/page_example/page_example.md")
|
import sys

# Page names requested by the user, space-separated (cookiecutter variable).
pages = "{{ cookiecutter.__pages }}".split(" ")

# Remove empty string from pages list
pages = [page for page in pages if page != ""]

for page in pages:
    # Each page name becomes a Python module and variable, so it must be a
    # valid identifier; abort generation with a clear message otherwise.
    if not page.isidentifier():
        sys.exit(f'Page name "{page}" is not a valid Python identifier. Please choose another name.')
|
import os
import shutil

import taipy


def handle_services(use_rest, use_core):
    """Patch the generated section files according to the selected Taipy services.

    `import.txt` receives the service imports; `main.txt` receives the
    service start-up code. When Core is disabled, the Core-specific folders
    are removed from the generated project.
    """
    if use_core or use_rest:
        # Prepend "import taipy as tp" at the top of the import.txt file.
        with open(os.path.join(os.getcwd(), "sections", "import.txt"), "r") as import_file:
            import_lines = import_file.readlines()
            import_lines[0] = "import taipy as tp\n" + import_lines[0] + "\n"
        with open(os.path.join(os.getcwd(), "sections", "import.txt"), "w") as import_file:
            import_file.writelines(import_lines)

        # Import the necessary services
        if use_core and use_rest:
            with open(os.path.join(os.getcwd(), "sections", "import.txt"), "a") as import_file:
                import_file.write("from taipy import Core, Rest\n")
        elif use_core:
            with open(os.path.join(os.getcwd(), "sections", "import.txt"), "a") as import_file:
                import_file.write("from taipy import Core\n")
        elif use_rest:
            with open(os.path.join(os.getcwd(), "sections", "import.txt"), "a") as import_file:
                import_file.write("from taipy import Rest\n")

    # Start the Rest service
    if use_rest:
        with open(os.path.join(os.getcwd(), "sections", "main.txt"), "a") as main_file:
            main_file.write("    rest = Rest()\n")

    if use_core:
        # Create and submit the placeholder scenario
        with open(os.path.join(os.getcwd(), "sections", "main.txt"), "a") as main_file:
            main_file.write("    core = Core()\n")
            main_file.write("    core.run()\n")
            main_file.write("    # #############################################################################\n")
            main_file.write("    # PLACEHOLDER: Create and submit your scenario here                           #\n")
            main_file.write("    #                                                                             #\n")
            main_file.write("    # Example:                                                                    #\n")
            main_file.write("    # from configuration import scenario_config                                   #\n")
            main_file.write("    # scenario = tp.create_scenario(scenario_config)                              #\n")
            main_file.write("    # scenario.submit()                                                           #\n")
            main_file.write("    # Comment, remove or replace the previous lines with your own use case        #\n")
            main_file.write("    # #############################################################################\n")
    else:
        # No Core service: the scenario configuration scaffolding is useless.
        shutil.rmtree(os.path.join(os.getcwd(), "algorithms"))
        shutil.rmtree(os.path.join(os.getcwd(), "configuration"))


def handle_run_service():
    """Append the final run call to main.txt, combining Rest with the GUI when enabled."""
    with open(os.path.join(os.getcwd(), "sections", "main.txt"), "a+") as main_file:
        main_file.seek(0)
        main_content = main_file.read()
        # Run Rest service along with the GUI service
        if "rest = Rest()" in main_content:
            main_file.write('    tp.run(gui, rest, title="{{cookiecutter.__application_title}}")\n')
        else:
            main_file.write('    gui.run(title="{{cookiecutter.__application_title}}")\n')


def handle_single_page_app():
    """Generate a single-page application: drop the pages folder and inline one page."""
    shutil.rmtree(os.path.join(os.getcwd(), "pages"))

    with open(os.path.join(os.getcwd(), "sections", "main.txt"), "a") as main_file:
        main_file.write("\n")
        main_file.write("    gui = Gui(page=page)\n")

    handle_run_service()

    with open(os.path.join(os.getcwd(), "sections", "page_content.txt"), "a") as page_content_file:
        page_content_file.write(
            '''
page = """
<center>
<|navbar|lov={[("home", "Homepage")]}|>
</center>
"""
'''
        )


def handle_multi_page_app(pages):
    """Generate one page package per requested page from the page_example template."""
    for page_name in pages:
        os.mkdir(os.path.join(os.getcwd(), "pages", page_name))

        # Copy the example Markdown, renaming its title after the page.
        with open(os.path.join(os.getcwd(), "pages", "page_example", "page_example.md"), "r") as page_md_file:
            page_md_content = page_md_file.read()
            page_md_content = page_md_content.replace("Page example", page_name.replace("_", " ").title())
        with open(os.path.join(os.getcwd(), "pages", page_name, page_name + ".md"), "w") as page_md_file:
            page_md_file.write(page_md_content)

        # Copy the example Python module, renaming every reference to the page.
        with open(os.path.join(os.getcwd(), "pages", "page_example", "page_example.py"), "r") as page_content_file:
            page_py_content = page_content_file.read()
            page_py_content = page_py_content.replace("page_example", page_name)
        with open(os.path.join(os.getcwd(), "pages", page_name, page_name + ".py"), "w") as page_content_file:
            page_content_file.write(page_py_content)

    # Expose every generated page from the pages package.
    with open(os.path.join(os.getcwd(), "pages", "__init__.py"), "a") as page_init_file:
        for page_name in pages:
            page_init_file.write(f"from .{page_name}.{page_name} import {page_name}\n")

    shutil.rmtree(os.path.join(os.getcwd(), "pages", "page_example"))

    newline = ",\n\t"
    user_page_dict = newline.join(f'"{page_name}": {page_name}' for page_name in pages)
    page_dict = """
pages = {
\t"/": root_page,
\t{pages}
}
"""
    with open(os.path.join(os.getcwd(), "sections", "page_content.txt"), "a") as page_content_file:
        page_content_file.write(page_dict.replace("{pages}", user_page_dict))

    with open(os.path.join(os.getcwd(), "sections", "import.txt"), "a") as import_file:
        import_file.write("from pages import *\n")

    with open(os.path.join(os.getcwd(), "sections", "main.txt"), "a") as main_file:
        main_file.write("\n")
        main_file.write("    gui = Gui(pages=pages)\n")

    handle_run_service()


def generate_main_file():
    """Assemble the application's main file from the three section files."""
    with open(os.path.join(os.getcwd(), "sections", "import.txt"), "r") as import_file:
        import_lines = import_file.read()
    with open(os.path.join(os.getcwd(), "sections", "page_content.txt"), "r") as page_content_file:
        page_content = page_content_file.read()
    with open(os.path.join(os.getcwd(), "sections", "main.txt"), "r") as main_file:
        main_lines = main_file.read()

    with open(os.path.join(os.getcwd(), "{{cookiecutter.__main_file}}.py"), "a") as app_main_file:
        app_main_file.write(import_lines)
        app_main_file.write("\n")
        app_main_file.write(page_content)
        app_main_file.write("\n\n")
        app_main_file.write(main_lines)


# Pin the generated application to the current taipy release.
with open(os.path.join(os.getcwd(), "requirements.txt"), "a") as requirement_file:
    requirement_file.write(f"taipy=={taipy.version._get_version()}\n")

use_core = "{{ cookiecutter.__core }}".upper()
use_rest = "{{ cookiecutter.__rest }}".upper()
handle_services(use_rest in ["YES", "Y"], use_core in ["YES", "Y"])

pages = "{{ cookiecutter.__pages }}".split(" ")
# Remove empty string from pages list
pages = [page for page in pages if page != ""]

if len(pages) == 0:
    handle_single_page_app()
else:
    handle_multi_page_app(pages)

generate_main_file()

# Remove the sections folder
shutil.rmtree(os.path.join(os.getcwd(), "sections"))

main_file_name = "{{cookiecutter.__main_file}}.py"

print(
    f"New Taipy application has been created at {os.path.join(os.getcwd())}"
    f"\n\nTo start the application, change directory to the newly created folder:"
    f"\n\tcd {os.path.join(os.getcwd())}"
    f"\nand run the application as follows:"
    f"\n\ttaipy run {main_file_name}"
)
|
import os
import threading

from flask import Flask
from pyngrok import ngrok
from hf_hub_ctranslate2 import GeneratorCT2fromHfHub
from flask import request, jsonify

model_name = "taipy5-ct2"
# note this is a local folder model; the model uploaded to huggingface did not respond correctly
# model_name = "michaelfeil/ct2fast-starchat-alpha"
# model_name = "michaelfeil/ct2fast-starchat-beta"

model = GeneratorCT2fromHfHub(
    # load in int8 on CUDA
    model_name_or_path=model_name,
    device="cuda",
    compute_type="int8_float16",
    # tokenizer=AutoTokenizer.from_pretrained("{ORG}/{NAME}")
)


def generate_text_batch(prompt_texts, max_length=64):
    """Generate completions for a batch of prompts; prompts are stripped from the results."""
    outputs = model.generate(prompt_texts, max_length=max_length, include_prompt_in_result=False)
    return outputs


app = Flask(__name__)
port = "5000"

# Open a ngrok tunnel to the HTTP server
public_url = ngrok.connect(port).public_url
print(" * ngrok tunnel \"{}\" -> \"http://127.0.0.1:{}\"".format(public_url, port))

# Update any base URLs to use the public ngrok URL
app.config["BASE_URL"] = public_url
# ... Update inbound traffic via APIs to use the public-facing ngrok URL


# Define Flask routes
@app.route("/")
def index():
    # Liveness probe used to check the tunnel works.
    return "Hello from Colab!"


@app.route("/api/generate", methods=["POST"])
def generate_code():
    """Generate text from a JSON body: {"inputs": str, "parameters": {"max_new_tokens": int}}."""
    try:
        # Get the JSON data from the request body
        data = request.get_json()

        # Extract 'inputs' and 'parameters' from the JSON data
        inputs = data.get('inputs', "")
        parameters = data.get('parameters', {})

        # Extract the 'max_new_tokens' parameter
        max_new_tokens = parameters.get('max_new_tokens', 64)

        # Call the generate_text_batch function with inputs and max_new_tokens
        generated_text = generate_text_batch([inputs], max_new_tokens)[0]

        return jsonify({
            "generated_text": generated_text,
            "status": 200
        })
    except Exception as e:
        # Surface the failure to the caller rather than a bare 500 page.
        return jsonify({"error": str(e)})


# Start the Flask server in a new thread
threading.Thread(target=app.run, kwargs={"use_reloader": False}).start()
|
import os
import json


def tokenize_code(code, max_characters=256):
    """Split `code` into snippets of at most `max_characters`, breaking only at new lines."""
    snippets = []
    buffer = ""
    for line in code.split("\n"):
        # The +1 accounts for the newline appended together with the line.
        if len(buffer) + len(line) + 1 <= max_characters:
            buffer += line + "\n"
        else:
            snippets.append(buffer.strip())
            buffer = line + "\n"
    if buffer:
        snippets.append(buffer.strip())
    return snippets


def process_file(file_path):
    """Read `file_path` and return its content tokenized into snippets."""
    with open(file_path, "r", encoding="utf-8") as source:
        return tokenize_code(source.read())


def escape_string(s):
    """Return `s` unchanged: triple quotes, quotes and new lines are kept as-is."""
    return s


def main(input_folder, output_file):
    """Walk `input_folder`, snippet every .py/.md file and dump the snippets as JSON lines."""
    records = []
    for root, _dirs, files in os.walk(input_folder):
        for name in files:
            if name.endswith((".py", ".md")):
                for snippet in process_file(os.path.join(root, name)):
                    records.append({"text": escape_string(snippet)})

    # One JSON object per line (JSONL format).
    with open(output_file, "w", encoding="utf-8") as jsonl_file:
        for record in records:
            jsonl_file.write(json.dumps(record) + "\n")


if __name__ == "__main__":
    input_folder = "taipy"  # replace with your folder path
    output_file = "snippets.jsonl"  # replace with your desired output file path
    main(input_folder, output_file)
|
from taipy.gui import Gui
from tensorflow.keras import models
from PIL import Image
import numpy as np

# Pre-trained CIFAR-10 baseline classifier shipped with the app assets.
model = models.load_model("assets/baseline.keras")

# CIFAR-10 label index -> human-readable class name.
class_names = dict(
    enumerate(
        ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
    )
)


def predict_image(model, path_to_image):
    """Classify the image at *path_to_image*; return (confidence, class name)."""
    source = Image.open(path_to_image)
    resized = source.convert("RGB").resize((32, 32))

    # Normalizing image
    pixels = np.asarray(resized)
    print("Before: ", pixels[0][0])  # Printing color of very first pixel
    pixels = pixels / 255
    # Comparing stuff to see if we broke something
    print("After: ", pixels[0][0])  # Printing color of very first pixel

    # Tricking model into thinking it is looking at an array of sample images and not a single image
    scores = model.predict(np.array([pixels])[:1])
    confidence = scores.max()
    label = class_names[np.argmax(scores)]
    return (confidence, label)


# State variables bound into the page markdown below.
image_path = "assets/placeholder_image.png"
prediction, prob, content = "", "", ""

image_control_component = """
<|text-center|
<|{"assets/logo.png"}|image|width=10vw|height=25vh|>
<|{content}|file_selector|extensions=.png|>
Select an image!
<|{prediction}|>
<|{image_path}|image|>
<|{prob}|indicator|value={prob}|min=0|max=100|width=25vw|>
>
"""

index = image_control_component


def on_change(state, variable_name, variable_value):
    """Taipy callback: re-classify whenever a new file is selected."""
    if variable_name != "content":
        return
    state.image_path = variable_value
    confidence, label = predict_image(model, variable_value)
    state.prob = round(confidence * 100)  # Converting decimal to percentage
    state.prediction = f"This is a : {label}"


app = Gui(page=index)

if __name__ == "__main__":
    app.run(use_reloader=True)
|
from taipy.gui import Gui
from tensorflow.keras import models
from PIL import Image
import numpy as np

# Pre-trained CIFAR-10 classifier loaded from the working directory.
model = models.load_model("baseline_mariya.keras")

# CIFAR-10 label index -> human-readable class name.
_CIFAR10_LABELS = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
class_names = {idx: label for idx, label in enumerate(_CIFAR10_LABELS)}


def predict_image(model, path_to_image):
    """Classify the image at *path_to_image*; return (confidence, class name)."""
    source = Image.open(path_to_image)
    resized = source.convert("RGB").resize((32, 32))

    # Normalizing image
    pixels = np.asarray(resized)
    print("Before: ", pixels[0][0])  # Printing color of very first pixel
    pixels = pixels / 255
    # Comparing stuff to see if we broke something
    print("After: ", pixels[0][0])  # Printing color of very first pixel

    # Tricking model into thinking it is looking at an array of sample images and not a single image
    scores = model.predict(np.array([pixels])[:1])
    print(scores)
    confidence = scores.max()
    label = class_names[np.argmax(scores)]
    return (confidence, label)


# State variables bound into the page markdown below.
image_path = "placeholder_image.png"
prediction, prob, content = "", "", ""

image_control_component = """
<|text-center|
<|{"logo.png"}|image|width=25vw|>
<|{content}|file_selector|extensions=.png|>
Select an image!
<|{prediction}|>
<|{image_path}|image|>
<|{prob}|indicator|value={prob}|min=0|max=100|width=25vw|>
>
"""

index = image_control_component


def on_change(state, variable_name, variable_value):
    """Taipy callback: re-classify whenever a new file is selected."""
    if variable_name != "content":
        return
    state.image_path = variable_value
    confidence, label = predict_image(model, variable_value)
    state.prob = round(confidence * 100)  # Converting decimal to percentage
    state.prediction = f"This is a : {label}"


app = Gui(page=index)

if __name__ == "__main__":
    app.run(use_reloader=True)
|
# Emit the two greeting lines on stdout.
for greeting in ("Hello, World!", "Hi Taipy!"):
    print(greeting)
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.

# Packaging script for the taipy-rest distribution.

import json
import os

from setuptools import find_namespace_packages, find_packages, setup

# The long description shown on PyPI comes straight from the README.
with open("README.md") as readme_file:
    readme = readme_file.read()

# The canonical version lives in src/taipy/rest/version.json.
with open(f"src{os.sep}taipy{os.sep}rest{os.sep}version.json") as version_file:
    version = json.load(version_file)
    version_string = f'{version.get("major", 0)}.{version.get("minor", 0)}.{version.get("patch", 0)}'
    if vext := version.get("ext"):
        # Optional pre-release/extension suffix, e.g. "1.2.3.dev0".
        version_string = f"{version_string}.{vext}"

setup(
    author="Avaiga",
    name="taipy-rest",
    keywords="taipy-rest",
    python_requires=">=3.8",
    version=version_string,
    author_email="dev@taipy.io",
    packages=find_namespace_packages(where="src") + find_packages(include=["taipy", "taipy.rest"]),
    package_dir={"": "src"},
    include_package_data=True,
    long_description=readme,
    long_description_content_type="text/markdown",
    description="Library to expose taipy-core REST APIs.",
    license="Apache License 2.0",
    classifiers=[
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
    ],
    install_requires=[
        "flask>=3.0.0,<3.1",
        "flask-restful>=0.3.9,<0.4",
        "passlib>=1.7.4,<1.8",
        "marshmallow>=3.20.1,<3.30",
        "apispec[yaml]>=6.3,<7.0",
        "apispec-webframeworks>=0.5.2,<0.6",
        # Pinned to the develop branch of taipy-core.
        "taipy-core@git+https://git@github.com/Avaiga/taipy-core.git@develop",
    ],
)
|
from unittest import mock

import pytest
from flask import url_for

# NOTE(review): these exception imports are not referenced in the visible tests;
# presumably kept for other tests or re-export — confirm before removing.
from src.taipy.rest.api.exceptions.exceptions import ScenarioIdMissingException, SequenceNameMissingException
from taipy.core.exceptions.exceptions import NonExistingScenario
from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory


def test_get_sequence(client, default_sequence):
    """GET on an unknown sequence id is 404; with the manager mocked it is 200."""
    # test 404
    user_url = url_for("api.sequence_by_id", sequence_id="foo")
    rep = client.get(user_url)
    assert rep.status_code == 404

    with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._get") as manager_mock:
        manager_mock.return_value = default_sequence

        # test get_sequence
        rep = client.get(url_for("api.sequence_by_id", sequence_id="foo"))
        assert rep.status_code == 200


def test_delete_sequence(client):
    """DELETE succeeds once the manager's _delete/_get are patched out."""
    # test 404
    user_url = url_for("api.sequence_by_id", sequence_id="foo")
    rep = client.get(user_url)
    assert rep.status_code == 404

    with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._delete"), mock.patch(
        "taipy.core.sequence._sequence_manager._SequenceManager._get"
    ):
        # test get_sequence
        rep = client.delete(url_for("api.sequence_by_id", sequence_id="foo"))
        assert rep.status_code == 200


def test_create_sequence(client, default_scenario):
    """POST validates scenario_id and sequence_name, then creates the sequence."""
    # Missing scenario id -> 400 with an explicit message.
    sequences_url = url_for("api.sequences")
    rep = client.post(sequences_url, json={})
    assert rep.status_code == 400
    assert rep.json == {"message": "Scenario id is missing."}

    # Missing sequence name -> 400 with an explicit message.
    sequences_url = url_for("api.sequences")
    rep = client.post(sequences_url, json={"scenario_id": "SCENARIO_scenario_id"})
    assert rep.status_code == 400
    assert rep.json == {"message": "Sequence name is missing."}

    # Valid payload but unknown scenario -> 404.
    sequences_url = url_for("api.sequences")
    rep = client.post(sequences_url, json={"scenario_id": "SCENARIO_scenario_id", "sequence_name": "sequence"})
    assert rep.status_code == 404

    # Persist the scenario, then a valid creation returns 201.
    _ScenarioManagerFactory._build_manager()._set(default_scenario)
    with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._get") as config_mock:
        config_mock.return_value = default_scenario
        sequences_url = url_for("api.sequences")
        rep = client.post(
            sequences_url, json={"scenario_id": default_scenario.id, "sequence_name": "sequence", "tasks": []}
        )
        assert rep.status_code == 201


def test_get_all_sequences(client, default_scenario_config_list):
    """Creating 10 scenarios (each with a sequence) makes GET /sequences return 10."""
    for ds in range(10):
        with mock.patch("src.taipy.rest.api.resources.scenario.ScenarioList.fetch_config") as config_mock:
            config_mock.return_value = default_scenario_config_list[ds]
            scenario_url = url_for("api.scenarios", config_id=config_mock.name)
            client.post(scenario_url)

    sequences_url = url_for("api.sequences")
    rep = client.get(sequences_url)
    assert rep.status_code == 200

    results = rep.get_json()
    assert len(results) == 10


@pytest.mark.xfail()
def test_execute_sequence(client, default_sequence):
    """Submitting a sequence returns 200 when the manager is mocked."""
    # test 404
    user_url = url_for("api.sequence_submit", sequence_id="foo")
    rep = client.post(user_url)
    assert rep.status_code == 404

    with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._get") as manager_mock:
        manager_mock.return_value = default_sequence

        # test get_sequence
        rep = client.post(url_for("api.sequence_submit", sequence_id="foo"))
        assert rep.status_code == 200
|
from unittest import mock

from flask import url_for


def test_get_job(client, default_job):
    """GET on an unknown job id is a 404; with the manager mocked it is a 200."""
    endpoint = url_for("api.job_by_id", job_id="foo")

    # Unknown id -> 404.
    assert client.get(endpoint).status_code == 404

    with mock.patch("taipy.core.job._job_manager._JobManager._get") as job_manager:
        job_manager.return_value = default_job
        assert client.get(endpoint).status_code == 200


def test_delete_job(client):
    """DELETE succeeds once the manager's _delete/_get are patched out."""
    endpoint = url_for("api.job_by_id", job_id="foo")

    # Unknown id -> 404.
    assert client.get(endpoint).status_code == 404

    with mock.patch("taipy.core.job._job_manager._JobManager._delete"), mock.patch(
        "taipy.core.job._job_manager._JobManager._get"
    ):
        assert client.delete(endpoint).status_code == 200


def test_create_job(client, default_task_config):
    """POST without a task id is rejected; with a mocked config it succeeds."""
    # Missing the task parameter -> bad request.
    assert client.post(url_for("api.jobs")).status_code == 400

    with mock.patch("src.taipy.rest.api.resources.job.JobList.fetch_config") as fetch_config:
        fetch_config.return_value = default_task_config
        response = client.post(url_for("api.jobs", task_id="foo"))
        assert response.status_code == 201


def test_get_all_jobs(client, create_job_list):
    """GET /jobs lists the ten jobs created by the fixture."""
    response = client.get(url_for("api.jobs"))
    assert response.status_code == 200
    assert len(response.get_json()) == 10


def test_cancel_job(client, default_job):
    """Cancelling an unknown job is a 404; a mocked existing job cancels with 200."""
    from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory

    # The cancel endpoint needs a live orchestrator/dispatcher pair.
    _OrchestratorFactory._build_orchestrator()
    _OrchestratorFactory._build_dispatcher()

    endpoint = url_for("api.job_cancel", job_id="foo")
    assert client.post(endpoint).status_code == 404

    with mock.patch("taipy.core.job._job_manager._JobManager._get") as job_manager:
        job_manager.return_value = default_job
        assert client.post(endpoint).status_code == 200
|
# Shared pytest fixtures for the taipy-rest test suite.

import os
import shutil
import uuid
from datetime import datetime, timedelta

import pandas as pd
import pytest
from dotenv import load_dotenv

from src.taipy.rest.app import create_app
from taipy.config import Config
from taipy.config.common.frequency import Frequency
from taipy.config.common.scope import Scope
from taipy.core import Cycle, DataNodeId, Job, JobId, Scenario, Sequence, Task
from taipy.core.cycle._cycle_manager import _CycleManager
from taipy.core.data.in_memory import InMemoryDataNode
from taipy.core.job._job_manager import _JobManager
from taipy.core.task._task_manager import _TaskManager

from .setup.shared.algorithms import evaluate, forecast


@pytest.fixture
def setup_end_to_end():
    """Register the forecast/evaluate configuration used by the end-to-end test.

    Declares data nodes, the two tasks, a daily scenario and its "sequence".
    """
    model_cfg = Config.configure_data_node("model", path="setup/my_model.p", storage_type="pickle")

    day_cfg = Config.configure_data_node(id="day")
    forecasts_cfg = Config.configure_data_node(id="forecasts")
    forecast_task_cfg = Config.configure_task(
        id="forecast_task",
        input=[model_cfg, day_cfg],
        function=forecast,
        output=forecasts_cfg,
    )

    historical_temperature_cfg = Config.configure_data_node(
        "historical_temperature",
        storage_type="csv",
        path="setup/historical_temperature.csv",
        has_header=True,
    )
    evaluation_cfg = Config.configure_data_node("evaluation")
    evaluate_task_cfg = Config.configure_task(
        "evaluate_task",
        input=[historical_temperature_cfg, forecasts_cfg, day_cfg],
        function=evaluate,
        output=evaluation_cfg,
    )

    scenario_config = Config.configure_scenario(
        "scenario", [forecast_task_cfg, evaluate_task_cfg], frequency=Frequency.DAILY
    )
    scenario_config.add_sequences({"sequence": [forecast_task_cfg, evaluate_task_cfg]})


@pytest.fixture()
def app():
    """Flask application in testing mode, with app and request contexts pushed."""
    # .testenv supplies the environment variables create_app expects.
    load_dotenv(".testenv")
    app = create_app(testing=True)
    app.config.update(
        {
            "TESTING": True,
        }
    )

    with app.app_context(), app.test_request_context():
        yield app


@pytest.fixture()
def client(app):
    """Flask test client bound to the app fixture above."""
    return app.test_client()


@pytest.fixture
def datanode_data():
    """Raw payload for creating a data node through the REST API."""
    return {
        "name": "foo",
        "storage_type": "in_memory",
        "scope": "scenario",
        "default_data": ["1991-01-01T00:00:00"],
    }


@pytest.fixture
def task_data():
    """Raw payload for creating a task through the REST API."""
    return {
        "config_id": "foo",
        "input_ids": ["DATASOURCE_foo_3b888e17-1974-4a56-a42c-c7c96bc9cd54"],
        "function_name": "print",
        "function_module": "builtins",
        "output_ids": ["DATASOURCE_foo_4d9923b8-eb9f-4f3c-8055-3a1ce8bee309"],
    }


@pytest.fixture
def sequence_data():
    """Raw payload for creating a sequence through the REST API."""
    return {
        "name": "foo",
        "task_ids": ["TASK_foo_3b888e17-1974-4a56-a42c-c7c96bc9cd54"],
    }


@pytest.fixture
def scenario_data():
    """Raw payload for creating a scenario through the REST API."""
    return {
        "name": "foo",
        "sequence_ids": ["SEQUENCE_foo_3b888e17-1974-4a56-a42c-c7c96bc9cd54"],
        "properties": {},
    }


@pytest.fixture
def default_datanode():
    """In-memory data node pre-filled with a small list.

    NOTE(review): DataNodeId("f") looks like a placeholder id — confirm intended.
    """
    return InMemoryDataNode(
        "input_ds",
        Scope.SCENARIO,
        DataNodeId("f"),
        "my name",
        "owner_id",
        properties={"default_data": [1, 2, 3, 4, 5, 6]},
    )


@pytest.fixture
def default_df_datanode():
    """In-memory data node holding a small pandas DataFrame."""
    return InMemoryDataNode(
        "input_ds",
        Scope.SCENARIO,
        DataNodeId("id_uio2"),
        "my name",
        "owner_id",
        properties={"default_data": pd.DataFrame([{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}])},
    )


@pytest.fixture
def default_datanode_config():
    """A fresh in-memory data node config with a unique id."""
    return Config.configure_data_node(f"taipy_{uuid.uuid4().hex}", "in_memory", Scope.SCENARIO)


@pytest.fixture
def default_datanode_config_list():
    """Ten distinct in-memory data node configs (ds_0 .. ds_9)."""
    configs = []
    for i in range(10):
        configs.append(Config.configure_data_node(id=f"ds_{i}", storage_type="in_memory", scope=Scope.SCENARIO))
    return configs


def __default_task():
    # Builds a print task wired to one input and one output in-memory node.
    # NOTE(review): both nodes share DataNodeId("id_uio") — confirm the ids
    # were not meant to be distinct.
    input_ds = InMemoryDataNode(
        "input_ds",
        Scope.SCENARIO,
        DataNodeId("id_uio"),
        "my name",
        "owner_id",
        properties={"default_data": "In memory Data Source"},
    )

    output_ds = InMemoryDataNode(
        "output_ds",
        Scope.SCENARIO,
        DataNodeId("id_uio"),
        "my name",
        "owner_id",
        properties={"default_data": "In memory Data Source"},
    )
    return Task(
        config_id="foo",
        properties={},
        function=print,
        input=[input_ds],
        output=[output_ds],
        id=None,
    )


@pytest.fixture
def default_task():
    """A default print task (see __default_task)."""
    return __default_task()


@pytest.fixture
def default_task_config():
    """A minimal task config with no inputs or outputs."""
    return Config.configure_task("task1", print, [], [])


@pytest.fixture
def default_task_config_list():
    """Ten minimal task configs (task_0 .. task_9)."""
    configs = []
    for i in range(10):
        configs.append(Config.configure_task(f"task_{i}", print, [], []))
    return configs


def __default_sequence():
    # A sequence wrapping the default task under a fixed id.
    return Sequence(properties={"name": "foo"}, tasks=[__default_task()], sequence_id="SEQUENCE_foo_SCENARIO_acb")


def __task_config():
    # Minimal task config shared by the scenario-config fixtures.
    return Config.configure_task("task1", print, [], [])


@pytest.fixture
def default_sequence():
    """A default sequence (see __default_sequence)."""
    return __default_sequence()


@pytest.fixture
def default_scenario_config():
    """A scenario config with a unique id, one task and one "sequence"."""
    task_config = __task_config()
    scenario_config = Config.configure_scenario(
        f"taipy_{uuid.uuid4().hex}",
        [task_config],
    )
    scenario_config.add_sequences({"sequence": [task_config]})
    return scenario_config


@pytest.fixture
def default_scenario_config_list():
    """Ten scenario configs, each with its own task and a "sequence"."""
    configs = []
    for _ in range(10):
        task_config = Config.configure_task(f"taipy_{uuid.uuid4().hex}", print)
        scenario_config = Config.configure_scenario(
            f"taipy_{uuid.uuid4().hex}",
            [task_config],
        )
        scenario_config.add_sequences({"sequence": [task_config]})
        configs.append(scenario_config)
    return configs


@pytest.fixture
def default_scenario():
    """A scenario entity wrapping the default task, with a fixed id."""
    return Scenario(config_id="foo", properties={}, tasks=[__default_task()], scenario_id="SCENARIO_scenario_id")


def __create_cycle(name="foo"):
    # Builds (but does not persist) a daily cycle spanning five days from now.
    now = datetime.now()
    return Cycle(
        name=name,
        frequency=Frequency.DAILY,
        properties={},
        creation_date=now,
        start_date=now,
        end_date=now + timedelta(days=5),
    )


@pytest.fixture
def create_cycle_list():
    """Create ten cycles.

    NOTE(review): the created cycles are never appended to `cycles` nor stored
    via `manager`, and `f"cycle_{1}"` uses the literal 1 instead of `i` — this
    fixture always returns an empty list. Tests that expect ten cycles
    presumably obtain them elsewhere; confirm before "fixing".
    """
    cycles = []
    manager = _CycleManager
    for i in range(10):
        c = __create_cycle(f"cycle_{1}")
    return cycles


@pytest.fixture
def cycle_data():
    """Raw payload for creating a cycle through the REST API."""
    return {
        "name": "foo",
        "frequency": "daily",
        "properties": {},
        "creation_date": "2022-02-03T22:17:27.317114",
        "start_date": "2022-02-03T22:17:27.317114",
        "end_date": "2022-02-08T22:17:27.317114",
    }


@pytest.fixture
def default_cycle():
    """A default cycle (see __create_cycle)."""
    return __create_cycle()


def __create_job():
    # Persists the default task, then builds a job bound to it.
    task_manager = _TaskManager
    task = __default_task()
    task_manager._set(task)
    submit_id = f"SUBMISSION_{str(uuid.uuid4())}"
    return Job(id=JobId(f"JOB_{uuid.uuid4()}"), task=task, submit_id=submit_id, submit_entity_id=task.id)


@pytest.fixture
def default_job():
    """A default job bound to a freshly persisted default task."""
    return __create_job()


@pytest.fixture
def create_job_list():
    """Create ten jobs.

    NOTE(review): like create_cycle_list, the jobs are never appended to `jobs`
    nor stored via `manager`; only the tasks are persisted as a side effect of
    __create_job. The returned list is always empty — confirm intent.
    """
    jobs = []
    manager = _JobManager
    for i in range(10):
        c = __create_job()
    return jobs


@pytest.fixture(scope="function", autouse=True)
def cleanup_files():
    """Before every test: re-enable config updates and clear on-disk state."""
    Config.unblock_update()

    if os.path.exists(".data"):
        shutil.rmtree(".data")
|
from unittest import mock

from flask import url_for


def test_get_task(client, default_task):
    """GET on an unknown task id is 404; with the manager mocked it is 200."""
    # test 404
    user_url = url_for("api.task_by_id", task_id="foo")
    rep = client.get(user_url)
    assert rep.status_code == 404

    with mock.patch("taipy.core.task._task_manager._TaskManager._get") as manager_mock:
        manager_mock.return_value = default_task

        # test get_task
        rep = client.get(url_for("api.task_by_id", task_id="foo"))
        assert rep.status_code == 200


def test_delete_task(client):
    """DELETE succeeds once the manager's _delete/_get are patched out."""
    # test 404
    user_url = url_for("api.task_by_id", task_id="foo")
    rep = client.get(user_url)
    assert rep.status_code == 404

    with mock.patch("taipy.core.task._task_manager._TaskManager._delete"), mock.patch(
        "taipy.core.task._task_manager._TaskManager._get"
    ):
        # test get_task
        rep = client.delete(url_for("api.task_by_id", task_id="foo"))
        assert rep.status_code == 200


def test_create_task(client, default_task_config):
    """POST validates the config_id, then creates the task (201)."""
    # without config param
    tasks_url = url_for("api.tasks")
    rep = client.post(tasks_url)
    assert rep.status_code == 400

    # config does not exist
    tasks_url = url_for("api.tasks", config_id="foo")
    rep = client.post(tasks_url)
    assert rep.status_code == 404

    with mock.patch("src.taipy.rest.api.resources.task.TaskList.fetch_config") as config_mock:
        config_mock.return_value = default_task_config
        tasks_url = url_for("api.tasks", config_id="bar")
        rep = client.post(tasks_url)
        assert rep.status_code == 201


def test_get_all_tasks(client, task_data, default_task_config_list):
    """Creating ten tasks makes GET /tasks return ten entries."""
    for ds in range(10):
        with mock.patch("src.taipy.rest.api.resources.task.TaskList.fetch_config") as config_mock:
            config_mock.return_value = default_task_config_list[ds]
            tasks_url = url_for("api.tasks", config_id=config_mock.name)
            client.post(tasks_url)

    rep = client.get(tasks_url)
    assert rep.status_code == 200

    results = rep.get_json()
    assert len(results) == 10


def test_execute_task(client, default_task):
    """Submitting a task returns 200 when the manager is mocked."""
    # test 404
    user_url = url_for("api.task_submit", task_id="foo")
    rep = client.post(user_url)
    assert rep.status_code == 404

    with mock.patch("taipy.core.task._task_manager._TaskManager._get") as manager_mock:
        manager_mock.return_value = default_task

        # test get_task
        rep = client.post(url_for("api.task_submit", task_id="foo"))
        assert rep.status_code == 200
|
from functools import wraps
from unittest.mock import MagicMock, patch

from src.taipy.rest.api.middlewares._middleware import _middleware


def mock_enterprise_middleware(f):
    """Pass-through stand-in for the enterprise middleware decorator."""

    @wraps(f)
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)

    return wrapper


@patch("src.taipy.rest.api.middlewares._middleware._using_enterprise")
@patch("src.taipy.rest.api.middlewares._middleware._enterprise_middleware")
def test_enterprise_middleware_applied_when_enterprise_is_installed(
    enterprise_middleware: MagicMock, using_enterprise: MagicMock
):
    """When the enterprise edition is detected, its middleware wraps the endpoint."""
    enterprise_middleware.return_value = mock_enterprise_middleware
    using_enterprise.return_value = True

    @_middleware
    def f():
        return "f"

    # The wrapped endpoint still returns its original value...
    assert f() == "f"
    # ...and both the detection and the enterprise wrapper were invoked.
    using_enterprise.assert_called_once()
    enterprise_middleware.assert_called_once()


@patch("src.taipy.rest.api.middlewares._middleware._using_enterprise")
@patch("src.taipy.rest.api.middlewares._middleware._enterprise_middleware")
def test_enterprise_middleware_not_applied_when_enterprise_is_not_installed(
    enterprise_middleware: MagicMock, using_enterprise: MagicMock
):
    """Without the enterprise edition, the enterprise wrapper must never run."""
    enterprise_middleware.return_value = mock_enterprise_middleware
    using_enterprise.return_value = False

    @_middleware
    def f():
        return "f"

    assert f() == "f"
    using_enterprise.assert_called_once()
    enterprise_middleware.assert_not_called()
|
from unittest import mock

import pytest
from flask import url_for


def test_get_datanode(client, default_datanode):
    """GET on an unknown data node id is 404; with the manager mocked it is 200."""
    # test 404
    user_url = url_for("api.datanode_by_id", datanode_id="foo")
    rep = client.get(user_url)
    assert rep.status_code == 404

    with mock.patch("taipy.core.data._data_manager._DataManager._get") as manager_mock:
        manager_mock.return_value = default_datanode

        # test get_datanode
        rep = client.get(url_for("api.datanode_by_id", datanode_id="foo"))
        assert rep.status_code == 200


def test_delete_datanode(client):
    """DELETE succeeds once the manager's _delete/_get are patched out."""
    # test 404
    user_url = url_for("api.datanode_by_id", datanode_id="foo")
    rep = client.get(user_url)
    assert rep.status_code == 404

    with mock.patch("taipy.core.data._data_manager._DataManager._delete"), mock.patch(
        "taipy.core.data._data_manager._DataManager._get"
    ):
        # test get_datanode
        rep = client.delete(url_for("api.datanode_by_id", datanode_id="foo"))
        assert rep.status_code == 200


def test_create_datanode(client, default_datanode_config):
    """POST validates the config_id, then creates the data node (201)."""
    # without config param
    datanodes_url = url_for("api.datanodes")
    rep = client.post(datanodes_url)
    assert rep.status_code == 400

    # config does not exist
    datanodes_url = url_for("api.datanodes", config_id="foo")
    rep = client.post(datanodes_url)
    assert rep.status_code == 404

    with mock.patch("src.taipy.rest.api.resources.datanode.DataNodeList.fetch_config") as config_mock:
        config_mock.return_value = default_datanode_config
        datanodes_url = url_for("api.datanodes", config_id="bar")
        rep = client.post(datanodes_url)
        assert rep.status_code == 201


def test_get_all_datanodes(client, default_datanode_config_list):
    """Creating ten data nodes makes GET /datanodes return ten entries."""
    for ds in range(10):
        with mock.patch("src.taipy.rest.api.resources.datanode.DataNodeList.fetch_config") as config_mock:
            config_mock.return_value = default_datanode_config_list[ds]
            datanodes_url = url_for("api.datanodes", config_id=config_mock.name)
            client.post(datanodes_url)

    rep = client.get(datanodes_url)
    assert rep.status_code == 200

    results = rep.get_json()
    assert len(results) == 10


def test_read_datanode(client, default_df_datanode):
    """The reader endpoint returns 200 with or without a JSON body."""
    with mock.patch("taipy.core.data._data_manager._DataManager._get") as config_mock:
        config_mock.return_value = default_df_datanode

        # without operators
        datanodes_url = url_for("api.datanode_reader", datanode_id="foo")
        rep = client.get(datanodes_url, json={})
        assert rep.status_code == 200

        # Without operators and body
        rep = client.get(datanodes_url)
        assert rep.status_code == 200

        # TODO: Revisit filter test
        # operators = {"operators": [{"key": "a", "value": 5, "operator": "LESS_THAN"}]}
        # rep = client.get(datanodes_url, json=operators)
        # assert rep.status_code == 200


def test_write_datanode(client, default_datanode):
    """Writing through the writer endpoint replaces the data returned by the reader."""
    with mock.patch("taipy.core.data._data_manager._DataManager._get") as config_mock:
        config_mock.return_value = default_datanode

        # Get DataNode: initial content is the fixture's default data.
        datanodes_read_url = url_for("api.datanode_reader", datanode_id=default_datanode.id)
        rep = client.get(datanodes_read_url, json={})
        assert rep.status_code == 200
        assert rep.json == {"data": [1, 2, 3, 4, 5, 6]}

        datanodes_write_url = url_for("api.datanode_writer", datanode_id=default_datanode.id)
        rep = client.put(datanodes_write_url, json=[1, 2, 3])
        assert rep.status_code == 200

        # The reader must now reflect the written payload.
        rep = client.get(datanodes_read_url, json={})
        assert rep.status_code == 200
        assert rep.json == {"data": [1, 2, 3]}
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import json
from typing import Dict

from flask import url_for


def _expected_fields(name: str) -> set:
    """Return the top-level keys of tests/json/expected/<name>.json.

    Uses a context manager so the fixture file handle is always closed
    (the original `json.load(open(...))` pattern leaked file handles).
    """
    with open(f"tests/json/expected/{name}.json") as fixture_file:
        return set(json.load(fixture_file))


def create_and_submit_scenario(config_id: str, client) -> Dict:
    """POST a new scenario from *config_id*, submit it, and return its JSON."""
    response = client.post(url_for("api.scenarios", config_id=config_id))
    assert response.status_code == 201
    scenario = response.json.get("scenario")
    # The response must expose at least all documented scenario fields.
    assert (set(scenario) - _expected_fields("scenario")) == set()

    response = client.post(url_for("api.scenario_submit", scenario_id=scenario.get("id")))
    assert response.status_code == 200
    return scenario


def get(url, name, client) -> Dict:
    """GET *url* and check the payload under *name* matches the expected schema."""
    response = client.get(url)
    returned_data = response.json.get(name)
    assert (set(returned_data) - _expected_fields(name)) == set()
    return returned_data


def get_assert_status(url, client, status_code) -> None:
    """GET *url* and assert the HTTP status code."""
    response = client.get(url)
    assert response.status_code == status_code


def get_all(url, expected_quantity, client):
    """GET a collection endpoint and assert the number of returned items."""
    response = client.get(url)
    assert len(response.json) == expected_quantity


def delete(url, client):
    """DELETE *url* and assert success."""
    response = client.delete(url)
    assert response.status_code == 200


def test_end_to_end(client, setup_end_to_end):
    """Exercise the full REST surface: create, read, list, delete, 404s."""
    # Create Scenario: Should also create all of its dependencies(sequences, tasks, datanodes, etc)
    scenario = create_and_submit_scenario("scenario", client)

    # Get other models and verify if they return the necessary fields
    cycle = get(url_for("api.cycle_by_id", cycle_id=scenario.get("cycle")), "cycle", client)
    sequence = get(
        url_for("api.sequence_by_id", sequence_id=f"SEQUENCE_sequence_{scenario['id']}"),
        "sequence",
        client,
    )
    task = get(url_for("api.task_by_id", task_id=sequence.get("tasks")[0]), "task", client)
    datanode = get(
        url_for("api.datanode_by_id", datanode_id=task.get("input_ids")[0]),
        "datanode",
        client,
    )

    # Get All
    get_all(url_for("api.scenarios"), 1, client)
    get_all(url_for("api.cycles"), 1, client)
    get_all(url_for("api.sequences"), 1, client)
    get_all(url_for("api.tasks"), 2, client)
    get_all(url_for("api.datanodes"), 5, client)
    get_all(url_for("api.jobs"), 2, client)

    # Delete entities
    delete(url_for("api.cycle_by_id", cycle_id=cycle.get("id")), client)
    delete(url_for("api.sequence_by_id", sequence_id=sequence.get("id")), client)
    delete(url_for("api.task_by_id", task_id=task.get("id")), client)
    delete(url_for("api.datanode_by_id", datanode_id=datanode.get("id")), client)

    # Check status code
    # Non-existing entities should return 404
    get_assert_status(url_for("api.cycle_by_id", cycle_id=9999999), client, 404)
    get_assert_status(url_for("api.scenario_by_id", scenario_id=9999999), client, 404)
    get_assert_status(url_for("api.sequence_by_id", sequence_id=9999999), client, 404)
    get_assert_status(url_for("api.task_by_id", task_id=9999999), client, 404)
    get_assert_status(url_for("api.datanode_by_id", datanode_id=9999999), client, 404)

    # Check URL with and without trailing slashes
    url_with_slash = url_for("api.scenarios")
    url_without_slash = url_for("api.scenarios")[:-1]

    get_all(url_with_slash, 1, client)
    get_all(url_without_slash, 1, client)
|
from unittest import mock

from flask import url_for


def test_get_cycle(client, default_cycle):
    """GET on an unknown cycle id is a 404; with the manager mocked it is a 200."""
    endpoint = url_for("api.cycle_by_id", cycle_id="foo")

    # Unknown id -> 404.
    assert client.get(endpoint).status_code == 404

    with mock.patch("taipy.core.cycle._cycle_manager._CycleManager._get") as cycle_manager:
        cycle_manager.return_value = default_cycle
        assert client.get(endpoint).status_code == 200


def test_delete_cycle(client):
    """DELETE succeeds once the manager's _delete/_get are patched out."""
    endpoint = url_for("api.cycle_by_id", cycle_id="foo")

    # Unknown id -> 404.
    assert client.get(endpoint).status_code == 404

    with mock.patch("taipy.core.cycle._cycle_manager._CycleManager._delete"), mock.patch(
        "taipy.core.cycle._cycle_manager._CycleManager._get"
    ):
        assert client.delete(endpoint).status_code == 200


def test_create_cycle(client, cycle_data):
    """POST rejects a malformed payload and accepts a valid cycle body."""
    endpoint = url_for("api.cycles")

    # Payload missing the required fields -> bad request.
    assert client.post(endpoint, json={"bad": "data"}).status_code == 400

    # Well-formed payload -> created.
    assert client.post(endpoint, json=cycle_data).status_code == 201


def test_get_all_cycles(client, create_cycle_list):
    """GET /cycles lists the ten cycles provided by the fixture."""
    response = client.get(url_for("api.cycles"))
    assert response.status_code == 200
    assert len(response.get_json()) == 10
|
from unittest import mock

import pytest
from flask import url_for


def test_get_scenario(client, default_scenario):
    """GET on an unknown scenario id is 404; with the manager mocked it is 200."""
    # test 404
    user_url = url_for("api.scenario_by_id", scenario_id="foo")
    rep = client.get(user_url)
    assert rep.status_code == 404

    with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._get") as manager_mock:
        manager_mock.return_value = default_scenario

        # test get_scenario
        rep = client.get(url_for("api.scenario_by_id", scenario_id="foo"))
        assert rep.status_code == 200


def test_delete_scenario(client):
    """DELETE succeeds once the manager's _delete/_get are patched out."""
    # test 404
    user_url = url_for("api.scenario_by_id", scenario_id="foo")
    rep = client.get(user_url)
    assert rep.status_code == 404

    with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._delete"), mock.patch(
        "taipy.core.scenario._scenario_manager._ScenarioManager._get"
    ):
        # test get_scenario
        rep = client.delete(url_for("api.scenario_by_id", scenario_id="foo"))
        assert rep.status_code == 200


def test_create_scenario(client, default_scenario_config):
    """POST validates the config_id, then creates the scenario (201)."""
    # without config param
    scenarios_url = url_for("api.scenarios")
    rep = client.post(scenarios_url)
    assert rep.status_code == 400

    # config does not exist
    scenarios_url = url_for("api.scenarios", config_id="foo")
    rep = client.post(scenarios_url)
    assert rep.status_code == 404

    with mock.patch("src.taipy.rest.api.resources.scenario.ScenarioList.fetch_config") as config_mock:
        config_mock.return_value = default_scenario_config
        scenarios_url = url_for("api.scenarios", config_id="bar")
        rep = client.post(scenarios_url)
        assert rep.status_code == 201


def test_get_all_scenarios(client, default_sequence, default_scenario_config_list):
    """Creating ten scenarios makes GET /scenarios return ten entries."""
    for ds in range(10):
        with mock.patch("src.taipy.rest.api.resources.scenario.ScenarioList.fetch_config") as config_mock:
            config_mock.return_value = default_scenario_config_list[ds]
            scenarios_url = url_for("api.scenarios", config_id=config_mock.name)
            client.post(scenarios_url)

    rep = client.get(scenarios_url)
    assert rep.status_code == 200

    results = rep.get_json()
    assert len(results) == 10


@pytest.mark.xfail()
def test_execute_scenario(client, default_scenario):
    """Submitting a scenario returns 200 when the manager is mocked."""
    # test 404
    user_url = url_for("api.scenario_submit", scenario_id="foo")
    rep = client.post(user_url)
    assert rep.status_code == 404

    with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._get") as manager_mock:
        manager_mock.return_value = default_scenario

        # test get_scenario
        rep = client.post(url_for("api.scenario_submit", scenario_id="foo"))
        assert rep.status_code == 200
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import pickle
import random
from datetime import datetime, timedelta
from typing import Any, Dict

import pandas as pd

# Number of daily predictions produced by `forecast`.
n_predictions = 14


def forecast(model, date: datetime):
    """Return a DataFrame of `n_predictions` daily forecasts starting at *date*.

    Parameters:
        model: Fitted model exposing a ``forecast(steps)`` method.
        date (datetime): First day of the forecast horizon.

    Returns:
        pd.DataFrame with columns ``Date`` (ISO date strings) and ``Forecast``.
        A random perturbation (uniform in [0, 2)) is added to each raw model
        forecast, so the output is not deterministic.
    """
    dates = [date + timedelta(days=i) for i in range(n_predictions)]
    forecasts = [f + random.uniform(0, 2) for f in model.forecast(len(dates))]
    days = [str(dt.date()) for dt in dates]
    res = {"Date": days, "Forecast": forecasts}
    return pd.DataFrame.from_dict(res)


def evaluate(cleaned: pd.DataFrame, forecasts: pd.DataFrame, date: datetime) -> Dict[str, Any]:
    """Compare forecasts with observed values and return error metrics.

    Parameters:
        cleaned (pd.DataFrame): Observations with ``Date`` and ``Value`` columns.
        forecasts (pd.DataFrame): Output of `forecast` (``Date``, ``Forecast``).
        date (datetime): The evaluation day, echoed back in the result.

    Returns:
        Dict with the joined ``Dataframe`` (including a ``Delta`` column of
        absolute errors), the ``Mean_absolute_error`` and the ``Relative_error``
        in percent of the mean observed value.
    """
    # Keep only the observations that have a matching forecasted date.
    cleaned = cleaned[cleaned["Date"].isin(forecasts["Date"].tolist())]
    forecasts_as_series = pd.Series(forecasts["Forecast"].tolist(), name="Forecast")
    res = pd.concat([cleaned.reset_index(), forecasts_as_series], axis=1)
    res["Delta"] = abs(res["Forecast"] - res["Value"])
    return {
        "Date": date,
        "Dataframe": res,
        "Mean_absolute_error": res["Delta"].mean(),
        "Relative_error": (res["Delta"].mean() * 100) / res["Value"].mean(),
    }


if __name__ == "__main__":
    # Fix: the previous version leaked the file handle by calling
    # pickle.load(open(...)) inline; a context manager closes it deterministically.
    # SECURITY NOTE: pickle.load executes arbitrary code — only load trusted files.
    with open("../my_model.p", "rb") as model_file:
        model = pickle.load(model_file)
    day = datetime(2020, 1, 25)
    forecasts = forecast(model, day)
    historical_temperature = pd.read_csv("../historical_temperature.csv")
    evaluation = evaluate(historical_temperature, forecasts, day)
    print(evaluation["Dataframe"])
    print()
    print(f'Mean absolute error : {evaluation["Mean_absolute_error"]}')
    print(f'Relative error in %: {evaluation["Relative_error"]}')
|
from taipy.core import Config, Frequency

from .algorithms import evaluate, forecast

# --- Data node configurations ---------------------------------------------
# Pickled, pre-trained model loaded from disk.
model_cfg = Config.configure_data_node("model", path="my_model.p", storage_type="pickle")
# Day to forecast (default in-memory storage, value provided at runtime).
day_cfg = Config.configure_data_node(id="day")
# Output of the forecast task, input of the evaluation task.
forecasts_cfg = Config.configure_data_node(id="forecasts")

# --- Task configurations ---------------------------------------------------
# forecast(model, day) -> forecasts
forecast_task_cfg = Config.configure_task(
    id="forecast_task",
    input=[model_cfg, day_cfg],
    function=forecast,
    output=forecasts_cfg,
)

# Observed temperatures read from a CSV file.
historical_temperature_cfg = Config.configure_data_node(
    "historical_temperature",
    storage_type="csv",
    path="historical_temperature.csv",
    has_header=True,
)
# Result dictionary produced by the evaluation task.
evaluation_cfg = Config.configure_data_node("evaluation")
# evaluate(historical_temperature, forecasts, day) -> evaluation
evaluate_task_cfg = Config.configure_task(
    "evaluate_task",
    input=[historical_temperature_cfg, forecasts_cfg, day_cfg],
    function=evaluate,
    output=evaluation_cfg,
)

# --- Scenario configuration ------------------------------------------------
# One scenario per day (DAILY frequency) chaining both tasks; the same task
# list is also exposed as a named sequence.
scenario_cfg = Config.configure_scenario("scenario", [forecast_task_cfg, evaluate_task_cfg], frequency=Frequency.DAILY)
scenario_cfg.add_sequences({"sequence": [forecast_task_cfg, evaluate_task_cfg]})
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from importlib.util import find_spec

# Re-export the public API of every installed Taipy sub-package.
# `find_spec` is used so that partial installations (e.g. core without gui)
# import cleanly: a missing sub-package is simply skipped instead of raising.
if find_spec("taipy"):
    if find_spec("taipy.config"):
        from taipy.config._init import *  # type: ignore
    if find_spec("taipy.gui"):
        from taipy.gui._init import *  # type: ignore
    if find_spec("taipy.core"):
        from taipy.core._init import *  # type: ignore
    if find_spec("taipy.rest"):
        from taipy.rest._init import *  # type: ignore
    if find_spec("taipy.gui_core"):
        from taipy.gui_core._init import *  # type: ignore
    if find_spec("taipy.enterprise"):
        from taipy.enterprise._init import *  # type: ignore
    # The top-level `run` helper lives in its own module.
    if find_spec("taipy._run"):
        from taipy._run import _run as run  # type: ignore
|
import json
import os


def _get_version():
    """Read version.json next to this module and build "major.minor.patch[.ext]".

    Missing "major"/"minor"/"patch" keys default to 0; the optional "ext"
    component (e.g. a pre-release tag) is appended only when present and truthy.
    """
    version_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "version.json")
    with open(version_path) as version_file:
        data = json.load(version_file)
        core = ".".join(str(data.get(part, 0)) for part in ("major", "minor", "patch"))
        extension = data.get("ext")
        if extension:
            core = f"{core}.{extension}"
    return core
|
from .rest import Rest
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from taipy.config import Config

from .app import create_app as _create_app


class Rest:
    """
    Runnable Rest application serving REST APIs on top of Taipy Core functionalities.
    """

    def __init__(self):
        """
        Initialize a REST API server.

        A Flask application is instantiated and configured using three parameters from the global config.

        - Config.global_config.testing (bool): Run the application on testing mode.
        - Config.global_config.env (Optional[str]): The application environment.
        - Config.global_config.secret_key (Optional[str]): Application server secret key.

        However, editing these parameters is only recommended for advanced users. Indeed, the default behavior of
        the REST server without any required configuration satisfies all the standard and basic needs.
        """
        # Pull the three settings out of the global config before building the app.
        testing = Config.global_config.testing or False
        environment = Config.global_config.env
        secret = Config.global_config.secret_key
        self._app = _create_app(testing, environment, secret)

    def run(self, **kwargs):
        """
        Start a REST API server. This method is blocking.

        Parameters:
            **kwargs : Options to provide to the application server.
        """
        self._app.run(**kwargs)
|
"""# Taipy Rest The Taipy Rest package exposes the Runnable `Rest^` service to provide REST APIs on top of Taipy Core. (more details on Taipy Core functionalities in the [user manual](../../../manuals/core/)). Once the `Rest^` service runs, users can call REST APIs to create, read, update, submit and remove Taipy entities (including cycles, scenarios, sequences, tasks, jobs, and data nodes). It is handy when it comes to integrating a Taipy application in a more complex IT ecosystem. Please refer to [REST API](../../reference_rest/) page to get the exhaustive list of available APIs.""" from ._init import * from .version import _get_version __version__ = _get_version()
|
"""Extensions registry All extensions here are used as singletons and initialized in application factory """ from .commons.apispec import APISpecExt apispec = APISpecExt()
|
import os

from flask import Flask

from . import api
from .commons.encoder import _CustomEncoder
from .extensions import apispec


def create_app(testing=False, flask_env=None, secret_key=None):
    """Application factory, used to create application"""
    app = Flask(__name__)
    # Environment variables take precedence over the arguments.
    # NOTE(review): os.getenv returns a *string* when the variable is set, so
    # TESTING may end up as e.g. "False" (a truthy value) — confirm intended.
    app.config.update(
        ENV=os.getenv("FLASK_ENV", flask_env),
        TESTING=os.getenv("TESTING", testing),
        SECRET_KEY=os.getenv("SECRET_KEY", secret_key),
    )
    # Treat /path and /path/ as the same route.
    app.url_map.strict_slashes = False

    # flask-restful JSON responses use the custom encoder (Enum/datetime aware).
    app.config["RESTFUL_JSON"] = {"cls": _CustomEncoder}

    configure_apispec(app)
    register_blueprints(app)

    # View registration needs an application context (it reads current_app).
    with app.app_context():
        api.views.register_views()

    return app


def configure_apispec(app):
    """Configure APISpec for swagger support"""
    apispec.init_app(app)
    # Shared schema for paginated list responses.
    apispec.spec.components.schema(
        "PaginatedResult",
        {
            "properties": {
                "total": {"type": "integer"},
                "pages": {"type": "integer"},
                "next": {"type": "string"},
                "prev": {"type": "string"},
            }
        },
    )


def register_blueprints(app):
    """Register all blueprints for application"""
    app.register_blueprint(api.views.blueprint)
|
from taipy.core.cycle._cycle_converter import _CycleConverter
from taipy.core.data._data_converter import _DataNodeConverter
from taipy.core.scenario._scenario_converter import _ScenarioConverter
from taipy.core.sequence._sequence_converter import _SequenceConverter
from taipy.core.task._task_converter import _TaskConverter

# Dispatch table: repository name -> converter turning the corresponding core
# entity into its serializable model counterpart.
entity_to_models = {
    "scenario": _ScenarioConverter._entity_to_model,
    "sequence": _SequenceConverter._entity_to_model,
    "task": _TaskConverter._entity_to_model,
    "data": _DataNodeConverter._entity_to_model,
    "cycle": _CycleConverter._entity_to_model,
}


def _to_model(repository, entity, **kwargs):
    # Convert *entity* using the converter registered for *repository*.
    # Raises KeyError for an unknown repository name.
    # NOTE(review): extra **kwargs are accepted but silently ignored — confirm
    # that callers depend on this lenient signature.
    return entity_to_models[repository](entity)
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from apispec import APISpec
from apispec.exceptions import APISpecError
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec_webframeworks.flask import FlaskPlugin
from flask import Blueprint, jsonify, render_template


class FlaskRestfulPlugin(FlaskPlugin):
    """Small plugin override to handle flask-restful resources"""

    @staticmethod
    def _rule_for_view(view, app=None):
        # flask-restful registers each Resource behind a generated view function
        # whose ``view_class`` attribute points back at the Resource class, so
        # match on that class rather than on the view function itself.
        view_funcs = app.view_functions
        endpoint = None
        for ept, view_func in view_funcs.items():
            if hasattr(view_func, "view_class"):
                view_func = view_func.view_class
            if view_func == view:
                # NOTE(review): keeps the *last* matching endpoint when several
                # endpoints share the same view — confirm this is intended.
                endpoint = ept
        if not endpoint:
            raise APISpecError("Could not find endpoint for view {0}".format(view))

        # WARNING: Assume 1 rule per view function for now
        rule = app.url_map._rules_by_endpoint[endpoint][0]
        return rule


class APISpecExt:
    """Very simple and small extension to use apispec with this API as a flask extension"""

    def __init__(self, app=None, **kwargs):
        self.spec = None

        if app is not None:
            self.init_app(app, **kwargs)

    def init_app(self, app, **kwargs):
        # All defaults can be overridden through the Flask configuration.
        app.config.setdefault("APISPEC_TITLE", "Taipy Rest")
        app.config.setdefault("APISPEC_VERSION", "1.0.0")
        app.config.setdefault("OPENAPI_VERSION", "3.0.2")
        app.config.setdefault("SWAGGER_JSON_URL", "/swagger.json")
        app.config.setdefault("SWAGGER_UI_URL", "/swagger-ui")
        app.config.setdefault("OPENAPI_YAML_URL", "/openapi.yaml")
        app.config.setdefault("REDOC_UI_URL", "/redoc-ui")
        app.config.setdefault("SWAGGER_URL_PREFIX", None)

        self.spec = APISpec(
            title=app.config["APISPEC_TITLE"],
            version=app.config["APISPEC_VERSION"],
            openapi_version=app.config["OPENAPI_VERSION"],
            plugins=[MarshmallowPlugin(), FlaskRestfulPlugin()],
            **kwargs
        )

        # Dedicated blueprint serving the spec documents and the two UIs.
        blueprint = Blueprint(
            "swagger",
            __name__,
            template_folder="./templates",
            url_prefix=app.config["SWAGGER_URL_PREFIX"],
        )

        blueprint.add_url_rule(app.config["SWAGGER_JSON_URL"], "swagger_json", self.swagger_json)
        blueprint.add_url_rule(app.config["SWAGGER_UI_URL"], "swagger_ui", self.swagger_ui)
        blueprint.add_url_rule(app.config["OPENAPI_YAML_URL"], "openapi_yaml", self.openapi_yaml)
        blueprint.add_url_rule(app.config["REDOC_UI_URL"], "redoc_ui", self.redoc_ui)

        app.register_blueprint(blueprint)

    def swagger_json(self):
        # The raw OpenAPI document as JSON (consumed by Swagger UI).
        return jsonify(self.spec.to_dict())

    def swagger_ui(self):
        return render_template("swagger.j2")

    def openapi_yaml(self):
        # Manually inject ReDoc's Authentication legend, then remove it
        self.spec.tag(
            {
                "name": "authentication",
                "x-displayName": "Authentication",
                "description": "<SecurityDefinitions />",
            }
        )
        redoc_spec = self.spec.to_yaml()
        # NOTE(review): assumes the tag just injected sits at index 0 of the
        # spec's private _tags list — confirm against apispec internals.
        self.spec._tags.pop(0)
        return redoc_spec

    def redoc_ui(self):
        return render_template("redoc.j2")
|
import json from typing import Any, Union from datetime import datetime from enum import Enum Json = Union[dict, list, str, int, float, bool, None] class _CustomEncoder(json.JSONEncoder): def default(self, o: Any) -> Json: if isinstance(o, Enum): result = o.value elif isinstance(o, datetime): result = {"__type__": "Datetime", "__value__": o.isoformat()} else: result = json.JSONEncoder.default(self, o) return result
|
"""Simple helper to paginate query """ from flask import request, url_for DEFAULT_PAGE_SIZE = 50 DEFAULT_PAGE_NUMBER = 1 def extract_pagination(page=None, per_page=None, **request_args): page = int(page) if page is not None else DEFAULT_PAGE_NUMBER per_page = int(per_page) if per_page is not None else DEFAULT_PAGE_SIZE return page, per_page, request_args def paginate(query, schema): page, per_page, other_request_args = extract_pagination(**request.args) page_obj = query.paginate(page=page, per_page=per_page) next_ = url_for( request.endpoint, page=page_obj.next_num if page_obj.has_next else page_obj.page, per_page=per_page, **other_request_args, **request.view_args ) prev = url_for( request.endpoint, page=page_obj.prev_num if page_obj.has_prev else page_obj.page, per_page=per_page, **other_request_args, **request.view_args ) return { "total": page_obj.total, "pages": page_obj.pages, "next": next_, "prev": prev, "results": schema.dump(page_obj.items), }
|
from . import error_handler, views __all__ = ["views", "error_handler"]
|
from flask import jsonify
from marshmallow import ValidationError
from taipy.core.exceptions.exceptions import (
    NonExistingCycle,
    NonExistingDataNode,
    NonExistingDataNodeConfig,
    NonExistingJob,
    NonExistingScenario,
    NonExistingScenarioConfig,
    NonExistingSequence,
    NonExistingSequenceConfig,
    NonExistingTask,
    NonExistingTaskConfig,
)

from .exceptions.exceptions import ConfigIdMissingException, ScenarioIdMissingException, SequenceNameMissingException
from .views import blueprint


def _create_404(e):
    # Shared body for every "entity not found" handler below.
    return {"message": e.message}, 404


@blueprint.errorhandler(ValidationError)
def handle_marshmallow_error(e):
    """Return json error for marshmallow validation errors.

    This will avoid having to try/catch ValidationErrors in all endpoints, returning
    correct JSON response with associated HTTP 400 Status (https://tools.ietf.org/html/rfc7231#section-6.5.1)
    """
    return jsonify(e.messages), 400


# --- 400: required request parameters missing ------------------------------

@blueprint.errorhandler(ConfigIdMissingException)
def handle_config_id_missing_exception(e):
    return jsonify({"message": e.message}), 400


@blueprint.errorhandler(ScenarioIdMissingException)
def handle_scenario_id_missing_exception(e):
    return jsonify({"message": e.message}), 400


@blueprint.errorhandler(SequenceNameMissingException)
def handle_sequence_name_missing_exception(e):
    return jsonify({"message": e.message}), 400


# --- 404: Taipy core "non existing entity" exceptions ----------------------

@blueprint.errorhandler(NonExistingDataNode)
def handle_data_node_not_found(e):
    return _create_404(e)


@blueprint.errorhandler(NonExistingDataNodeConfig)
def handle_data_node_config_not_found(e):
    return _create_404(e)


@blueprint.errorhandler(NonExistingCycle)
def handle_cycle_not_found(e):
    return _create_404(e)


@blueprint.errorhandler(NonExistingJob)
def handle_job_not_found(e):
    return _create_404(e)


@blueprint.errorhandler(NonExistingSequence)
def handle_sequence_not_found(e):
    return _create_404(e)


@blueprint.errorhandler(NonExistingSequenceConfig)
def handle_sequence_config_not_found(e):
    return _create_404(e)


@blueprint.errorhandler(NonExistingScenario)
def handle_scenario_not_found(e):
    return _create_404(e)


@blueprint.errorhandler(NonExistingScenarioConfig)
def handle_scenario_config_not_found(e):
    return _create_404(e)


@blueprint.errorhandler(NonExistingTask)
def handle_task_not_found(e):
    return _create_404(e)


@blueprint.errorhandler(NonExistingTaskConfig)
def handle_task_config_not_found(e):
    return _create_404(e)
|
from flask import Blueprint, current_app
from flask_restful import Api
from taipy.core.common._utils import _load_fct
from taipy.logger._taipy_logger import _TaipyLogger

from ..extensions import apispec
from .middlewares._middleware import _using_enterprise
from .resources import (
    CycleList,
    CycleResource,
    DataNodeList,
    DataNodeReader,
    DataNodeResource,
    DataNodeWriter,
    JobExecutor,
    JobList,
    JobResource,
    ScenarioExecutor,
    ScenarioList,
    ScenarioResource,
    SequenceExecutor,
    SequenceList,
    SequenceResource,
    TaskExecutor,
    TaskList,
    TaskResource,
)
from .schemas import CycleSchema, DataNodeSchema, JobSchema, ScenarioSchema, SequenceSchema, TaskSchema

# Shared logger injected into every resource via resource_class_kwargs.
_logger = _TaipyLogger._get_logger()

# All REST routes live under the /api/v1 prefix.
blueprint = Blueprint("api", __name__, url_prefix="/api/v1")
api = Api(blueprint)

# --- Data node endpoints ---------------------------------------------------
api.add_resource(
    DataNodeResource,
    "/datanodes/<string:datanode_id>/",
    endpoint="datanode_by_id",
    resource_class_kwargs={"logger": _logger},
)
api.add_resource(
    DataNodeReader,
    "/datanodes/<string:datanode_id>/read/",
    endpoint="datanode_reader",
    resource_class_kwargs={"logger": _logger},
)
api.add_resource(
    DataNodeWriter,
    "/datanodes/<string:datanode_id>/write/",
    endpoint="datanode_writer",
    resource_class_kwargs={"logger": _logger},
)
api.add_resource(
    DataNodeList,
    "/datanodes/",
    endpoint="datanodes",
    resource_class_kwargs={"logger": _logger},
)

# --- Task endpoints --------------------------------------------------------
api.add_resource(
    TaskResource,
    "/tasks/<string:task_id>/",
    endpoint="task_by_id",
    resource_class_kwargs={"logger": _logger},
)
api.add_resource(TaskList, "/tasks/", endpoint="tasks", resource_class_kwargs={"logger": _logger})
api.add_resource(
    TaskExecutor,
    "/tasks/submit/<string:task_id>/",
    endpoint="task_submit",
    resource_class_kwargs={"logger": _logger},
)

# --- Sequence endpoints ----------------------------------------------------
api.add_resource(
    SequenceResource,
    "/sequences/<string:sequence_id>/",
    endpoint="sequence_by_id",
    resource_class_kwargs={"logger": _logger},
)
api.add_resource(
    SequenceList,
    "/sequences/",
    endpoint="sequences",
    resource_class_kwargs={"logger": _logger},
)
api.add_resource(
    SequenceExecutor,
    "/sequences/submit/<string:sequence_id>/",
    endpoint="sequence_submit",
    resource_class_kwargs={"logger": _logger},
)

# --- Scenario endpoints ----------------------------------------------------
api.add_resource(
    ScenarioResource,
    "/scenarios/<string:scenario_id>/",
    endpoint="scenario_by_id",
    resource_class_kwargs={"logger": _logger},
)
api.add_resource(
    ScenarioList,
    "/scenarios/",
    endpoint="scenarios",
    resource_class_kwargs={"logger": _logger},
)
api.add_resource(
    ScenarioExecutor,
    "/scenarios/submit/<string:scenario_id>/",
    endpoint="scenario_submit",
    resource_class_kwargs={"logger": _logger},
)

# --- Cycle endpoints -------------------------------------------------------
api.add_resource(
    CycleResource,
    "/cycles/<string:cycle_id>/",
    endpoint="cycle_by_id",
    resource_class_kwargs={"logger": _logger},
)
api.add_resource(
    CycleList,
    "/cycles/",
    endpoint="cycles",
    resource_class_kwargs={"logger": _logger},
)

# --- Job endpoints ---------------------------------------------------------
api.add_resource(
    JobResource,
    "/jobs/<string:job_id>/",
    endpoint="job_by_id",
    resource_class_kwargs={"logger": _logger},
)
api.add_resource(JobList, "/jobs/", endpoint="jobs", resource_class_kwargs={"logger": _logger})
api.add_resource(
    JobExecutor,
    "/jobs/cancel/<string:job_id>/",
    endpoint="job_cancel",
    resource_class_kwargs={"logger": _logger},
)


def load_enterprise_resources(api: Api):
    """
    Load enterprise resources.
    """
    # No-op on the community edition; otherwise the enterprise package
    # registers its additional resources on this Api instance.
    if not _using_enterprise():
        return
    load_resources = _load_fct("taipy.enterprise.rest.api.views", "_load_resources")
    load_resources(api)


load_enterprise_resources(api)


def register_views():
    # Register every schema and resource path with apispec so they show up in
    # the generated OpenAPI document. Must run inside an app context
    # (current_app is required by apispec.spec.path).
    apispec.spec.components.schema("DataNodeSchema", schema=DataNodeSchema)
    apispec.spec.path(view=DataNodeResource, app=current_app)
    apispec.spec.path(view=DataNodeList, app=current_app)
    apispec.spec.path(view=DataNodeReader, app=current_app)
    apispec.spec.path(view=DataNodeWriter, app=current_app)

    apispec.spec.components.schema("TaskSchema", schema=TaskSchema)
    apispec.spec.path(view=TaskResource, app=current_app)
    apispec.spec.path(view=TaskList, app=current_app)
    apispec.spec.path(view=TaskExecutor, app=current_app)

    apispec.spec.components.schema("SequenceSchema", schema=SequenceSchema)
    apispec.spec.path(view=SequenceResource, app=current_app)
    apispec.spec.path(view=SequenceList, app=current_app)
    apispec.spec.path(view=SequenceExecutor, app=current_app)

    apispec.spec.components.schema("ScenarioSchema", schema=ScenarioSchema)
    apispec.spec.path(view=ScenarioResource, app=current_app)
    apispec.spec.path(view=ScenarioList, app=current_app)
    apispec.spec.path(view=ScenarioExecutor, app=current_app)

    apispec.spec.components.schema("CycleSchema", schema=CycleSchema)
    apispec.spec.path(view=CycleResource, app=current_app)
    apispec.spec.path(view=CycleList, app=current_app)

    apispec.spec.components.schema("JobSchema", schema=JobSchema)
    apispec.spec.path(view=JobResource, app=current_app)
    apispec.spec.path(view=JobList, app=current_app)
    apispec.spec.path(view=JobExecutor, app=current_app)

    # Catch-all schema for untyped values in the spec.
    apispec.spec.components.schema(
        "Any",
        {
            "description": "Any value",
            "nullable": True,
        },
    )

    if _using_enterprise():
        _register_views = _load_fct("taipy.enterprise.rest.api.views", "_register_views")
        _register_views(apispec)
|
from datetime import datetime from flask import request from flask_restful import Resource from taipy.config.common.frequency import Frequency from taipy.core import Cycle from taipy.core.cycle._cycle_manager_factory import _CycleManagerFactory from taipy.core.exceptions.exceptions import NonExistingCycle from ...commons.to_from_model import _to_model from ..middlewares._middleware import _middleware from ..schemas import CycleResponseSchema, CycleSchema REPOSITORY = "cycle" def _get_or_raise(cycle_id: str) -> None: manager = _CycleManagerFactory._build_manager() cycle = manager._get(cycle_id) if not cycle: raise NonExistingCycle(cycle_id) return cycle class CycleResource(Resource): """Single object resource --- get: tags: - api description: | Returns a `CycleSchema^` representing the unique `Cycle^` identified by the *cycle_id* given as parameter. If no cycle corresponds to *cycle_id*, a `404` error is returned. !!! Example === "Curl" ```shell curl -X GET http://localhost:5000/api/v1/cycles/CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a` is the value of the *cycle_id* parameter. It represents the identifier of the Cycle we want to retrieve. In case of success here is an example of the response: ``` JSON {"cycle": { "frequency": "Frequency.DAILY", "creation_date": "2022-08-04T17:13:32.797384", "id": "CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a", "start_date": "2022-08-04T00:00:00", "end_date": "2022-08-04T23:59:59.999999", "name": "Frequency.DAILY_2022-08-04T17:13:32.797384" ``` In case of failure here is an example of the response: ``` JSON {"message": "Cycle CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a not found."} ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). 
```python import requests response = requests.get("http://localhost:5000/api/v1/cycles/CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a") print(response) print(response.json()) ``` `CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a` is the value of the *cycle_id* parameter. It represents the identifier of the Cycle we want to retrieve. In case of success here is an output example: ``` <Response [200]> {'cycle': { 'frequency': 'Frequency.DAILY', 'creation_date': '2022-08-04T17:13:32.797384', 'id': 'CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a', 'start_date': '2022-08-04T00:00:00', 'end_date': '2022-08-04T23:59:59.999999', 'name': 'Frequency.DAILY_2022-08-04T17:13:32.797384' ``` In case of failure here is an output example: ``` <Response [404]> {'message': 'Cycle CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a not found.'} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. parameters: - in: path name: cycle_id schema: type: string description: The identifier of the cycle to retrieve. responses: 200: content: application/json: schema: type: object properties: cycle: CycleSchema 404: description: No cycle has the *cycle_id* identifier. delete: tags: - api description: | Deletes the `Cycle^` identified by the *cycle_id* given as parameter. If the cycle does not exist, a 404 error is returned. !!! Example === "Curl" ```shell curl -X DELETE http://localhost:5000/api/v1/cycles/CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a` is the value of the *cycle_id* parameter. It represents the identifier of the Cycle we want to delete. 
In case of success here is an example of the response: ``` JSON {"message": "Cycle CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a was deleted."} ``` In case of failure here is an example of the response: ``` JSON {"message": "Cycle CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a not found."} ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.delete("http://localhost:5000/api/v1/cycles/CYCLE_797384_ef210412-af91-4f41-b6e8-74d1648edcba") print(response) print(response.json()) ``` `CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a` is the value of the *cycle_id* parameter. It represents the identifier of the Cycle we want to delete. In case of success here is an output example: ``` <Response [200]> {"message": "Cycle CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a was deleted."} ``` In case of failure here is an output example: ``` <Response [404]> {'message': 'Cycle CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a not found.'} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. parameters: - in: path name: cycle_id schema: type: string description: The id of the cycle to delete. responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No cycle has the *cycle_id* identifier. 
""" def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def get(self, cycle_id): schema = CycleResponseSchema() cycle = _get_or_raise(cycle_id) return {"cycle": schema.dump(_to_model(REPOSITORY, cycle))} @_middleware def delete(self, cycle_id): manager = _CycleManagerFactory._build_manager() _get_or_raise(cycle_id) manager._delete(cycle_id) return {"message": f"Cycle {cycle_id} was deleted."} class CycleList(Resource): """Creation and get_all --- get: tags: - api description: | Returns a `CycleSchema^` list representing all existing Cycles. !!! Example === "Curl" ```shell curl -X GET http://localhost:5000/api/v1/cycles ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. Here is an example of the response: ``` JSON [ { "frequency": "Frequency.DAILY", "end_date": "2022-08-06T23:59:59.999999", "creation_date": "2022-08-06T15:45:50.223894", "start_date": "2022-08-06T00:00:00", "id": "CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a", "name": "Frequency.DAILY_2022-08-06T15:45:50.223894" } ] ``` If there is no cycle, the response is an empty list as follows: ``` JSON [] ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.get("http://localhost:5000/api/v1/cycles") print(response) print(response.json()) ``` In case of success here is an output example: ``` <Response [200]> [{ "frequency": "Frequency.DAILY", "end_date": "2022-08-06T23:59:59.999999", "creation_date": "2022-08-06T15:45:50.223894", "start_date": "2022-08-06T00:00:00", "id": "CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a", "name": "Frequency.DAILY_2022-08-06T15:45:50.223894" } ] ``` If there is no cycle, the response is an empty list as follows: ``` <Response [200]> [] ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. 
responses: 200: content: application/json: schema: allOf: - type: object properties: results: type: array items: $ref: '#/components/schemas/CycleSchema' post: tags: - api description: | Creates a new cycle from the `CycleSchema^` given in the request body. !!! Example === "Curl" ```shell curl -X POST -H "Content-Type: application/json"\ -d '{"frequency": "DAILY", "properties": {}, "creation_date": "2020-01-01T00:00:00",\ "start_date": "2020-01-01T00:00:00", "end_date": "2020-01-01T00:00:00"}'\ http://localhost:5000/api/v1/cycles ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. In the curl command line, a `CycleSchema^` is provided as JSON dictionary parameter with the curl option -d (--data) to specify the various attributes of the `Cycle^` to create: ``` JSON { "frequency": "DAILY", "properties": {}, "creation_date": "2020-01-01T00:00:00", "start_date": "2020-01-01T00:00:00", "end_date": "2020-01-01T00:00:00" } ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests cycle_schema = { "frequency": "DAILY", "properties": {}, "creation_date": "2020-01-01T00:00:00", "start_date": "2020-01-01T00:00:00", "end_date": "2020-01-01T00:00:00" } response = requests.post("http://localhost:5000/api/v1/cycles", json=cycle_schema) print(response) print(response.json()) ``` A `CycleSchema^` is provided as a dictionary to specify the various attributes of the `Cycle^` to create. Here is the output example: ``` <Response [201]> { 'message': 'Cycle was created.', 'cycle': { 'frequency': 'Frequency.DAILY', 'end_date': '2020-01-01T00:00:00', 'creation_date': '2020-01-01T00:00:00', 'start_date': '2020-01-01T00:00:00', 'id': 'CYCLE_c9cc527f-a8c8-4238-8f31-42166a9817db', 'name': 'Frequency.DAILY_2020-01-01T00:00:00', 'properties': {}}} ``` !!! 
Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. requestBody: required: true content: application/json: schema: CycleSchema responses: 201: content: application/json: schema: type: object properties: message: type: string description: Status message. cycle: CycleSchema """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def get(self): schema = CycleResponseSchema(many=True) manager = _CycleManagerFactory._build_manager() cycles = [_to_model(REPOSITORY, cycle) for cycle in manager._get_all()] return schema.dump(cycles) @_middleware def post(self): schema = CycleResponseSchema() manager = _CycleManagerFactory._build_manager() cycle = self.__create_cycle_from_schema(schema.load(request.json)) manager._set(cycle) return { "message": "Cycle was created.", "cycle": schema.dump(_to_model(REPOSITORY, cycle)), }, 201 def __create_cycle_from_schema(self, cycle_schema: CycleSchema): return Cycle( id=cycle_schema.get("id"), frequency=Frequency(getattr(Frequency, cycle_schema.get("frequency", "").upper())), properties=cycle_schema.get("properties", {}), creation_date=datetime.fromisoformat(cycle_schema.get("creation_date")), start_date=datetime.fromisoformat(cycle_schema.get("start_date")), end_date=datetime.fromisoformat(cycle_schema.get("end_date")), )
|
from flask import request
from flask_restful import Resource

from taipy.config.config import Config
from taipy.core.exceptions.exceptions import NonExistingTask, NonExistingTaskConfig
from taipy.core.task._task_manager_factory import _TaskManagerFactory

from ...commons.to_from_model import _to_model
from ..exceptions.exceptions import ConfigIdMissingException
from ..middlewares._middleware import _middleware
from ..schemas import TaskSchema


def _get_or_raise(task_id: str):
    # Fetch the task identified by *task_id*; raise NonExistingTask (mapped to a
    # 404 response by the API error handlers) when no such task exists.
    manager = _TaskManagerFactory._build_manager()
    task = manager._get(task_id)
    if task is None:
        raise NonExistingTask(task_id)
    return task


# Repository name passed to _to_model to pick the task entity/model converter.
REPOSITORY = "task"


class TaskResource(Resource):
    """Single object resource

    ---
    get:
      tags:
        - api
      summary: Get a task.
      description: |
        Return a single task by *task_id*. If the task does not exist, a 404 error is returned.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint
          requires `TAIPY_READER` role.

        Code example:

        ```shell
        curl -X GET http://localhost:5000/api/v1/tasks/TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9
        ```

      parameters:
        - in: path
          name: task_id
          schema:
            type: string
          description: The identifier of the task.
      responses:
        200:
          content:
            application/json:
              schema:
                type: object
                properties:
                  task: TaskSchema
        404:
          description: No task has the *task_id* identifier.
    delete:
      tags:
        - api
      summary: Delete a task.
      description: |
        Delete a task. If the task does not exist, a 404 error is returned.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint
          requires `TAIPY_EDITOR` role.

        Code example:

        ```shell
        curl -X DELETE http://localhost:5000/api/v1/tasks/TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9
        ```

      parameters:
        - in: path
          name: task_id
          schema:
            type: string
          description: The identifier of the task.
      responses:
        200:
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
                    description: Status message.
        404:
          description: No task has the *task_id* identifier.
    """

    def __init__(self, **kwargs):
        # The logger is injected as a resource kwarg at registration time.
        self.logger = kwargs.get("logger")

    @_middleware
    def get(self, task_id):
        # Return the task serialized through its model representation.
        schema = TaskSchema()
        task = _get_or_raise(task_id)
        return {"task": schema.dump(_to_model(REPOSITORY, task))}

    @_middleware
    def delete(self, task_id):
        manager = _TaskManagerFactory._build_manager()
        # Existence check first so a missing task yields a 404 instead of a silent no-op.
        _get_or_raise(task_id)
        manager._delete(task_id)
        return {"message": f"Task {task_id} was deleted."}


class TaskList(Resource):
    """Creation and get_all

    ---
    get:
      tags:
        - api
      summary: Get all tasks.
      description: |
        Return an array of all tasks.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint
          requires `TAIPY_READER` role.

        Code example:

        ```shell
        curl -X GET http://localhost:5000/api/v1/tasks
        ```

      responses:
        200:
          content:
            application/json:
              schema:
                allOf:
                  - type: object
                    properties:
                      results:
                        type: array
                        items:
                          $ref: '#/components/schemas/TaskSchema'
    post:
      tags:
        - api
      summary: Create a task.
      description: |
        Create a new task from its *config_id*. If the config does not exist, a 404 error is returned.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint
          requires `TAIPY_EDITOR` role.

        Code example:

        ```shell
        curl -X POST http://localhost:5000/api/v1/tasks?config_id=my_task_config
        ```

      parameters:
        - in: query
          name: config_id
          schema:
            type: string
          description: The identifier of the task configuration.
      responses:
        201:
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
                    description: Status message.
                  task: TaskSchema
    """

    def __init__(self, **kwargs):
        # The logger is injected as a resource kwarg at registration time.
        self.logger = kwargs.get("logger")

    def fetch_config(self, config_id):
        # Resolve the task configuration; raise NonExistingTaskConfig (404) when missing.
        config = Config.tasks.get(config_id)
        if not config:
            raise NonExistingTaskConfig(config_id)
        return config

    @_middleware
    def get(self):
        schema = TaskSchema(many=True)
        manager = _TaskManagerFactory._build_manager()
        tasks = [_to_model(REPOSITORY, task) for task in manager._get_all()]
        return schema.dump(tasks)

    @_middleware
    def post(self):
        args = request.args
        config_id = args.get("config_id")

        schema = TaskSchema()
        manager = _TaskManagerFactory._build_manager()

        if not config_id:
            raise ConfigIdMissingException

        config = self.fetch_config(config_id)
        # _bulk_get_or_create returns a list; a single config yields a single task.
        task = manager._bulk_get_or_create([config])[0]

        return {
            "message": "Task was created.",
            "task": schema.dump(_to_model(REPOSITORY, task)),
        }, 201


class TaskExecutor(Resource):
    """Execute a task

    ---
    post:
      tags:
        - api
      summary: Execute a task.
      description: |
        Execute a task by *task_id*. If the task does not exist, a 404 error is returned.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint
          requires `TAIPY_EXECUTOR` role.

        Code example:

        ```shell
        curl -X POST http://localhost:5000/api/v1/tasks/submit/TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9
        ```

      parameters:
        - in: path
          name: task_id
          schema:
            type: string
      responses:
        204:
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
                    description: Status message.
                  task: TaskSchema
        404:
          description: No task has the *task_id* identifier.
    """

    def __init__(self, **kwargs):
        # The logger is injected as a resource kwarg at registration time.
        self.logger = kwargs.get("logger")

    @_middleware
    def post(self, task_id):
        manager = _TaskManagerFactory._build_manager()
        task = _get_or_raise(task_id)
        # Submission is delegated to the orchestrator; the call does not wait for completion.
        manager._orchestrator().submit_task(task)
        return {"message": f"Task {task_id} was submitted."}
|
import uuid
from typing import Optional

from flask import request
from flask_restful import Resource

from taipy.config.config import Config
from taipy.core import Job, JobId
from taipy.core.exceptions.exceptions import NonExistingJob, NonExistingTaskConfig
from taipy.core.job._job_manager_factory import _JobManagerFactory
from taipy.core.task._task_manager_factory import _TaskManagerFactory

from ..exceptions.exceptions import ConfigIdMissingException
from ..middlewares._middleware import _middleware
from ..schemas import JobSchema


def _get_or_raise(job_id: str):
    # Fetch the job identified by *job_id*; raise NonExistingJob (mapped to a
    # 404 response by the API error handlers) when no such job exists.
    manager = _JobManagerFactory._build_manager()
    job = manager._get(job_id)
    if job is None:
        raise NonExistingJob(job_id)
    return job


class JobResource(Resource):
    """Single object resource

    ---
    get:
      tags:
        - api
      summary: Get a job.
      description: |
        Return a single job by *job_id*. If the job does not exist, a 404 error is returned.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint
          requires `TAIPY_READER` role.

        Code example:

        ```shell
        curl -X GET http://localhost:5000/api/v1/jobs/JOB_my_task_config_75750ed8-4e09-4e00-958d-e352ee426cc9
        ```

      parameters:
        - in: path
          name: job_id
          schema:
            type: string
          description: The identifier of the job.
      responses:
        200:
          content:
            application/json:
              schema:
                type: object
                properties:
                  job: JobSchema
        404:
          description: No job has the *job_id* identifier.
    delete:
      tags:
        - api
      summary: Delete a job.
      description: |
        Delete a job. If the job does not exist, a 404 error is returned.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint
          requires `TAIPY_EDITOR` role.

        Code example:

        ```shell
        curl -X DELETE http://localhost:5000/api/v1/jobs/JOB_my_task_config_75750ed8-4e09-4e00-958d-e352ee426cc9
        ```

      parameters:
        - in: path
          name: job_id
          schema:
            type: string
          description: The identifier of the job.
      responses:
        200:
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
                    description: Status message.
        404:
          description: No job has the *job_id* identifier.
    """

    def __init__(self, **kwargs):
        # The logger is injected as a resource kwarg at registration time.
        self.logger = kwargs.get("logger")

    @_middleware
    def get(self, job_id):
        # NOTE(review): unlike other resources, the Job entity is dumped directly
        # (no _to_model conversion) — JobSchema serializes the entity itself.
        schema = JobSchema()
        job = _get_or_raise(job_id)
        return {"job": schema.dump(job)}

    @_middleware
    def delete(self, job_id):
        manager = _JobManagerFactory._build_manager()
        # Existence check first so a missing job yields a 404; _delete takes the entity.
        job = _get_or_raise(job_id)
        manager._delete(job)
        return {"message": f"Job {job_id} was deleted."}


class JobList(Resource):
    """Creation and get_all

    ---
    get:
      tags:
        - api
      summary: Get all jobs.
      description: |
        Return an array of all jobs.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint
          requires `TAIPY_READER` role.

        Code example:

        ```shell
        curl -X GET http://localhost:5000/api/v1/jobs
        ```

      responses:
        200:
          content:
            application/json:
              schema:
                allOf:
                  - type: object
                    properties:
                      results:
                        type: array
                        items:
                          $ref: '#/components/schemas/JobSchema'
    post:
      tags:
        - api
      summary: Create a job.
      description: |
        Create a job from a task *config_id*. If the config does not exist, a 404 error is returned.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint
          requires `TAIPY_EDITOR` role.

        Code example:

        ```shell
        curl -X POST http://localhost:5000/api/v1/jobs?task_id=TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9
        ```

      parameters:
        - in: query
          name: task_id
          schema:
            type: string
          description: The identifier of the task configuration.
      responses:
        201:
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
                    description: Status message.
                  job: JobSchema
    """

    def __init__(self, **kwargs):
        # The logger is injected as a resource kwarg at registration time.
        self.logger = kwargs.get("logger")

    def fetch_config(self, config_id):
        # Resolve the task configuration; raise NonExistingTaskConfig (404) when missing.
        config = Config.tasks.get(config_id)
        if not config:
            raise NonExistingTaskConfig(config_id)
        return config

    @_middleware
    def get(self):
        schema = JobSchema(many=True)
        manager = _JobManagerFactory._build_manager()
        jobs = manager._get_all()
        return schema.dump(jobs)

    @_middleware
    def post(self):
        args = request.args
        # NOTE(review): the query parameter is named "task_id" but actually carries
        # a task *config* id (see fetch_config) — confirm before renaming anything.
        task_config_id = args.get("task_id")

        if not task_config_id:
            raise ConfigIdMissingException

        manager = _JobManagerFactory._build_manager()
        schema = JobSchema()
        job = self.__create_job_from_schema(task_config_id)

        manager._set(job)
        return {
            "message": "Job was created.",
            "job": schema.dump(job),
        }, 201

    def __create_job_from_schema(self, task_config_id: str) -> Optional[Job]:
        # Build (or reuse) the task for the given config, then wrap it in a fresh Job
        # with generated JOB_/SUBMISSION_ identifiers.
        task_manager = _TaskManagerFactory._build_manager()
        task = task_manager._bulk_get_or_create([self.fetch_config(task_config_id)])[0]
        return Job(
            id=JobId(f"JOB_{uuid.uuid4()}"), task=task, submit_id=f"SUBMISSION_{uuid.uuid4()}", submit_entity_id=task.id
        )


class JobExecutor(Resource):
    """Cancel a job

    ---
    post:
      tags:
        - api
      summary: Cancel a job.
      description: |
        Cancel a job by *job_id*. If the job does not exist, a 404 error is returned.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint
          requires `TAIPY_EXECUTOR` role.

        Code example:

        ```shell
        curl -X POST http://localhost:5000/api/v1/jobs/cancel/JOB_my_task_config_75750ed8-4e09-4e00-958d-e352ee426cc9
        ```

      parameters:
        - in: path
          name: job_id
          schema:
            type: string
      responses:
        204:
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
                    description: Status message.
                  job: JobSchema
        404:
          description: No job has the *job_id* identifier.
    """

    def __init__(self, **kwargs):
        # The logger is injected as a resource kwarg at registration time.
        self.logger = kwargs.get("logger")

    @_middleware
    def post(self, job_id):
        manager = _JobManagerFactory._build_manager()
        job = _get_or_raise(job_id)
        manager._cancel(job)
        return {"message": f"Job {job_id} was cancelled."}
|
from flask import request
from flask_restful import Resource

from taipy.core.exceptions.exceptions import NonExistingScenario, NonExistingSequence
from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory
from taipy.core.sequence._sequence_manager_factory import _SequenceManagerFactory

from ...commons.to_from_model import _to_model
from ..exceptions.exceptions import ScenarioIdMissingException, SequenceNameMissingException
from ..middlewares._middleware import _middleware
from ..schemas import SequenceResponseSchema


def _get_or_raise(sequence_id: str):
    # Fetch the sequence identified by *sequence_id*; raise NonExistingSequence
    # (mapped to a 404 response by the API error handlers) when it does not exist.
    manager = _SequenceManagerFactory._build_manager()
    sequence = manager._get(sequence_id)
    if sequence is None:
        raise NonExistingSequence(sequence_id)
    return sequence


# Repository name passed to _to_model to pick the sequence entity/model converter.
REPOSITORY = "sequence"


class SequenceResource(Resource):
    """Single object resource

    ---
    get:
      tags:
        - api
      summary: Get a sequence.
      description: |
        Return a single sequence by sequence_id. If the sequence does not exist, a 404 error is returned.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint
          requires _TAIPY_READER_ role.

        Code example:

        ```shell
        curl -X GET http://localhost:5000/api/v1/sequences/SEQUENCE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9
        ```

      parameters:
        - in: path
          name: sequence_id
          schema:
            type: string
          description: The identifier of the sequence.
      responses:
        200:
          content:
            application/json:
              schema:
                type: object
                properties:
                  sequence: SequenceSchema
        404:
          description: No sequence has the *sequence_id* identifier.
    delete:
      tags:
        - api
      summary: Delete a sequence.
      description: |
        Delete a sequence. If the sequence does not exist, a 404 error is returned.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint
          requires _TAIPY_EDITOR_ role.

        Code example:

        ```shell
        curl -X DELETE http://localhost:5000/api/v1/sequences/SEQUENCE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9
        ```

      parameters:
        - in: path
          name: sequence_id
          schema:
            type: string
          description: The identifier of the sequence.
      responses:
        200:
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
                    description: Status message.
        404:
          description: No sequence has the *sequence_id* identifier.
    """

    def __init__(self, **kwargs):
        # The logger is injected as a resource kwarg at registration time.
        self.logger = kwargs.get("logger")

    @_middleware
    def get(self, sequence_id):
        schema = SequenceResponseSchema()
        sequence = _get_or_raise(sequence_id)
        return {"sequence": schema.dump(_to_model(REPOSITORY, sequence))}

    @_middleware
    def delete(self, sequence_id):
        manager = _SequenceManagerFactory._build_manager()
        # Existence check first so a missing sequence yields a 404 instead of a silent no-op.
        _get_or_raise(sequence_id)
        manager._delete(sequence_id)
        return {"message": f"Sequence {sequence_id} was deleted."}


class SequenceList(Resource):
    """Creation and get_all

    ---
    get:
      tags:
        - api
      summary: Get all sequences.
      description: |
        Return an array of all sequences.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint
          requires _TAIPY_READER_ role.

        Code example:

        ```shell
        curl -X GET http://localhost:5000/api/v1/sequences
        ```

      responses:
        200:
          content:
            application/json:
              schema:
                allOf:
                  - type: object
                    properties:
                      results:
                        type: array
                        items:
                          $ref: '#/components/schemas/SequenceSchema'
    post:
      tags:
        - api
      summary: Create a sequence.
      description: |
        Create a sequence from scenario_id, sequence_name and task_ids. If the scenario_id does not exist or
        sequence_name is not provided, a 404 error is returned.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint
          requires _TAIPY_EDITOR_ role.

        Code example:

        ```shell
        curl -X POST --data '{"scenario_id": "SCENARIO_scenario_id", "sequence_name": "sequence", "task_ids": []}'
        http://localhost:5000/api/v1/sequences
        ```

      parameters:
        - in: query
          name: scenario_id
          schema:
            type: string
          description: The Scenario the Sequence belongs to.
        - in: query
          name: sequence_name
          schema:
            type: string
          description: The name of the Sequence.
        - in: query
          name: task_ids
          schema:
            type: list[string]
          description: A list of task id of the Sequence.
      responses:
        201:
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
                    description: Status message.
                  sequence: SequenceSchema
    """

    def __init__(self, **kwargs):
        # The logger is injected as a resource kwarg at registration time.
        self.logger = kwargs.get("logger")

    @_middleware
    def get(self):
        schema = SequenceResponseSchema(many=True)
        manager = _SequenceManagerFactory._build_manager()
        sequences = [_to_model(REPOSITORY, sequence) for sequence in manager._get_all()]
        return schema.dump(sequences)

    @_middleware
    def post(self):
        # The request body (JSON) must carry "scenario_id", "sequence_name" and
        # optionally "task_ids" (defaults to an empty list).
        sequence_data = request.json
        scenario_id = sequence_data.get("scenario_id")
        sequence_name = sequence_data.get("sequence_name")
        sequence_task_ids = sequence_data.get("task_ids", [])

        response_schema = SequenceResponseSchema()
        if not scenario_id:
            raise ScenarioIdMissingException
        if not sequence_name:
            raise SequenceNameMissingException

        scenario = _ScenarioManagerFactory._build_manager()._get(scenario_id)
        if not scenario:
            raise NonExistingScenario(scenario_id=scenario_id)

        # The sequence is created on (and owned by) its scenario, then read back for the response.
        scenario.add_sequence(sequence_name, sequence_task_ids)
        sequence = scenario.sequences[sequence_name]

        return {
            "message": "Sequence was created.",
            "sequence": response_schema.dump(_to_model(REPOSITORY, sequence)),
        }, 201


class SequenceExecutor(Resource):
    """Execute a sequence

    ---
    post:
      tags:
        - api
      summary: Execute a sequence.
      description: |
        Execute a sequence from sequence_id. If the sequence does not exist, a 404 error is returned.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), This endpoint
          requires _TAIPY_EXECUTOR_ role.

        Code example:

        ```shell
        curl -X POST http://localhost:5000/api/v1/sequences/submit/SEQUENCE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9
        ```

      parameters:
        - in: path
          name: sequence_id
          schema:
            type: string
      responses:
        204:
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
                    description: Status message.
                  sequence: SequenceSchema
        404:
          description: No sequence has the *sequence_id* identifier.
    """

    def __init__(self, **kwargs):
        # The logger is injected as a resource kwarg at registration time.
        self.logger = kwargs.get("logger")

    @_middleware
    def post(self, sequence_id):
        # Existence check first so a missing sequence yields a 404 before submission.
        _get_or_raise(sequence_id)
        manager = _SequenceManagerFactory._build_manager()
        manager._submit(sequence_id)
        return {"message": f"Sequence {sequence_id} was submitted."}
|
# Aggregate every REST resource class in one package namespace so the API
# setup code can register all endpoints from a single import.
from .cycle import CycleList, CycleResource
from .datanode import DataNodeList, DataNodeReader, DataNodeResource, DataNodeWriter
from .job import JobExecutor, JobList, JobResource
from .scenario import ScenarioExecutor, ScenarioList, ScenarioResource
from .sequence import SequenceExecutor, SequenceList, SequenceResource
from .task import TaskExecutor, TaskList, TaskResource

# Public API of the package; order preserved as-is (it is part of the
# star-import contract, so it is not re-sorted here).
__all__ = [
    "DataNodeResource",
    "DataNodeList",
    "DataNodeReader",
    "DataNodeWriter",
    "TaskList",
    "TaskResource",
    "TaskExecutor",
    "SequenceList",
    "SequenceResource",
    "SequenceExecutor",
    "ScenarioList",
    "ScenarioResource",
    "ScenarioExecutor",
    "CycleResource",
    "CycleList",
    "JobResource",
    "JobList",
    "JobExecutor",
]
|
from typing import List

import numpy as np
import pandas as pd
from flask import request
from flask_restful import Resource

from taipy.config.config import Config
from taipy.core.data._data_manager_factory import _DataManagerFactory
from taipy.core.data.operator import Operator
from taipy.core.exceptions.exceptions import NonExistingDataNode, NonExistingDataNodeConfig

from ...commons.to_from_model import _to_model
from ..exceptions.exceptions import ConfigIdMissingException
from ..middlewares._middleware import _middleware
from ..schemas import (
    CSVDataNodeConfigSchema,
    DataNodeFilterSchema,
    DataNodeSchema,
    ExcelDataNodeConfigSchema,
    GenericDataNodeConfigSchema,
    InMemoryDataNodeConfigSchema,
    JSONDataNodeConfigSchema,
    PickleDataNodeConfigSchema,
    SQLTableDataNodeConfigSchema,
    SQLDataNodeConfigSchema,
    MongoCollectionDataNodeConfigSchema,
)

# Maps a data node config storage_type to the marshmallow schema used to
# serialize that config in the POST /datanodes response.
ds_schema_map = {
    "csv": CSVDataNodeConfigSchema,
    "pickle": PickleDataNodeConfigSchema,
    "in_memory": InMemoryDataNodeConfigSchema,
    "sql_table": SQLTableDataNodeConfigSchema,
    "sql": SQLDataNodeConfigSchema,
    "mongo_collection": MongoCollectionDataNodeConfigSchema,
    "excel": ExcelDataNodeConfigSchema,
    "generic": GenericDataNodeConfigSchema,
    "json": JSONDataNodeConfigSchema,
}

# Repository name passed to _to_model to pick the data node entity/model converter.
REPOSITORY = "data"


def _get_or_raise(data_node_id: str):
    # Fetch the data node identified by *data_node_id*; raise NonExistingDataNode
    # (mapped to a 404 response by the API error handlers) when it does not exist.
    # NOTE: the previous "-> None" return annotation was incorrect — this function
    # returns the fetched data node entity.
    manager = _DataManagerFactory._build_manager()
    data_node = manager._get(data_node_id)
    if not data_node:
        raise NonExistingDataNode(data_node_id)
    return data_node


class DataNodeResource(Resource):
    """Single object resource

    ---
    get:
      tags:
        - api
      description: |
        Returns a `DataNodeSchema^` representing the unique `DataNode^` identified by the *datanode_id*
        given as parameter. If no data node corresponds to *datanode_id*, a `404` error is returned.

        !!! Example

            === "Curl"
                ```shell
                curl -X GET http://localhost:5000/api/v1/datanodes/DATANODE_hist_cfg_75750ed8-4e09-4e00-958d
                -e352ee426cc9
                ```
                In this example the REST API is served on port 5000 on localhost. We are using curl command
                line client.

                `DATANODE_hist_cfg_75750ed8-4e09-4e00-958d-e352ee426cc9` is the value of the
                *datanode_id* parameter. It represents the identifier of the data node we want to retrieve.

                In case of success here is an example of the response:
                ``` JSON
                {"datanode": {
                    "id": "DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d",
                    "config_id": "historical_data_set",
                    "scope": "<Scope.SCENARIO: 2>",
                    "storage_type": "csv",
                    "name": "Name of my historical data node",
                    "owner_id": "SCENARIO_my_awesome_scenario_97f3fd67-8556-4c62-9b3b-ef189a599a38",
                    "last_edit_date": "2022-08-10T16:03:40.855082",
                    "job_ids": [],
                    "version": "latest",
                    "cacheable": false,
                    "validity_days": null,
                    "validity_seconds": null,
                    "edit_in_progress": false,
                    "data_node_properties": {
                        "path": "daily-min-temperatures.csv",
                        "has_header": true}
                }}
                ```

                In case of failure here is an example of the response:
                ``` JSON
                {"message":"DataNode DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d not found"}
                ```

            === "Python"
                This Python example requires the 'requests' package to be installed (`pip install requests`).
                ```python
                import requests

                response = requests.get(
                "http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d")
                print(response)
                print(response.json())
                ```
                `DATANODE_hist_cfg_75750ed8-4e09-4e00-958d-e352ee426cc9` is the value of the
                *datanode_id* parameter. It represents the identifier of the data node we want to retrieve.

                In case of success here is an output example:
                ```
                <Response [200]>
                {"datanode": {
                    "id": "DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d",
                    "config_id": "historical_data_set",
                    "scope": "<Scope.SCENARIO: 2>",
                    "storage_type": "csv",
                    "name": "Name of my historical data node",
                    "owner_id": "SCENARIO_my_awesome_scenario_97f3fd67-8556-4c62-9b3b-ef189a599a38",
                    "last_edit_date": "2022-08-10T16:03:40.855082",
                    "job_ids": [],
                    "version": "latest",
                    "cacheable": false,
                    "validity_days": null,
                    "validity_seconds": null,
                    "edit_in_progress": false,
                    "data_node_properties": {
                        "path": "daily-min-temperatures.csv",
                        "has_header": true}
                }}
                ```

                In case of failure here is an output example:
                ```
                <Response [404]>
                {"message":"DataNode DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d not found"}
                ```

        !!! Note
            When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
            requires the `TAIPY_READER` role.

      parameters:
        - in: path
          name: datanode_id
          schema:
            type: string
          description: The identifier of the data node to retrieve.
      responses:
        200:
          content:
            application/json:
              schema:
                type: object
                properties:
                  datanode: DataNodeSchema
        404:
          description: No data node has the *datanode_id* identifier.
    delete:
      tags:
        - api
      summary: Delete a data node.
      description: |
        Deletes the `DataNode^` identified by the *datanode_id* given as parameter. If the data node does not exist,
        a 404 error is returned.

        !!! Example

            === "Curl"
                ```shell
                curl -X DELETE \
                http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d
                ```
                In this example the REST API is served on port 5000 on localhost. We are using curl command
                line client.

                `DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d` is the value of the
                *datanode_id* parameter. It represents the identifier of the data node we want to delete.

                In case of success here is an example of the response:
                ``` JSON
                {"msg": "datanode DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d deleted"}
                ```

                In case of failure here is an example of the response:
                ``` JSON
                {"message": "Data node DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d not found."}
                ```

            === "Python"
                This Python example requires the 'requests' package to be installed (`pip install requests`).
                ```python
                import requests

                response = requests.delete(
                "http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d")
                print(response)
                print(response.json())
                ```
                `DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d` is the value of the
                *datanode_id* parameter. It represents the identifier of the Cycle we want to delete.

                In case of success here is an output example:
                ```
                <Response [200]>
                {"msg": "Data node DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d deleted."}
                ```

                In case of failure here is an output example:
                ```
                <Response [404]>
                {'message': 'Data node DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d not found.'}
                ```

        !!! Note
            When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
            requires the `TAIPY_EDITOR` role.

      parameters:
        - in: path
          name: datanode_id
          schema:
            type: string
          description: The identifier of the data node to delete.
      responses:
        200:
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
                    description: Status message.
        404:
          description: No data node has the *datanode_id* identifier.
    """

    def __init__(self, **kwargs):
        # The logger is injected as a resource kwarg at registration time.
        self.logger = kwargs.get("logger")

    @_middleware
    def get(self, datanode_id):
        schema = DataNodeSchema()
        datanode = _get_or_raise(datanode_id)
        return {"datanode": schema.dump(_to_model(REPOSITORY, datanode))}

    @_middleware
    def delete(self, datanode_id):
        # Existence check first so a missing data node yields a 404 instead of a silent no-op.
        _get_or_raise(datanode_id)
        manager = _DataManagerFactory._build_manager()
        manager._delete(datanode_id)
        return {"message": f"Data node {datanode_id} was deleted."}


class DataNodeList(Resource):
    """Creation and get_all

    ---
    get:
      tags:
        - api
      description: |
        Returns a `DataNodeSchema^` list representing all existing data nodes.

        !!! Example

            === "Curl"
                ```shell
                curl -X GET http://localhost:5000/api/v1/datanodes
                ```
                In this example the REST API is served on port 5000 on localhost. We are using curl command
                line client.

                Here is an example of the response:
                ``` JSON
                [
                    {"datanode": {
                        "id": "DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d",
                        "config_id": "historical_data_set",
                        "scope": "<Scope.SCENARIO: 2>",
                        "storage_type": "csv",
                        "name": "Name of my historical data node",
                        "owner_id": "SCENARIO_my_awesome_scenario_97f3fd67-8556-4c62-9b3b-ef189a599a38",
                        "last_edit_date": "2022-08-10T16:03:40.855082",
                        "job_ids": [],
                        "version": "latest",
                        "cacheable": false,
                        "validity_days": null,
                        "validity_seconds": null,
                        "edit_in_progress": false,
                        "data_node_properties": {
                            "path": "daily-min-temperatures.csv",
                            "has_header": true}
                    }}
                ]
                ```

                If there is no data node, the response is an empty list as follows:
                ``` JSON
                []
                ```

            === "Python"
                This Python example requires the 'requests' package to be installed (`pip install requests`).
                ```python
                import requests

                response = requests.get("http://localhost:5000/api/v1/datanodes")
                print(response)
                print(response.json())
                ```
                In case of success here is an output example:
                ```
                <Response [200]>
                [
                    {"datanode": {
                        "id": "DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d",
                        "config_id": "historical_data_set",
                        "scope": "<Scope.SCENARIO: 2>",
                        "storage_type": "csv",
                        "name": "Name of my historical data node",
                        "owner_id": "SCENARIO_my_awesome_scenario_97f3fd67-8556-4c62-9b3b-ef189a599a38",
                        "last_edit_date": "2022-08-10T16:03:40.855082",
                        "job_ids": [],
                        "version": "latest",
                        "cacheable": false,
                        "validity_days": null,
                        "validity_seconds": null,
                        "edit_in_progress": false,
                        "data_node_properties": {
                            "path": "daily-min-temperatures.csv",
                            "has_header": true}
                    }}
                ]
                ```

                If there is no data node, the response is an empty list as follows:
                ```
                <Response [200]>
                []
                ```

        !!! Note
            When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
            requires the `TAIPY_READER` role.

      responses:
        200:
          content:
            application/json:
              schema:
                allOf:
                  - type: object
                    properties:
                      results:
                        type: array
                        items:
                          $ref: '#/components/schemas/DataNodeSchema'
    post:
      tags:
        - api
      description: |
        Creates a new data node from the *config_id* given as parameter.

        !!! Example

            === "Curl"
                ```shell
                curl -X POST http://localhost:5000/api/v1/datanodes?config_id=historical_data_set
                ```
                In this example the REST API is served on port 5000 on localhost. We are using curl command
                line client.

                In this example the *config_id* value ("historical_data_set") is given as parameter directly in the
                url. A corresponding `DataNodeConfig^` must exist and must have been configured before.

                Here is the output message example:
                ```
                {"msg": "datanode created",
                "datanode": {
                    "default_path": null,
                    "path": "daily-min-temperatures.csv",
                    "name": null,
                    "storage_type": "csv",
                    "scope": 2,
                    "has_header": true}
                }
                ```

            === "Python"
                This Python example requires the 'requests' package to be installed (`pip install requests`).
                ```python
                import requests

                response = requests.post("http://localhost:5000/api/v1/datanodes?config_id=historical_data_set")
                print(response)
                print(response.json())
                ```
                In this example the *config_id* value ("historical_data_set") is given as parameter directly in the
                url. A corresponding `DataNodeConfig^` must exist and must have been configured before.

                Here is the output example:
                ```
                <Response [201]>
                {'msg': 'datanode created',
                'datanode': {
                    'name': None,
                    'scope': 2,
                    'path': 'daily-min-temperatures.csv',
                    'storage_type': 'csv',
                    'default_path': None,
                    'has_header': True}}
                ```

        !!! Note
            When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
            requires the `TAIPY_EDITOR` role.

      parameters:
        - in: query
          name: config_id
          schema:
            type: string
          description: The identifier of the data node configuration.
      responses:
        201:
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
                    description: Status message.
                  datanode: DataNodeSchema
    """

    def __init__(self, **kwargs):
        # The logger is injected as a resource kwarg at registration time.
        self.logger = kwargs.get("logger")

    def fetch_config(self, config_id):
        # Resolve the data node configuration; raise NonExistingDataNodeConfig (404) when missing.
        config = Config.data_nodes.get(config_id)
        if not config:
            raise NonExistingDataNodeConfig(config_id)
        return config

    @_middleware
    def get(self):
        schema = DataNodeSchema(many=True)
        manager = _DataManagerFactory._build_manager()
        datanodes = [_to_model(REPOSITORY, datanode) for datanode in manager._get_all()]
        return schema.dump(datanodes)

    @_middleware
    def post(self):
        args = request.args
        config_id = args.get("config_id")

        if not config_id:
            raise ConfigIdMissingException

        config = self.fetch_config(config_id)
        # Pick the storage-type-specific schema; the response serializes the CONFIG
        # (not the created data node entity), matching the documented output above.
        schema = ds_schema_map.get(config.storage_type)()
        manager = _DataManagerFactory._build_manager()
        manager._bulk_get_or_create({config})

        return {
            "message": "Data node was created.",
            "datanode": schema.dump(config),
        }, 201


class DataNodeReader(Resource):
    """Single object resource

    ---
    get:
      tags:
        - api
      description: |
        Returns the data read from the data node identified by *datanode_id*. If the data node does not exist,
        a 404 error is returned.

        !!! Example

            === "Curl"
                ```shell
                curl -X GET \
                http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d/read
                ```
                `DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d` is the *datanode_id*
                parameter. It represents the identifier of the data node to read.

                Here is an output example. In this case, the storage type of the data node to read is `csv`,
                and no exposed type is specified. The data is exposed as a list of dictionaries, each dictionary
                representing a raw of the csv file.
                ```
                {"data": [
                    {"Date": "1981-01-01", "Temp": 20.7}, {"Date": "1981-01-02", "Temp": 17.9},
                    {"Date": "1981-01-03", "Temp": 18.8}, {"Date": "1981-01-04", "Temp": 14.6},
                    {"Date": "1981-01-05", "Temp": 15.8}, {"Date": "1981-01-06", "Temp": 15.8},
                    {"Date": "1981-01-07", "Temp": 15.8}
                ]}
                ```

            === "Python"
                This Python example requires the 'requests' package to be installed (`pip install requests`).
                ```python
                import requests

                response = requests.get(
                "http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d/read")
                print(response)
                print(response.json())
                ```
                `DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d` is the *datanode_id*
                parameter. It represents the identifier of the data node to read.

                Here is an output example. In this case, the storage type of the data node to read is `csv`,
                and no exposed type is specified. The data is exposed as a list of dictionaries, each dictionary
                representing a raw of the csv file.
                ```
                {"data": [
                    {"Date": "1981-01-01", "Temp": 20.7}, {"Date": "1981-01-02", "Temp": 17.9},
                    {"Date": "1981-01-03", "Temp": 18.8}, {"Date": "1981-01-04", "Temp": 14.6},
                    {"Date": "1981-01-05", "Temp": 15.8}, {"Date": "1981-01-06", "Temp": 15.8},
                    {"Date": "1981-01-07", "Temp": 15.8}
                ]}
                ```

        !!! Note
            When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
            requires the `TAIPY_READER` role.

      parameters:
        - in: path
          name: datanode_id
          schema:
            type: string
          description: The id of the data node to read.
      requestBody:
        content:
          application/json:
            schema: DataNodeFilterSchema
      responses:
        200:
          content:
            application/json:
              schema:
                type: object
                properties:
                  data:
                    type: Any
                    description: The data read from the data node.
        404:
          description: No data node has the *datanode_id* identifier.
    """

    def __init__(self, **kwargs):
        # The logger is injected as a resource kwarg at registration time.
        self.logger = kwargs.get("logger")

    def __make_operators(self, schema: DataNodeFilterSchema) -> List:
        # Convert each filter entry of the request body into the (key, value, Operator)
        # triple expected by DataNode.filter. The "operator" string is matched
        # case-insensitively against the Operator enum names.
        return [
            (
                x.get("key"),
                x.get("value"),
                Operator(getattr(Operator, x.get("operator", "").upper())),
            )
            for x in schema.get("operators")
        ]

    @_middleware
    def get(self, datanode_id):
        schema = DataNodeFilterSchema()
        # The filter body is optional: silent=True returns None instead of raising
        # when the request carries no (or invalid) JSON.
        data = request.get_json(silent=True)
        data_node = _get_or_raise(datanode_id)
        operators = self.__make_operators(schema.load(data)) if data else []
        data = data_node.filter(operators)
        # Normalize pandas/numpy results into JSON-serializable structures.
        if isinstance(data, pd.DataFrame):
            data = data.to_dict(orient="records")
        elif isinstance(data, np.ndarray):
            data = list(data)
        return {"data": data}


class DataNodeWriter(Resource):
    """Single object resource

    ---
    put:
      tags:
        - api
      summary: Write into a data node.
      description: |
        Write data from request body into a data node by *datanode_id*. If the data node does not exist, a 404 error is
        returned.

        !!! Note
          When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint
          requires `TAIPY_EDITOR` role.

        Code example:

        ```shell
        curl -X PUT -d '[{"path": "/abc", "type": 1}, {"path": "/def", "type": 2}]' -H 'Content-Type: application/json'
        http://localhost:5000/api/v1/datanodes/DATANODE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9/write
        ```

      parameters:
        - in: path
          name: datanode_id
          schema:
            type: string
      requestBody:
        content:
          application/json:
            schema: Any
      responses:
        200:
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
                    description: Status message.
        404:
          description: No data node has the *datanode_id* identifier.
    """

    def __init__(self, **kwargs):
        # The logger is injected as a resource kwarg at registration time.
        self.logger = kwargs.get("logger")

    @_middleware
    def put(self, datanode_id):
        # The raw JSON body is written as-is to the underlying storage.
        data = request.json
        data_node = _get_or_raise(datanode_id)
        data_node.write(data)
        return {"message": f"Data node {datanode_id} was successfully written."}
|
from flask import request from flask_restful import Resource from taipy.config.config import Config from taipy.core.exceptions.exceptions import NonExistingScenario, NonExistingScenarioConfig from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from ...commons.to_from_model import _to_model from ..exceptions.exceptions import ConfigIdMissingException from ..middlewares._middleware import _middleware from ..schemas import ScenarioResponseSchema def _get_or_raise(scenario_id: str): manager = _ScenarioManagerFactory._build_manager() scenario = manager._get(scenario_id) if scenario is None: raise NonExistingScenario(scenario_id) return scenario REPOSITORY = "scenario" class ScenarioResource(Resource): """Single object resource --- get: tags: - api description: | Returns a `ScenarioSchema^` representing the unique scenario identified by *scenario_id*. If no scenario corresponds to *scenario_id*, a `404` error is returned. !!! Example === "Curl" ```shell curl -X GET http://localhost:5000/api/v1/scenarios/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Scenario we want to retrieve. 
In case of success here is an example of the response: ``` JSON {"scenario": { "cycle": "CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d", "id": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c", "properties": {}, "tags": [], "version": "latest", "sequences": [ "SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99", "SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4"], "subscribers": [], "creation_date": "2022-08-15T19:21:01.871587", "primary_scenario": true}} ``` In case of failure here is an example of the response: ``` JSON {"message": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c not found."} ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.get( "http://localhost:5000/api/v1/scenarios/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c") print(response) print(response.json()) ``` `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Cycle we want to retrieve. In case of success here is an output example: ``` <Response [200]> {"scenario": { "cycle": "CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d", "id": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c", "properties": {}, "tags": [], "version": "latest", "sequences": [ "SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99", "SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4"], "subscribers": [], "creation_date": "2022-08-15T19:21:01.871587", "primary_scenario": true}} ``` In case of failure here is an output example: ``` <Response [404]> {'message': 'Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c not found.'} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. parameters: - in: path name: scenario_id schema: type: string description: The identifier of the scenario to retrieve. 
responses: 200: content: application/json: schema: type: object properties: scenario: ScenarioSchema 404: description: No scenario has the *scenario_id* identifier. delete: tags: - api description: | Delete the `Scenario^` scenario identified by the *scenario_id* given as parameter. If the scenario does not exist, a 404 error is returned. !!! Example === "Curl" ```shell curl -X DELETE http://localhost:5000/api/v1/scenarios/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the scenario we want to delete. In case of success here is an example of the response: ``` JSON {"msg": "Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c deleted."} ``` In case of failure here is an example of the response: ``` JSON {"message": "Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c not found."} ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.delete( "http://localhost:5000/api/v1/scenarios/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c") print(response) print(response.json()) ``` `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Scenario we want to delete. In case of success here is an output example: ``` <Response [200]> {"msg": "Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c deleted."} ``` In case of failure here is an output example: ``` <Response [404]> {'message': 'Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c not found.'} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. 
parameters: - in: path name: scenario_id schema: type: string description: The identifier of the scenario to delete. responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No scenario has the *scenario_id* identifier. """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def get(self, scenario_id): schema = ScenarioResponseSchema() scenario = _get_or_raise(scenario_id) return {"scenario": schema.dump(_to_model(REPOSITORY, scenario))} @_middleware def delete(self, scenario_id): manager = _ScenarioManagerFactory._build_manager() _get_or_raise(scenario_id) manager._delete(scenario_id) return {"message": f"Scenario {scenario_id} was deleted."} class ScenarioList(Resource): """Creation and get_all --- get: tags: - api summary: Get all scenarios. description: | Returns a `ScenarioSchema^` list representing all existing Scenarios. !!! Example === "Curl" ```shell curl -X GET http://localhost:5000/api/v1/scenarios ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. Here is an example of the response: ``` JSON [{ "cycle": "CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d", "id": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c", "properties": {}, "tags": [], "version": "latest", "sequences": [ "SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99", "SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4"], "subscribers": [], "creation_date": "2022-08-15T19:21:01.871587", "primary_scenario": true } ] ``` If there is no scenario, the response is an empty list as follows: ``` JSON [] ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). 
```python import requests response = requests.get("http://localhost:5000/api/v1/scenarios") print(response) print(response.json()) ``` In case of success here is an output example: ``` <Response [200]> [{ "cycle": "CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d", "id": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c", "properties": {}, "tags": [], "version": "latest", "sequences": [ "SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99", "SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4"], "subscribers": [], "creation_date": "2022-08-15T19:21:01.871587", "primary_scenario": true } ] ``` If there is no scenario, the response is an empty list as follows: ``` <Response [200]> [] ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. responses: 200: content: application/json: schema: allOf: - type: object properties: results: type: array items: $ref: '#/components/schemas/ScenarioSchema' post: tags: - api description: | Creates a new scenario from the *config_id*. If the config does not exist, a 404 error is returned. !!! Example === "Curl" ```shell curl -X POST http://localhost:5000/api/v1/scenarios?config_id=my_scenario_config ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. In this example the *config_id* value ("my_scenario_config") is given as parameter directly in the url. A corresponding `ScenarioConfig^` must exist and must have been configured before. 
Here is the output message example: ``` {"msg": "scenario created.", "scenario": { "cycle": "CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d", "id": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c", "properties": {}, "tags": [], "version": "latest", "sequences": [ "SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99", "SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4"], "subscribers": [], "creation_date": "2022-08-15T19:21:01.871587", "primary_scenario": true} } ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.post("http://localhost:5000/api/v1/scenarios?config_id=my_scenario_config") print(response) print(response.json()) ``` In this example the *config_id* value ("my_scenario_config") is given as parameter directly in the url. A corresponding `ScenarioConfig^` must exist and must have been configured before. Here is the output example: ``` <Response [201]> {"msg": "scenario created.", "scenario": { "cycle": "CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d", "id": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c", "properties": {}, "tags": [], "version": "latest", "sequences": [ "SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99", "SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4"], "subscribers": [], "creation_date": "2022-08-15T19:21:01.871587", "primary_scenario": true} } ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. parameters: - in: query name: config_id schema: type: string description: The identifier of the scenario configuration. responses: 201: content: application/json: schema: type: object properties: message: type: string description: Status message. 
scenario: ScenarioSchema """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") def fetch_config(self, config_id): config = Config.scenarios.get(config_id) if not config: raise NonExistingScenarioConfig(config_id) return config @_middleware def get(self): schema = ScenarioResponseSchema(many=True) manager = _ScenarioManagerFactory._build_manager() scenarios = [_to_model(REPOSITORY, scenario) for scenario in manager._get_all()] return schema.dump(scenarios) @_middleware def post(self): args = request.args config_id = args.get("config_id") response_schema = ScenarioResponseSchema() manager = _ScenarioManagerFactory._build_manager() if not config_id: raise ConfigIdMissingException config = self.fetch_config(config_id) scenario = manager._create(config) return { "message": "Scenario was created.", "scenario": response_schema.dump(_to_model(REPOSITORY, scenario)), }, 201 class ScenarioExecutor(Resource): """Execute a scenario --- post: tags: - api description: | Executes a scenario by *scenario_id*. If the scenario does not exist, a 404 error is returned. !!! Example === "Curl" ```shell curl -X POST http://localhost:5000/api/v1/scenarios/submit/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Scenario we want to submit. Here is the output message example: ``` {"message": "Executed scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c."} ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.post( "http://localhost:5000/api/v1/scenarios/submit/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c") print(response) print(response.json()) ``` `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. 
It represents the identifier of the Scenario we want to submit. Here is the output example: ``` <Response [202]> {"message": "Executed scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c."} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EXECUTOR` role. parameters: - in: path name: scenario_id schema: type: string description: The identifier of the scenario to submit. responses: 202: content: application/json: schema: type: object properties: message: type: string description: Status message. scenario: ScenarioSchema 404: description: No scenario has the *scenario_id* identifier. """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def post(self, scenario_id): _get_or_raise(scenario_id) manager = _ScenarioManagerFactory._build_manager() manager._submit(scenario_id) return {"message": f"Scenario {scenario_id} was submitted."}
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from functools import wraps from importlib import util from taipy.core.common._utils import _load_fct def _middleware(f): @wraps(f) def wrapper(*args, **kwargs): if _using_enterprise(): return _enterprise_middleware()(f)(*args, **kwargs) else: return f(*args, **kwargs) return wrapper def _using_enterprise(): return util.find_spec("taipy.enterprise") is not None def _enterprise_middleware(): return _load_fct("taipy.enterprise.rest.api.middlewares._middleware", "_middleware")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.