import sys
import typing as t
from types import FrameType


def _get_module_name_from_frame(frame: FrameType):
    return frame.f_globals["__name__"] if "__name__" in frame.f_globals else None


def _get_module_name_from_imported_var(var_name: str, value: t.Any, sub_module_name: str) -> str:
    module_list = sys.modules.copy()
    # sub_module_name is expected to contain only a part (a suffix) of the full
    # module name exposed by sys.modules
    # --> find potential matching modules based on that suffix
    potential_matched_module = [m for m in module_list.keys() if m.endswith(sub_module_name)]
    for m in potential_matched_module:
        module = module_list[m]
        if hasattr(module, var_name) and getattr(module, var_name) is value:
            return m
    # no matching module holds this variable with this value
    return sub_module_name
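# Usage sketch (illustrative only, not part of the module above): resolving a full
# module name from a suffix. The "pkg.helpers" module is a made-up example.
if __name__ == "__main__":
    import types

    mod = types.ModuleType("pkg.helpers")
    mod.answer = 42
    sys.modules["pkg.helpers"] = mod
    # "helpers" is only the tail of the real module name; the helper scans
    # sys.modules for a suffix match that holds this exact object.
    assert _get_module_name_from_imported_var("answer", 42, "helpers") == "pkg.helpers"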
# flake8: noqa: E402 import contextlib import typing as t import warnings from threading import Thread from urllib.parse import quote as urlquote from urllib.parse import urlparse warnings.filterwarnings( "ignore", category=UserWarning, message="You do not have a working installation of the service_identity module: 'No module named 'service_identity''.*", ) from twisted.internet import reactor from twisted.web.proxy import ProxyClient, ProxyClientFactory from twisted.web.resource import Resource from twisted.web.server import NOT_DONE_YET, Site from .is_port_open import _is_port_open from .singleton import _Singleton if t.TYPE_CHECKING: from ..gui import Gui def _modifiedHandleResponseEnd(self): if self._finished: return self._finished = True with contextlib.suppress(Exception): self.father.finish() self.transport.loseConnection() setattr(ProxyClient, "handleResponseEnd", _modifiedHandleResponseEnd) class _TaipyReverseProxyResource(Resource): proxyClientFactoryClass = ProxyClientFactory def __init__(self, host, path, gui: "Gui", reactor=reactor): Resource.__init__(self) self.host = host self.path = path self.reactor = reactor self._gui = gui def getChild(self, path, request): return _TaipyReverseProxyResource( self.host, self.path + b"/" + urlquote(path, safe=b"").encode("utf-8"), self._gui, self.reactor, ) def _get_port(self): return self._gui._server._port def render(self, request): port = self._get_port() host = self.host if port == 80 else "%s:%d" % (self.host, port) request.requestHeaders.setRawHeaders(b"host", [host.encode("ascii")]) request.content.seek(0, 0) rest = self.path + b"?" + qs if (qs := urlparse(request.uri)[4]) else self.path clientFactory = self.proxyClientFactoryClass( request.method, rest, request.clientproto, request.getAllHeaders(), request.content.read(), request, ) self.reactor.connectTCP(self.host, port, clientFactory) return NOT_DONE_YET class NotebookProxy(object, metaclass=_Singleton): def __init__(self, gui: "Gui", listening_port: int) -> None: self._listening_port = listening_port self._gui = gui self._is_running = False def run(self): if self._is_running: return host = self._gui._get_config("host", "127.0.0.1") port = self._listening_port if _is_port_open(host, port): raise ConnectionError( f"Port {port} is already opened on {host}. You have another server application running on the same port." ) site = Site(_TaipyReverseProxyResource(host, b"", self._gui)) reactor.listenTCP(port, site) Thread(target=reactor.run, args=(False,)).start() self._is_running = True def stop(self): if not self._is_running: return self._is_running = False reactor.stop()
import typing as t from operator import attrgetter if t.TYPE_CHECKING: from ..gui import Gui def _getscopeattr(gui: "Gui", name: str, *more) -> t.Any: if more: return getattr(gui._get_data_scope(), name, more[0]) return getattr(gui._get_data_scope(), name) def _getscopeattr_drill(gui: "Gui", name: str) -> t.Any: return attrgetter(name)(gui._get_data_scope()) def _setscopeattr(gui: "Gui", name: str, value: t.Any): if gui._is_broadcasting(): for scope in gui._get_all_data_scopes().values(): setattr(scope, name, value) else: setattr(gui._get_data_scope(), name, value) def _setscopeattr_drill(gui: "Gui", name: str, value: t.Any): if gui._is_broadcasting(): for scope in gui._get_all_data_scopes().values(): _attrsetter(scope, name, value) else: _attrsetter(gui._get_data_scope(), name, value) def _hasscopeattr(gui: "Gui", name: str) -> bool: return hasattr(gui._get_data_scope(), name) def _delscopeattr(gui: "Gui", name: str): delattr(gui._get_data_scope(), name) def _attrsetter(obj: object, attr_str: str, value: object) -> None: var_name_split = attr_str.split(sep=".") for i in range(len(var_name_split) - 1): sub_name = var_name_split[i] obj = getattr(obj, sub_name) setattr(obj, var_name_split[-1], value)
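# Usage sketch (illustrative only): _attrsetter drills through a dotted path and
# assigns the leaf attribute, which is how "x.y.z"-style bindings are updated.
if __name__ == "__main__":
    from types import SimpleNamespace

    obj = SimpleNamespace(child=SimpleNamespace(value=0))
    _attrsetter(obj, "child.value", 42)
    assert obj.child.value == 42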
from __future__ import annotations

import typing as t

from .._warnings import _warn
from ..icon import Icon
from . import _MapDict


class _Adapter:
    def __init__(self):
        self.__adapter_for_type: t.Dict[str, t.Callable] = {}
        self.__type_for_variable: t.Dict[str, str] = {}
        self.__warning_by_type: t.Set[str] = set()

    def _add_for_type(self, type_name: str, adapter: t.Callable) -> None:
        self.__adapter_for_type[type_name] = adapter

    def _add_type_for_var(self, var_name: str, type_name: str) -> None:
        self.__type_for_variable[var_name] = type_name

    def _get_for_type(self, type_name: str) -> t.Optional[t.Callable]:
        return self.__adapter_for_type.get(type_name)

    def _get_unique_type(self, type_name: str) -> str:
        index = 0
        while type_name in self.__adapter_for_type:
            type_name = f"{type_name}{index}"
            index += 1
        return type_name

    def _run_for_var(self, var_name: str, value: t.Any, id_only=False) -> t.Any:
        ret = self._run(self.__get_for_var(var_name, value), value, var_name, id_only)
        return ret if ret is not None else value

    def __get_for_var(self, var_name: str, value: t.Any) -> t.Optional[t.Callable]:
        adapter = None
        type_name = self.__type_for_variable.get(var_name)
        if not isinstance(type_name, str):
            adapter = self.__adapter_for_type.get(var_name)
            type_name = var_name if callable(adapter) else type(value).__name__
        if adapter is None:
            adapter = self.__adapter_for_type.get(type_name)
        return adapter if callable(adapter) else None

    def _get_elt_per_ids(self, var_name: str, lov: t.List[t.Any]) -> t.Dict[str, t.Any]:
        dict_res = {}
        adapter = self.__get_for_var(var_name, lov[0] if lov else None)
        for value in lov:
            try:
                result = adapter(value._dict if isinstance(value, _MapDict) else value) if adapter else value
                if result is not None:
                    dict_res[self.__get_id(result)] = value
                    children = self.__get_children(result)
                    if children is not None:
                        dict_res.update(self._get_elt_per_ids(var_name, children))
            except Exception as e:
                _warn(f"Cannot run adapter for {var_name}", e)
        return dict_res

    def _run(
        self, adapter: t.Optional[t.Callable], value: t.Any, var_name: str, id_only=False
    ) -> t.Union[t.Tuple[str, ...], str, None]:
        if value is None:
            return None
        try:
            result = value._dict if isinstance(value, _MapDict) else value
            if adapter:
                result = adapter(result)
            if result is None:
                return result
            elif isinstance(result, str):
                return result
            tpl_res = self._get_valid_result(result, id_only)
            if tpl_res is None:
                _warn(
                    f"Adapter for {var_name} did not return a valid result. "
                    "Please check the documentation on List of Values Adapters."
                )
            else:
                if not id_only and len(tpl_res) > 2 and isinstance(tpl_res[2], list) and len(tpl_res[2]) > 0:
                    tpl_res = (tpl_res[0], tpl_res[1], self.__on_tree(adapter, tpl_res[2]))
                return (
                    (tpl_res + result[len(tpl_res) :])
                    if isinstance(result, tuple) and isinstance(tpl_res, tuple)
                    else tpl_res
                )
        except Exception as e:
            _warn(f"Cannot run adapter for {var_name}", e)
        return None

    def __on_tree(self, adapter: t.Optional[t.Callable], tree: t.List[t.Any]):
        ret_list = []
        for elt in tree:
            ret = self._run(adapter, elt, adapter.__name__ if adapter else "adapter")
            if ret is not None:
                ret_list.append(ret)
        return ret_list

    def _get_valid_result(self, value: t.Any, id_only=False) -> t.Union[t.Tuple[str, ...], str, None]:
        id = self.__get_id(value)
        if id_only:
            return id
        label = self.__get_label(value)
        if label is None:
            return None
        children = self.__get_children(value)
        return (id, label) if children is None else (id, label, children)  # type: ignore

    def __get_id(self, value: t.Any, dig=True) -> str:
        if isinstance(value, str):
            return value
        elif dig:
            if isinstance(value, (list, tuple)) and len(value):
                return self.__get_id(value[0], False)
            elif hasattr(value, "id"):
                return self.__get_id(value.id, False)
            elif hasattr(value, "__getitem__") and "id" in value:
                return self.__get_id(value.get("id"), False)
        if value is not None and type(value).__name__ not in self.__warning_by_type:
            _warn(f"LoV id must be a string, using a string representation of {type(value)}.")
            self.__warning_by_type.add(type(value).__name__)
        return "" if value is None else str(value)

    def __get_label(self, value: t.Any, dig=True) -> t.Union[str, t.Dict, None]:
        if isinstance(value, (str, Icon)):
            return Icon.get_dict_or(value)
        elif dig:
            if isinstance(value, (list, tuple)) and len(value) > 1:
                return self.__get_label(value[1], False)
            elif hasattr(value, "label"):
                return self.__get_label(value.label, False)
            elif hasattr(value, "__getitem__") and "label" in value:
                return self.__get_label(value["label"], False)
        return None

    def __get_children(self, value: t.Any) -> t.Optional[t.List[t.Any]]:
        if isinstance(value, (tuple, list)) and len(value) > 2:
            return value[2] if isinstance(value[2], list) else [value[2]]
        elif hasattr(value, "children"):
            return value.children if isinstance(value.children, list) else [value.children]
        elif hasattr(value, "__getitem__") and "children" in value:
            return value["children"] if isinstance(value["children"], list) else [value["children"]]
        return None
import typing as t def _get_css_var_value(value: t.Any) -> str: if isinstance(value, str): if " " in value: return f'"{value}"' return value if isinstance(value, int): return f"{value}px" return f"{value}"
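# Usage sketch (illustrative only): numbers become pixel lengths, and strings
# containing spaces are quoted so they remain a single CSS token.
if __name__ == "__main__":
    assert _get_css_var_value(10) == "10px"
    assert _get_css_var_value("red") == "red"
    assert _get_css_var_value("1px solid red") == '"1px solid red"'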
import typing as t from types import ModuleType from ..page import Page def _get_page_from_module(module: ModuleType) -> t.Optional[Page]: return next((v for v in vars(module).values() if isinstance(v, Page)), None)
from __future__ import annotations import typing as t if t.TYPE_CHECKING: from ..gui import Gui def _varname_from_content(gui: Gui, content: str) -> t.Optional[str]: return next((k for k, v in gui._get_locals_bind().items() if isinstance(v, str) and v == content), None)
from __future__ import annotations import contextlib import typing as t from flask import g class _LocalsContext: __ctx_g_name = "locals_context" def __init__(self) -> None: self.__default_module: str = "" self._lc_stack: t.List[str] = [] self._locals_map: t.Dict[str, t.Dict[str, t.Any]] = {} def set_default(self, default: t.Dict[str, t.Any], default_module_name: str = "") -> None: self.__default_module = default_module_name self._locals_map[self.__default_module] = default def get_default(self) -> t.Dict[str, t.Any]: return self._locals_map[self.__default_module] def get_all_keys(self) -> t.Set[str]: keys = set() for v in self._locals_map.values(): for i in v.keys(): keys.add(i) return keys def get_all_context(self): return self._locals_map.keys() def add(self, context: t.Optional[str], locals_dict: t.Optional[t.Dict[str, t.Any]]): if context is not None and locals_dict is not None and context not in self._locals_map: self._locals_map[context] = locals_dict @contextlib.contextmanager def set_locals_context(self, context: t.Optional[str]) -> t.Iterator[None]: try: if context in self._locals_map: if hasattr(g, _LocalsContext.__ctx_g_name): self._lc_stack.append(getattr(g, _LocalsContext.__ctx_g_name)) setattr(g, _LocalsContext.__ctx_g_name, context) yield finally: if hasattr(g, _LocalsContext.__ctx_g_name): if len(self._lc_stack) > 0: setattr(g, _LocalsContext.__ctx_g_name, self._lc_stack.pop()) else: delattr(g, _LocalsContext.__ctx_g_name) def get_locals(self) -> t.Dict[str, t.Any]: return self.get_default() if (context := self.get_context()) is None else self._locals_map[context] def get_context(self) -> t.Optional[str]: return getattr(g, _LocalsContext.__ctx_g_name) if hasattr(g, _LocalsContext.__ctx_g_name) else None def is_default(self) -> bool: return self.get_default() == self.get_locals() def _get_locals_bind_from_context(self, context: t.Optional[str]): if context is None: context = self.__default_module return self._locals_map[context]
from ._attributes import ( _delscopeattr, _getscopeattr, _getscopeattr_drill, _hasscopeattr, _setscopeattr, _setscopeattr_drill, ) from ._locals_context import _LocalsContext from ._map_dict import _MapDict from ._runtime_manager import _RuntimeManager from ._variable_directory import _variable_decode, _variable_encode, _VariableDirectory from .boolean import _is_boolean, _is_boolean_true from .clientvarname import _get_broadcast_var_name, _get_client_var_name, _to_camel_case from .datatype import _get_data_type from .date import _date_to_string, _string_to_date from .expr_var_name import _get_expr_var_name from .filename import _get_non_existent_file_path from .filter_locals import _filter_locals from .get_imported_var import _get_imported_var from .get_module_name import _get_module_name_from_frame, _get_module_name_from_imported_var from .get_page_from_module import _get_page_from_module from .getdatecolstrname import _RE_PD_TYPE, _get_date_col_str_name from .html import _get_css_var_value from .is_debugging import is_debugging from .is_port_open import _is_port_open from .isnotebook import _is_in_notebook from .types import ( _TaipyBase, _TaipyBool, _TaipyContent, _TaipyContentHtml, _TaipyContentImage, _TaipyData, _TaipyDate, _TaipyDateRange, _TaipyDict, _TaipyLoNumbers, _TaipyLov, _TaipyLovValue, _TaipyNumber, ) from .varnamefromcontent import _varname_from_content
import re import typing as t __expr_var_name_index: t.Dict[str, int] = {} _RE_NOT_IN_VAR_NAME = r"[^A-Za-z0-9]+" def _get_expr_var_name(expr: str) -> str: var_name = re.sub(_RE_NOT_IN_VAR_NAME, "_", expr) index = 0 if var_name in __expr_var_name_index.keys(): index = __expr_var_name_index[var_name] __expr_var_name_index[var_name] = index + 1 return f"tp_{var_name}_{index}" def _reset_expr_var_name(): __expr_var_name_index.clear()
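# Usage sketch (illustrative only): non-alphanumeric runs collapse to "_" and a
# per-name counter keeps repeated expressions distinct.
if __name__ == "__main__":
    assert _get_expr_var_name("x + 1") == "tp_x_1_0"
    assert _get_expr_var_name("x + 1") == "tp_x_1_1"
    _reset_expr_var_name()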
import socket def _is_port_open(host, port) -> bool: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) result = sock.connect_ex((host, port)) sock.close() return result == 0
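# Usage sketch (illustrative only): a successful connect_ex() means something is
# already listening, e.g. to guard against starting a second server on port 5000.
if __name__ == "__main__":
    if _is_port_open("127.0.0.1", 5000):
        print("Port 5000 is already in use")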
import json
import typing as t
from abc import ABC
from datetime import datetime

from .._warnings import _warn
from . import _date_to_string, _MapDict, _string_to_date, _variable_decode


class _TaipyBase(ABC):
    __HOLDER_PREFIXES: t.Optional[t.List[str]] = None
    _HOLDER_PREFIX = "_Tp"

    def __init__(self, data: t.Any, hash_name: str) -> None:
        self.__data = data
        self.__hash_name = hash_name

    def get(self):
        return self.__data

    def get_name(self):
        return self.__hash_name

    def set(self, data: t.Any):
        self.__data = data

    def cast_value(self, value: t.Any):
        return value

    def _get_readable_name(self):
        try:
            name, mod = _variable_decode(
                self.__hash_name[5:] if self.__hash_name.startswith("tpec_") else self.__hash_name
            )
            return name if mod is None or mod == "__main__" else f"{mod}.{name}"
        except Exception:
            return self.__hash_name

    @staticmethod
    def get_hash():
        raise NotImplementedError

    @staticmethod
    def _get_holder_prefixes() -> t.List[str]:
        if _TaipyBase.__HOLDER_PREFIXES is None:
            _TaipyBase.__HOLDER_PREFIXES = [cls.get_hash() + "_" for cls in _TaipyBase.__subclasses__()]
        return _TaipyBase.__HOLDER_PREFIXES


class _TaipyData(_TaipyBase):
    @staticmethod
    def get_hash():
        return _TaipyBase._HOLDER_PREFIX + "D"


class _TaipyBool(_TaipyBase):
    def get(self):
        return self.cast_value(super().get())

    def cast_value(self, value: t.Any):
        return bool(value)

    @staticmethod
    def get_hash():
        return _TaipyBase._HOLDER_PREFIX + "B"


class _TaipyNumber(_TaipyBase):
    def get(self):
        try:
            return float(super().get())
        except Exception as e:
            raise TypeError(f"Variable '{self._get_readable_name()}' should hold a number: {e}")

    def cast_value(self, value: t.Any):
        if isinstance(value, str):
            try:
                return float(value) if value else 0.0
            except Exception as e:
                _warn(f"{self._get_readable_name()}: Parsing {value} as float", e)
                return 0.0
        return super().cast_value(value)

    @staticmethod
    def get_hash():
        return _TaipyBase._HOLDER_PREFIX + "N"


class _TaipyLoNumbers(_TaipyBase):
    def cast_value(self, value: t.Any):
        if isinstance(value, str):
            try:
                return [float(f) for f in value[1:-1].split(",")]
            except Exception as e:
                _warn(f"{self._get_readable_name()}: Parsing {value} as an array of numbers", e)
                return []
        return super().cast_value(value)

    @staticmethod
    def get_hash():
        return _TaipyBase._HOLDER_PREFIX + "Ln"


class _TaipyDate(_TaipyBase):
    def get(self):
        val = super().get()
        if isinstance(val, datetime):
            val = _date_to_string(val)
        elif val is not None:
            val = str(val)
        return val

    def cast_value(self, value: t.Any):
        if isinstance(value, str):
            return _string_to_date(value)
        return super().cast_value(value)

    @staticmethod
    def get_hash():
        return _TaipyBase._HOLDER_PREFIX + "Dt"


class _TaipyDateRange(_TaipyBase):
    def get(self):
        val = super().get()
        if isinstance(val, list):
            return [_date_to_string(v) if isinstance(v, datetime) else None if v is None else str(v) for v in val]
        return val

    def cast_value(self, value: t.Any):
        if isinstance(value, list):
            return [_string_to_date(v) if isinstance(v, str) else str(v) for v in value]
        return super().cast_value(value)

    @staticmethod
    def get_hash():
        return _TaipyBase._HOLDER_PREFIX + "Dr"


class _TaipyLovValue(_TaipyBase):
    @staticmethod
    def get_hash():
        return _TaipyBase._HOLDER_PREFIX + "Lv"


class _TaipyLov(_TaipyBase):
    @staticmethod
    def get_hash():
        return _TaipyBase._HOLDER_PREFIX + "L"


class _TaipyContent(_TaipyBase):
    @staticmethod
    def get_hash():
        return _TaipyBase._HOLDER_PREFIX + "C"


class _TaipyContentImage(_TaipyBase):
    @staticmethod
    def get_hash():
        return _TaipyBase._HOLDER_PREFIX + "Ci"


class _TaipyContentHtml(_TaipyBase):
    @staticmethod
    def get_hash():
        return _TaipyBase._HOLDER_PREFIX + "Ch"


class _TaipyDict(_TaipyBase):
    def get(self):
        val = super().get()
        return json.dumps(val._dict if isinstance(val, _MapDict) else val)

    @staticmethod
    def get_hash():
        return _TaipyBase._HOLDER_PREFIX + "Di"
import sys def is_debugging() -> bool: """NOT DOCUMENTED""" return hasattr(sys, "gettrace") and sys.gettrace() is not None
import typing as t def _is_boolean_true(s: t.Union[bool, str]) -> bool: return ( s if isinstance(s, bool) else s.lower() in ["true", "1", "t", "y", "yes", "yeah", "sure"] if isinstance(s, str) else False ) def _is_boolean(s: t.Any) -> bool: if isinstance(s, bool): return True elif isinstance(s, str): return s.lower() in ["true", "1", "t", "y", "yes", "yeah", "sure", "false", "0", "f", "no"] else: return False
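# Usage sketch (illustrative only): _is_boolean accepts both truthy and falsy
# spellings, while _is_boolean_true only recognizes the truthy ones.
if __name__ == "__main__":
    assert _is_boolean_true("Yes") and not _is_boolean_true("no")
    assert _is_boolean("0") and not _is_boolean("maybe")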
_replace_dict = {".": "__", "[": "_SqrOp_", "]": "_SqrCl_"}


def _get_client_var_name(var_name: str) -> str:
    for k, v in _replace_dict.items():
        var_name = var_name.replace(k, v)
    return var_name


def _to_camel_case(value: str, upcase_first=False) -> str:
    if not isinstance(value, str):
        raise Exception("_to_camel_case allows only a string parameter")
    if len(value) <= 1:
        return value.lower()
    value = value.replace("_", " ").title().replace(" ", "").replace("[", "_").replace("]", "_")
    return value[0].lower() + value[1:] if not upcase_first else value


def _get_broadcast_var_name(s: str) -> str:
    return _get_client_var_name(f"_bc_{s}")
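# Usage sketch (illustrative only): characters that are invalid in JavaScript
# identifiers are rewritten so variable names survive the round trip to the client.
if __name__ == "__main__":
    assert _get_client_var_name("x.y[0]") == "x__y_SqrOp_0_SqrCl_"
    assert _to_camel_case("group_by") == "groupBy"
    assert _get_broadcast_var_name("count") == "_bc_count"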
import re
import typing as t

from .._warnings import _warn
from .boolean import _is_boolean, _is_boolean_true
from .clientvarname import _to_camel_case


def _get_column_desc(columns: t.Dict[str, t.Any], key: str) -> t.Optional[t.Dict[str, t.Any]]:
    return next((x for x in columns.values() if x.get("dfid") == key), None)


def _get_name_indexed_property(attributes: t.Dict[str, t.Any], name: str) -> t.Dict[str, t.Any]:
    ret = {}
    index_re = re.compile(name + r"\[(.*)\]$")
    for key in attributes.keys():
        if m := index_re.match(key):
            ret[m.group(1)] = attributes.get(key)
    return ret


def _update_col_desc_from_indexed(
    attributes: t.Dict[str, t.Any], columns: t.Dict[str, t.Any], name: str, elt_name: str
):
    col_value = _get_name_indexed_property(attributes, name)
    for k, v in col_value.items():
        if col_desc := next((x for x in columns.values() if x.get("dfid") == k), None):
            if col_desc.get(_to_camel_case(name)) is None:
                col_desc[_to_camel_case(name)] = str(v)
        else:
            _warn(f"{elt_name}: {name}[{k}] is not in the list of displayed columns.")


def _enhance_columns(  # noqa: C901
    attributes: t.Dict[str, t.Any], hash_names: t.Dict[str, str], columns: t.Dict[str, t.Any], elt_name: str
):
    _update_col_desc_from_indexed(attributes, columns, "nan_value", elt_name)
    _update_col_desc_from_indexed(attributes, columns, "width", elt_name)
    filters = _get_name_indexed_property(attributes, "filter")
    for k, v in filters.items():
        if _is_boolean_true(v):
            if col_desc := _get_column_desc(columns, k):
                col_desc["filter"] = True
            else:
                _warn(f"{elt_name}: filter[{k}] is not in the list of displayed columns.")
    editables = _get_name_indexed_property(attributes, "editable")
    for k, v in editables.items():
        if _is_boolean(v):
            if col_desc := _get_column_desc(columns, k):
                col_desc["notEditable"] = not _is_boolean_true(v)
            else:
                _warn(f"{elt_name}: editable[{k}] is not in the list of displayed columns.")
    group_by = _get_name_indexed_property(attributes, "group_by")
    for k, v in group_by.items():
        if _is_boolean_true(v):
            if col_desc := _get_column_desc(columns, k):
                col_desc["groupBy"] = True
            else:
                _warn(f"{elt_name}: group_by[{k}] is not in the list of displayed columns.")
    apply = _get_name_indexed_property(attributes, "apply")
    for k, v in apply.items():  # pragma: no cover
        if col_desc := _get_column_desc(columns, k):
            if callable(v):
                value = hash_names.get(f"apply[{k}]")
            elif isinstance(v, str):
                value = v.strip()
            else:
                _warn(f"{elt_name}: apply[{k}] should be a user or predefined function.")
                value = None
            if value:
                col_desc["apply"] = value
        else:
            _warn(f"{elt_name}: apply[{k}] is not in the list of displayed columns.")
    styles = _get_name_indexed_property(attributes, "style")
    for k, v in styles.items():  # pragma: no cover
        if col_desc := _get_column_desc(columns, k):
            if callable(v):
                value = hash_names.get(f"style[{k}]")
            elif isinstance(v, str):
                value = v.strip()
            else:
                value = None
            if value in columns.keys():
                _warn(f"{elt_name}: style[{k}]={value} cannot be a column's name.")
            elif value:
                col_desc["style"] = value
        else:
            _warn(f"{elt_name}: style[{k}] is not in the list of displayed columns.")
    tooltips = _get_name_indexed_property(attributes, "tooltip")
    for k, v in tooltips.items():  # pragma: no cover
        if col_desc := _get_column_desc(columns, k):
            if callable(v):
                value = hash_names.get(f"tooltip[{k}]")
            elif isinstance(v, str):
                value = v.strip()
            else:
                value = None
            if value in columns.keys():
                _warn(f"{elt_name}: tooltip[{k}]={value} cannot be a column's name.")
            elif value:
                col_desc["tooltip"] = value
        else:
            _warn(f"{elt_name}: tooltip[{k}] is not in the list of displayed columns.")
    return columns
import typing as t from datetime import datetime from random import random from ..data.data_scope import _DataScopes from ._map_dict import _MapDict if t.TYPE_CHECKING: from ..gui import Gui class _Bindings: def __init__(self, gui: "Gui") -> None: self.__gui = gui self.__scopes = _DataScopes() def _bind(self, name: str, value: t.Any) -> None: if hasattr(self, name): raise ValueError(f"Variable '{name}' is already bound") if not name.isidentifier(): raise ValueError(f"Variable name '{name}' is invalid") if isinstance(value, dict): setattr(self._get_data_scope(), name, _MapDict(value)) else: setattr(self._get_data_scope(), name, value) # prop = property(self.__value_getter(name), self.__value_setter(name)) setattr(_Bindings, name, self.__get_property(name)) def __get_property(self, name): def __setter(ud: _Bindings, value: t.Any): if isinstance(value, dict): value = _MapDict(value, None) ud.__gui._update_var(name, value) def __getter(ud: _Bindings) -> t.Any: value = getattr(ud._get_data_scope(), name) if isinstance(value, _MapDict): return _MapDict(value._dict, lambda k, v: ud.__gui._update_var(f"{name}.{k}", v)) else: return value return property(__getter, __setter) # Getter, Setter def _set_single_client(self, value: bool) -> None: self.__scopes.set_single_client(value) def _is_single_client(self) -> bool: return self.__scopes.is_single_client() def _get_or_create_scope(self, id: str): create = not id if create: id = f"{datetime.now().strftime('%Y%m%d%H%M%S%f')}-{random()}" self.__gui._send_ws_id(id) self.__scopes.create_scope(id) return id, create def _new_scopes(self): self.__scopes = _DataScopes() def _get_data_scope(self): return self.__scopes.get_scope(self.__gui._get_client_id()) def _get_all_scopes(self): return self.__scopes.get_all_scopes()
import re
import typing as t
from types import FrameType

from ._locals_context import _LocalsContext
from .get_imported_var import _get_imported_var
from .get_module_name import _get_module_name_from_frame, _get_module_name_from_imported_var


class _VariableDirectory:
    def __init__(self, locals_context: _LocalsContext):
        self._locals_context = locals_context
        self._default_module = ""
        self._var_dir: t.Dict[str, t.Dict] = {}
        self._var_head: t.Dict[str, t.List[t.Tuple[str, str]]] = {}
        self._imported_var_dir: t.Dict[str, t.List[t.Tuple[str, str, str]]] = {}

    def set_default(self, frame: FrameType) -> None:
        self._default_module = _get_module_name_from_frame(frame)
        self.add_frame(frame)

    def add_frame(self, frame: t.Optional[FrameType]) -> None:
        if frame is None:
            return
        module_name = _get_module_name_from_frame(frame)
        if module_name not in self._imported_var_dir:
            imported_var_list = _get_imported_var(frame)
            self._imported_var_dir[module_name] = imported_var_list

    def pre_process_module_import_all(self) -> None:
        for imported_dir in self._imported_var_dir.values():
            additional_var_list: t.List[t.Tuple[str, str, str]] = []
            for name, asname, module in imported_dir:
                if name != "*" or asname != "*":
                    continue
                if module not in self._locals_context._locals_map.keys():
                    continue
                with self._locals_context.set_locals_context(module):
                    additional_var_list.extend(
                        (v, v, module) for v in self._locals_context.get_locals().keys() if not v.startswith("_")
                    )
            imported_dir.extend(additional_var_list)

    def process_imported_var(self) -> None:
        self.pre_process_module_import_all()
        default_imported_dir = self._imported_var_dir[self._default_module]
        with self._locals_context.set_locals_context(self._default_module):
            for name, asname, module in default_imported_dir:
                if name == "*" and asname == "*":
                    continue
                imported_module_name = _get_module_name_from_imported_var(
                    name, self._locals_context.get_locals().get(asname, None), module
                )
                temp_var_name = self.add_var(asname, self._default_module)
                self.add_var(name, imported_module_name, temp_var_name)
        for k, v in self._imported_var_dir.items():
            with self._locals_context.set_locals_context(k):
                for name, asname, module in v:
                    if name == "*" and asname == "*":
                        continue
                    imported_module_name = _get_module_name_from_imported_var(
                        name, self._locals_context.get_locals().get(asname, None), module
                    )
                    var_name = self.get_var(name, imported_module_name)
                    var_asname = self.get_var(asname, k)
                    if var_name is None and var_asname is None:
                        temp_var_name = self.add_var(asname, k)
                        self.add_var(name, imported_module_name, temp_var_name)
                    elif var_name is not None:
                        self.add_var(asname, k, var_name)
                    else:
                        self.add_var(name, imported_module_name, var_asname)

    def add_var(self, name: str, module: t.Optional[str], var_name: t.Optional[str] = None) -> str:
        if module is None:
            module = self._default_module
        if gv := self.get_var(name, module):
            return gv
        var_encode = _variable_encode(name, module) if module != self._default_module else name
        if var_name is None:
            var_name = var_encode
        self.__add_var_head(name, module, var_name)
        if var_encode != var_name:
            var_name_decode, module_decode = _variable_decode(var_name)
            if module_decode is None:
                module_decode = self._default_module
            self.__add_var_head(var_name_decode, module_decode, var_encode)
        if name not in self._var_dir:
            self._var_dir[name] = {module: var_name}
        else:
            self._var_dir[name][module] = var_name
        return var_name

    def __add_var_head(self, name: str, module: str, var_head: str) -> None:
        if var_head not in self._var_head:
            self._var_head[var_head] = [(name, module)]
        else:
            self._var_head[var_head].append((name, module))

    def get_var(self, name: str, module: str) -> t.Optional[str]:
        if name in self._var_dir and module in self._var_dir[name]:
            return self._var_dir[name][module]
        return None


_MODULE_NAME_MAP: t.List[str] = []
_MODULE_ID = "_TPMDL_"
_RE_TPMDL_DECODE = re.compile(r"(.*?)" + _MODULE_ID + r"(\d+)$")


def _variable_encode(var_name: str, module_name: t.Optional[str]):
    if module_name is None:
        return var_name
    if module_name not in _MODULE_NAME_MAP:
        _MODULE_NAME_MAP.append(module_name)
    return f"{var_name}{_MODULE_ID}{_MODULE_NAME_MAP.index(module_name)}"


def _variable_decode(var_name: str):
    from ._evaluator import _Evaluator

    if result := _RE_TPMDL_DECODE.match(var_name):
        return _Evaluator._expr_decode(str(result[1])), _MODULE_NAME_MAP[int(result[2])]
    return _Evaluator._expr_decode(var_name), None


def _reset_name_map():
    _MODULE_NAME_MAP.clear()
from typing import Dict class _Singleton(type): _instances: Dict = {} def __call__(self, *args, **kwargs): if self not in self._instances: self._instances[self] = super(_Singleton, self).__call__(*args, **kwargs) return self._instances[self]
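# Usage sketch (illustrative only): classes using _Singleton as a metaclass share
# a single instance per class; the _Config class below is just a demo name.
if __name__ == "__main__":
    class _Config(metaclass=_Singleton):
        def __init__(self):
            self.count = 0

    a = _Config()
    b = _Config()
    a.count += 1
    assert a is b and b.count == 1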
from __future__ import annotations

import ast
import builtins
import re
import typing as t

from .._warnings import _warn

if t.TYPE_CHECKING:
    from ..gui import Gui

from . import (
    _get_client_var_name,
    _get_expr_var_name,
    _getscopeattr,
    _getscopeattr_drill,
    _hasscopeattr,
    _MapDict,
    _setscopeattr,
    _setscopeattr_drill,
    _TaipyBase,
    _variable_decode,
    _variable_encode,
)


class _Evaluator:
    # Regex to separate content from inside curly braces when evaluating f string expressions
    __EXPR_RE = re.compile(r"\{(([^\}]*)([^\{]*))\}")
    __EXPR_IS_EXPR = re.compile(r"[^\\][{}]")
    __EXPR_IS_EDGE_CASE = re.compile(r"^\s*{([^}]*)}\s*$")
    __EXPR_VALID_VAR_EDGE_CASE = re.compile(r"^([a-zA-Z\.\_0-9\[\]]*)$")
    __EXPR_EDGE_CASE_F_STRING = re.compile(r"[\{]*[a-zA-Z_][a-zA-Z0-9_]*:.+")
    __IS_TAIPYEXPR_RE = re.compile(r"TpExPr_(.*)")

    def __init__(self, default_bindings: t.Dict[str, t.Any], shared_variable: t.List[str]) -> None:
        # key = expression, value = hashed value of the expression
        self.__expr_to_hash: t.Dict[str, str] = {}
        # key = hashed value of the expression, value = expression
        self.__hash_to_expr: t.Dict[str, str] = {}
        # key = variable name of the expression, value = list of related expressions
        # ex: {x + y}
        # "x_TPMDL_0": ["{x + y}"],
        # "y_TPMDL_0": ["{x + y}"],
        self.__var_to_expr_list: t.Dict[str, t.List[str]] = {}
        # key = expression, value = map of related variables
        # "{x + y}": {"x": "x_TPMDL_0", "y": "y_TPMDL_0"}
        self.__expr_to_var_map: t.Dict[str, t.Dict[str, str]] = {}
        # instead of binding everywhere the types
        self.__global_ctx = default_bindings
        # expr to holders
        self.__expr_to_holders: t.Dict[str, t.Set[t.Type[_TaipyBase]]] = {}
        # shared variables between multiple clients
        self.__shared_variable = shared_variable

    @staticmethod
    def _expr_decode(s: str):
        return str(result[1]) if (result := _Evaluator.__IS_TAIPYEXPR_RE.match(s)) else s

    def get_hash_from_expr(self, expr: str) -> str:
        return self.__expr_to_hash.get(expr, expr)

    def get_expr_from_hash(self, hash_val: str) -> str:
        return self.__hash_to_expr.get(hash_val, hash_val)

    def get_shared_variables(self) -> t.List[str]:
        return self.__shared_variable

    def _is_expression(self, expr: str) -> bool:
        return len(_Evaluator.__EXPR_IS_EXPR.findall(expr)) != 0

    def _fetch_expression_list(self, expr: str) -> t.List:
        return [v[0] for v in _Evaluator.__EXPR_RE.findall(expr)]

    def _analyze_expression(self, gui: Gui, expr: str) -> t.Tuple[t.Dict[str, t.Any], t.Dict[str, str]]:
        var_val: t.Dict[str, t.Any] = {}
        var_map: t.Dict[str, str] = {}
        non_vars = list(self.__global_ctx.keys())
        non_vars.extend(dir(builtins))
        # Get a list of expressions (values that have been wrapped in curly braces {}) and find variables to bind
        for e in self._fetch_expression_list(expr):
            var_name = e.split(sep=".")[0]
            st = ast.parse('f"{' + e + '}"' if _Evaluator.__EXPR_EDGE_CASE_F_STRING.match(e) else e)
            args = [arg.arg for node in ast.walk(st) if isinstance(node, ast.arguments) for arg in node.args]
            targets = [
                compr.target.id  # type: ignore
                for node in ast.walk(st)
                if isinstance(node, ast.ListComp)
                for compr in node.generators
            ]
            for node in ast.walk(st):
                if isinstance(node, ast.Name):
                    var_name = node.id.split(sep=".")[0]
                    if var_name not in args and var_name not in targets and var_name not in non_vars:
                        try:
                            encoded_var_name = gui._bind_var(var_name)
                            var_val[var_name] = _getscopeattr_drill(gui, encoded_var_name)
                            var_map[var_name] = encoded_var_name
                        except AttributeError as e:
                            _warn(f"Variable '{var_name}' is not defined (in expression '{expr}')", e)
        return var_val, var_map

    def __save_expression(
        self,
        gui: Gui,
        expr: str,
        expr_hash: t.Optional[str],
        expr_evaluated: t.Optional[t.Any],
        var_map: t.Dict[str, str],
    ):
        if expr in self.__expr_to_hash:
            expr_hash = self.__expr_to_hash[expr]
            gui._bind_var_val(expr_hash, expr_evaluated)
            return expr_hash
        if expr_hash is None:
            expr_hash = _get_expr_var_name(expr)
        else:
            # edge case, only a single variable
            expr_hash = f"tpec_{_get_client_var_name(expr)}"
        self.__expr_to_hash[expr] = expr_hash
        gui._bind_var_val(expr_hash, expr_evaluated)
        self.__hash_to_expr[expr_hash] = expr
        for var in var_map.values():
            if var not in self.__global_ctx.keys():
                lst = self.__var_to_expr_list.get(var)
                if lst is None:
                    self.__var_to_expr_list[var] = [expr]
                else:
                    lst.append(expr)
        if expr not in self.__expr_to_var_map:
            self.__expr_to_var_map[expr] = var_map
        # save expr_hash to shared variable if valid
        for encoded_var_name in var_map.values():
            var_name, module_name = _variable_decode(encoded_var_name)
            # only variables in the main module will be taken into account
            if module_name is not None and module_name != gui._get_default_module_name():
                continue
            if var_name in self.__shared_variable:
                self.__shared_variable.append(expr_hash)
        return expr_hash

    def evaluate_bind_holder(self, gui: Gui, holder: t.Type[_TaipyBase], expr: str) -> str:
        expr_hash = self.__expr_to_hash.get(expr, "unknownExpr")
        hash_name = self.__get_holder_hash(holder, expr_hash)
        expr_lit = expr.replace("'", "\\'")
        holder_expr = f"{holder.__name__}({expr},'{expr_lit}')"
        self.__evaluate_holder(gui, holder, expr)
        if a_set := self.__expr_to_holders.get(expr):
            a_set.add(holder)
        else:
            self.__expr_to_holders[expr] = {holder}
        self.__expr_to_hash[holder_expr] = hash_name
        # expression is only the first part ...
        expr = expr.split(".")[0]
        self.__expr_to_var_map[holder_expr] = {expr: expr}
        if a_list := self.__var_to_expr_list.get(expr):
            if holder_expr not in a_list:
                a_list.append(holder_expr)
        else:
            self.__var_to_expr_list[expr] = [holder_expr]
        return hash_name

    def evaluate_holders(self, gui: Gui, expr: str) -> t.List[str]:
        lst = []
        for hld in self.__expr_to_holders.get(expr, []):
            hash_val = self.__get_holder_hash(hld, self.__expr_to_hash.get(expr, ""))
            self.__evaluate_holder(gui, hld, expr)
            lst.append(hash_val)
        return lst

    @staticmethod
    def __get_holder_hash(holder: t.Type[_TaipyBase], expr_hash: str) -> str:
        return f"{holder.get_hash()}_{_get_client_var_name(expr_hash)}"

    def __evaluate_holder(self, gui: Gui, holder: t.Type[_TaipyBase], expr: str) -> t.Optional[_TaipyBase]:
        try:
            expr_hash = self.__expr_to_hash.get(expr, "unknownExpr")
            holder_hash = self.__get_holder_hash(holder, expr_hash)
            expr_value = _getscopeattr_drill(gui, expr_hash)
            holder_value = _getscopeattr(gui, holder_hash, None)
            if not isinstance(holder_value, _TaipyBase):
                holder_value = holder(expr_value, expr_hash)
                _setscopeattr(gui, holder_hash, holder_value)
            else:
                holder_value.set(expr_value)
            return holder_value
        except Exception as e:
            _warn(f"Cannot evaluate expression {holder.__name__}({expr_hash},'{expr_hash}') for {expr}", e)
        return None

    def evaluate_expr(self, gui: Gui, expr: str) -> t.Any:
        if not self._is_expression(expr):
            return expr
        var_val, var_map = self._analyze_expression(gui, expr)
        expr_hash = None
        is_edge_case = False
        # The expr_string is computed here in case expr gets replaced by an edge case
        expr_string = 'f"' + expr.replace('"', '\\"') + '"'
        # simplify expression if it only contains var_name
        m = _Evaluator.__EXPR_IS_EDGE_CASE.match(expr)
        if m and not _Evaluator.__EXPR_EDGE_CASE_F_STRING.match(expr):
            expr = m.group(1)
            expr_hash = expr if _Evaluator.__EXPR_VALID_VAR_EDGE_CASE.match(expr) else None
            is_edge_case = True
        # validate whether expression has already been evaluated
        module_name = gui._get_locals_context()
        not_encoded_expr = expr
        expr = f"TpExPr_{_variable_encode(expr, module_name)}"
        if expr in self.__expr_to_hash and _hasscopeattr(gui, self.__expr_to_hash[expr]):
            return self.__expr_to_hash[expr]
        try:
            # evaluate expressions
            ctx: t.Dict[str, t.Any] = {}
            ctx.update(self.__global_ctx)
            # entries in var_val are not always seen (NameError) when passed as locals
            ctx.update(var_val)
            expr_evaluated = eval(not_encoded_expr if is_edge_case else expr_string, ctx)
        except Exception as e:
            _warn(f"Cannot evaluate expression '{not_encoded_expr if is_edge_case else expr_string}'", e)
            expr_evaluated = None
        # save the expression if it needs to be re-evaluated
        return self.__save_expression(gui, expr, expr_hash, expr_evaluated, var_map)

    def refresh_expr(self, gui: Gui, var_name: str, holder: t.Optional[_TaipyBase]):
        """
        This function executes when the __request_var_update function receives a refresh order.
        """
        expr = self.__hash_to_expr.get(var_name)
        if expr:
            expr_decoded, _ = _variable_decode(expr)
            var_map = self.__expr_to_var_map.get(expr, {})
            eval_dict = {k: _getscopeattr_drill(gui, gui._bind_var(v)) for k, v in var_map.items()}
            if self._is_expression(expr_decoded):
                expr_string = 'f"' + _variable_decode(expr)[0].replace('"', '\\"') + '"'
            else:
                expr_string = expr_decoded
            try:
                ctx: t.Dict[str, t.Any] = {}
                ctx.update(self.__global_ctx)
                ctx.update(eval_dict)
                expr_evaluated = eval(expr_string, ctx)
                _setscopeattr(gui, var_name, expr_evaluated)
                if holder is not None:
                    holder.set(expr_evaluated)
            except Exception as e:
                _warn(f"Exception raised evaluating {expr_string}", e)

    def re_evaluate_expr(self, gui: Gui, var_name: str) -> t.Set[str]:
        """
        This function executes when the _update_var function is handling an expression
        with only a single variable.
        """
        modified_vars: t.Set[str] = set()
        # Verify that the current hash is an edge case one (only a single variable inside the original expression)
        if var_name.startswith("tp_"):
            return modified_vars
        expr_original = None
        # if var_name starts with tpec_ --> it is an edge case with a modified var
        if var_name.startswith("tpec_"):
            # backup for later reference
            var_name_original = var_name
            expr_original = self.__hash_to_expr[var_name]
            temp_expr_var_map = self.__expr_to_var_map[expr_original]
            if len(temp_expr_var_map) <= 1:
                # since this is an edge case --> only 1 item in the dict, and that item is the original var
                for v in temp_expr_var_map.values():
                    var_name = v
                # construct the correct var_path to reassign values
                var_name_full, _ = _variable_decode(expr_original)
                var_name_full = var_name_full.split(".")
                var_name_full[0] = var_name
                var_name_full = ".".join(var_name_full)
                _setscopeattr_drill(gui, var_name_full, _getscopeattr(gui, var_name_original))
            else:
                # multiple key-value pairs in expr_var_map --> expr is a special case like a["b"]
                key = ""
                for v in temp_expr_var_map.values():
                    if isinstance(_getscopeattr(gui, v), _MapDict):
                        var_name = v
                    else:
                        key = v
                if key == "":
                    return modified_vars
                _setscopeattr_drill(gui, f"{var_name}.{_getscopeattr(gui, key)}", _getscopeattr(gui, var_name_original))
        # A middle check to see if var_name is from _MapDict
        if "." in var_name:
            var_name = var_name[: var_name.index(".")]
        # otherwise, the var_name is correct and doesn't require any resolution
        if var_name not in self.__var_to_expr_list:
            # _warn("{var_name} not found.")
            return modified_vars
        # refresh expressions and holders
        for expr in self.__var_to_expr_list[var_name]:
            expr_decoded, _ = _variable_decode(expr)
            hash_expr = self.__expr_to_hash.get(expr, "UnknownExpr")
            if expr != var_name and not expr.startswith(_TaipyBase._HOLDER_PREFIX):
                expr_var_map = self.__expr_to_var_map.get(expr)  # ["x", "y"]
                if expr_var_map is None:
                    _warn(f"Something is amiss with expression list for {expr}.")
                else:
                    eval_dict = {k: _getscopeattr_drill(gui, gui._bind_var(v)) for k, v in expr_var_map.items()}
                    if self._is_expression(expr_decoded):
                        expr_string = 'f"' + _variable_decode(expr)[0].replace('"', '\\"') + '"'
                    else:
                        expr_string = expr_decoded
                    try:
                        ctx: t.Dict[str, t.Any] = {}
                        ctx.update(self.__global_ctx)
                        ctx.update(eval_dict)
                        expr_evaluated = eval(expr_string, ctx)
                        _setscopeattr(gui, hash_expr, expr_evaluated)
                    except Exception as e:
                        _warn(f"Exception raised evaluating {expr_string}", e)
            # refresh holders if any
            for h in self.__expr_to_holders.get(expr, []):
                holder_hash = self.__get_holder_hash(h, self.get_hash_from_expr(expr))
                if holder_hash not in modified_vars:
                    _setscopeattr(gui, holder_hash, self.__evaluate_holder(gui, h, expr))
                    modified_vars.add(holder_hash)
            modified_vars.add(hash_expr)
        return modified_vars

    def _get_instance_in_context(self, name: str):
        return self.__global_ctx.get(name)
import re import pandas as pd def _get_data_type(value): if pd.api.types.is_bool_dtype(value): return "bool" elif pd.api.types.is_integer_dtype(value): return "int" elif pd.api.types.is_float_dtype(value): return "float" return re.match(r"^<class '(.*\.)?(.*?)(\d\d)?'>", str(type(value))).group(2)
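# Usage sketch (illustrative only): pandas dtypes map to simple front-end type
# names; anything else falls back to a cleaned-up class name via the regex.
if __name__ == "__main__":
    df = pd.DataFrame({"a": [1, 2], "b": [0.5, 1.5], "c": [True, False]})
    assert _get_data_type(df.dtypes["a"]) == "int"
    assert _get_data_type(df.dtypes["b"]) == "float"
    assert _get_data_type(df.dtypes["c"]) == "bool"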
import ast import inspect import typing as t from types import FrameType def _get_imported_var(frame: FrameType) -> t.List[t.Tuple[str, str, str]]: st = ast.parse(inspect.getsource(frame)) var_list: t.List[t.Tuple[str, str, str]] = [] for node in ast.walk(st): if isinstance(node, ast.ImportFrom): # get the imported element as (name, asname, module) # ex: from module1 import a as x --> ("a", "x", "module1") var_list.extend( ( child_node.name, child_node.asname if child_node.asname is not None else child_node.name, node.module or "", ) for child_node in node.names ) return var_list
import re import typing as t from datetime import date, datetime, time from dateutil import parser from pytz import utc from .._warnings import _warn def _date_to_string(date_val: t.Union[datetime, date, time]) -> str: if isinstance(date_val, datetime): # return date.isoformat() + 'Z', if possible try: return date_val.astimezone(utc).isoformat() except Exception as e: # astimezone() fails on Windows for pre-epoch times # See https://bugs.python.org/issue36759 _warn("Exception raised converting date to ISO 8601", e) return date_val.isoformat() def _string_to_date(date_str: str) -> t.Union[datetime, date]: # return datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%fZ') # return datetime.fromisoformat(date_str) date = parser.parse(date_str) date_regex = r"^[A-Z][a-z]{2} (?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{2} \d{4}$" return date.date() if re.match(date_regex, date_str) else date
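# Usage sketch (illustrative only): datetimes are serialized as ISO-8601 in UTC
# and parsed back with dateutil; the round trip preserves the instant.
if __name__ == "__main__":
    from datetime import timezone

    dt = datetime(2024, 1, 2, 3, 4, 5, tzinfo=timezone.utc)
    s = _date_to_string(dt)  # e.g. "2024-01-02T03:04:05+00:00"
    assert _string_to_date(s) == dt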
from importlib import util def _is_in_notebook(): # pragma: no cover try: if not util.find_spec("IPython"): return False from IPython import get_ipython ipython = get_ipython() if ipython is None or "IPKernelApp" not in ipython.config: return False except (ImportError, AttributeError): return False return True
import typing as t from pathlib import Path def _get_non_existent_file_path(dir_path: Path, file_name: str) -> Path: if not file_name: file_name = "taipy_file.bin" file_path = dir_path / file_name index = 0 file_stem = file_path.stem file_suffix = file_path.suffix while file_path.exists(): file_path = dir_path / f"{file_stem}.{index}{file_suffix}" index += 1 return file_path
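# Usage sketch (illustrative only): when the target name is taken, an index is
# inserted before the suffix ("report.csv" -> "report.0.csv", "report.1.csv", ...).
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        (Path(tmp) / "report.csv").touch()
        assert _get_non_existent_file_path(Path(tmp), "report.csv").name == "report.0.csv"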
import re import typing as t _RE_PD_TYPE = re.compile(r"^([^\s\d\[]+)(\d+)(\[(.*,\s(\S+))\])?") def _get_date_col_str_name(columns: t.List[str], col: str) -> str: suffix = "_str" while col + suffix in columns: suffix += "_" return col + suffix
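# Usage sketch (illustrative only): "_str" is appended, with extra underscores
# added until the generated column name no longer collides.
if __name__ == "__main__":
    assert _get_date_col_str_name(["Date"], "Date") == "Date_str"
    assert _get_date_col_str_name(["Date", "Date_str"], "Date") == "Date_str_"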
import typing as t import numpy import pandas as pd from ..gui import Gui from .data_format import _DataFormat from .pandas_data_accessor import _PandasDataAccessor class _NumpyDataAccessor(_PandasDataAccessor): __types = (numpy.ndarray,) @staticmethod def get_supported_classes() -> t.List[str]: return [t.__name__ for t in _NumpyDataAccessor.__types] # type: ignore def __get_dataframe(self, value: t.Any) -> pd.DataFrame: return pd.DataFrame(value) def get_col_types(self, var_name: str, value: t.Any) -> t.Union[None, t.Dict[str, str]]: # type: ignore if isinstance(value, _NumpyDataAccessor.__types): # type: ignore return super().get_col_types(var_name, self.__get_dataframe(value)) return None def get_data( # noqa: C901 self, guiApp: Gui, var_name: str, value: t.Any, payload: t.Dict[str, t.Any], data_format: _DataFormat ) -> t.Dict[str, t.Any]: if isinstance(value, _NumpyDataAccessor.__types): # type: ignore return super().get_data(guiApp, var_name, self.__get_dataframe(value), payload, data_format) return {}
import typing as t import pandas as pd from ..gui import Gui from ..utils import _MapDict from .data_format import _DataFormat from .pandas_data_accessor import _PandasDataAccessor class _ArrayDictDataAccessor(_PandasDataAccessor): __types = (dict, list, tuple, _MapDict) @staticmethod def get_supported_classes() -> t.List[str]: return [t.__name__ for t in _ArrayDictDataAccessor.__types] # type: ignore def __get_dataframe(self, value: t.Any) -> t.Union[t.List[pd.DataFrame], pd.DataFrame]: if isinstance(value, list): if not value or isinstance(value[0], (str, int, float, bool)): return pd.DataFrame({"0": value}) types = {type(x) for x in value} if len(types) == 1: type_elt = next(iter(types), None) if type_elt == list: lengths = {len(x) for x in value} return ( pd.DataFrame(value) if len(lengths) == 1 else [pd.DataFrame({f"{i}/0": v}) for i, v in enumerate(value)] ) elif type_elt == dict: return [pd.DataFrame(v) for v in value] elif type_elt == _MapDict: return [pd.DataFrame(v._dict) for v in value] elif type_elt == pd.DataFrame: return value elif len(types) == 2 and list in types and pd.DataFrame in types: return [v if isinstance(v, pd.DataFrame) else pd.DataFrame({f"{i}/0": v}) for i, v in enumerate(value)] elif isinstance(value, _MapDict): return pd.DataFrame(value._dict) return pd.DataFrame(value) def get_col_types(self, var_name: str, value: t.Any) -> t.Union[None, t.Dict[str, str]]: # type: ignore if isinstance(value, _ArrayDictDataAccessor.__types): # type: ignore return super().get_col_types(var_name, self.__get_dataframe(value)) return None def get_data( # noqa: C901 self, guiApp: Gui, var_name: str, value: t.Any, payload: t.Dict[str, t.Any], data_format: _DataFormat ) -> t.Dict[str, t.Any]: if isinstance(value, _ArrayDictDataAccessor.__types): # type: ignore return super().get_data(guiApp, var_name, self.__get_dataframe(value), payload, data_format) return {}
from .data_accessor import _DataAccessor from .decimator import LTTB, RDP, MinMaxDecimator, ScatterDecimator from .utils import Decimator
import inspect
import typing as t
from abc import ABC, abstractmethod

from .._warnings import _warn
from ..utils import _TaipyData
from .data_format import _DataFormat


class _DataAccessor(ABC):
    _WS_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"

    @staticmethod
    @abstractmethod
    def get_supported_classes() -> t.List[str]:
        pass

    @abstractmethod
    def get_data(
        self, guiApp: t.Any, var_name: str, value: t.Any, payload: t.Dict[str, t.Any], data_format: _DataFormat
    ) -> t.Dict[str, t.Any]:
        pass

    @abstractmethod
    def get_col_types(self, var_name: str, value: t.Any) -> t.Dict[str, str]:
        pass


class _InvalidDataAccessor(_DataAccessor):
    @staticmethod
    def get_supported_classes() -> t.List[str]:
        return [type(None).__name__]

    def get_data(
        self, guiApp: t.Any, var_name: str, value: t.Any, payload: t.Dict[str, t.Any], data_format: _DataFormat
    ) -> t.Dict[str, t.Any]:
        return {}

    def get_col_types(self, var_name: str, value: t.Any) -> t.Dict[str, str]:
        return {}


class _DataAccessors(object):
    def __init__(self) -> None:
        self.__access_4_type: t.Dict[str, _DataAccessor] = {}
        self.__invalid_data_accessor = _InvalidDataAccessor()
        self.__data_format = _DataFormat.JSON

        from .array_dict_data_accessor import _ArrayDictDataAccessor
        from .numpy_data_accessor import _NumpyDataAccessor
        from .pandas_data_accessor import _PandasDataAccessor

        self._register(_PandasDataAccessor)
        self._register(_ArrayDictDataAccessor)
        self._register(_NumpyDataAccessor)

    def _register(self, cls: t.Type[_DataAccessor]) -> None:
        if not inspect.isclass(cls):
            raise AttributeError("The argument of 'DataAccessors.register' should be a class")
        if not issubclass(cls, _DataAccessor):
            raise TypeError(f"Class {cls.__name__} is not a subclass of DataAccessor")
        names = cls.get_supported_classes()
        if not names:
            raise TypeError(f"method {cls.__name__}.get_supported_classes returned an invalid value")
        # check existence
        inst: t.Optional[_DataAccessor] = None
        for name in names:
            inst = self.__access_4_type.get(name)
            if inst:
                break
        if inst is None:
            try:
                inst = cls()
            except Exception as e:
                raise TypeError(f"Class {cls.__name__} cannot be instantiated") from e
        if inst:
            for name in names:
                self.__access_4_type[name] = inst  # type: ignore

    def __get_instance(self, value: _TaipyData) -> _DataAccessor:  # type: ignore
        value = value.get()
        access = self.__access_4_type.get(type(value).__name__)
        if access is None:
            _warn(f"Can't find Data Accessor for type {type(value).__name__}.")
            return self.__invalid_data_accessor
        return access

    def _get_data(
        self, guiApp: t.Any, var_name: str, value: _TaipyData, payload: t.Dict[str, t.Any]
    ) -> t.Dict[str, t.Any]:
        return self.__get_instance(value).get_data(guiApp, var_name, value.get(), payload, self.__data_format)

    def _get_col_types(self, var_name: str, value: _TaipyData) -> t.Dict[str, str]:
        return self.__get_instance(value).get_col_types(var_name, value.get())

    def _set_data_format(self, data_format: _DataFormat):
        self.__data_format = data_format
from __future__ import annotations

import typing as t
from abc import ABC, abstractmethod

import numpy as np

from .._warnings import _warn

if t.TYPE_CHECKING:
    import pandas as pd


class Decimator(ABC):
    """Base class for decimating chart data.

    *Decimating* is the term used to name the process of reducing the number of
    data points displayed in charts while retaining the overall shape of the traces.
    `Decimator` is a base class that does decimation on data sets.

    Taipy GUI comes out-of-the-box with several implementations of this class for
    different use cases.
    """

    _CHART_MODES: t.List[str] = []

    def __init__(self, threshold: t.Optional[int], zoom: t.Optional[bool]) -> None:
        """Initialize a new `Decimator`.

        Arguments:
            threshold (Optional[int]): The minimum amount of data points before the
                decimator class is applied.
            zoom (Optional[bool]): set to True to reapply the decimation when zoom or
                re-layout events are triggered.
        """
        super().__init__()
        self.threshold = threshold
        self._zoom = zoom if zoom is not None else True

    def _is_applicable(self, data: t.Any, nb_rows_max: int, chart_mode: str):
        if chart_mode not in self._CHART_MODES:
            _warn(f"{type(self).__name__} is only applicable for {' '.join(self._CHART_MODES)}.")
            return False
        if self.threshold is None:
            if nb_rows_max < len(data):
                return True
        elif self.threshold < len(data):
            return True
        return False

    @abstractmethod
    def decimate(self, data: np.ndarray, payload: t.Dict[str, t.Any]) -> np.ndarray:
        """Decimate function.

        This method is executed when the appropriate conditions specified in the
        constructor are met. This function implements the algorithm that determines
        which data points are kept or dropped.

        Arguments:
            data (numpy.ndarray): An array containing all the data points represented
                as tuples.
            payload (Dict[str, any]): additional information on charts that is provided
                at runtime.

        Returns:
            An array of Boolean mask values, one for each index of *data*: True
            indicates that the corresponding data point should be preserved, False
            that it should be dropped.
        """
        raise NotImplementedError


def _df_data_filter(
    dataframe: pd.DataFrame,
    x_column_name: t.Optional[str],
    y_column_name: str,
    z_column_name: str,
    decimator: Decimator,
    payload: t.Dict[str, t.Any],
    is_copied: bool,
):
    df = dataframe.copy() if not is_copied else dataframe
    if not x_column_name:
        index = 0
        while f"tAiPy_index_{index}" in df.columns:
            index += 1
        x_column_name = f"tAiPy_index_{index}"
        df[x_column_name] = df.index
    column_list = [x_column_name, y_column_name, z_column_name] if z_column_name else [x_column_name, y_column_name]
    points = df[column_list].to_numpy()
    mask = decimator.decimate(points, payload)
    return df[mask], is_copied


def _df_relayout(
    dataframe: pd.DataFrame,
    x_column: t.Optional[str],
    y_column: str,
    chart_mode: str,
    x0: t.Optional[float],
    x1: t.Optional[float],
    y0: t.Optional[float],
    y1: t.Optional[float],
    is_copied: bool,
):
    if chart_mode not in ["lines+markers", "markers"]:
        return dataframe, is_copied
    # if the chart data is invalid
    if x0 is None or x1 is None or y0 is None or y1 is None:
        return dataframe, is_copied
    df = dataframe.copy() if not is_copied else dataframe
    is_copied = True
    has_x_col = True
    if not x_column:
        index = 0
        while f"tAiPy_index_{index}" in df.columns:
            index += 1
        x_column = f"tAiPy_index_{index}"
        df[x_column] = df.index
        has_x_col = False
    if chart_mode == "lines+markers":
        # only filter by the x column
        df = df.loc[(df[x_column] > x0) & (df[x_column] < x1)]
    else:
        # filter by both x and y columns
        df = df.loc[(df[x_column] > x0) & (df[x_column] < x1) & (df[y_column] > y0) & (df[y_column] < y1)]  # noqa
    if not has_x_col:
        df.drop(x_column, axis=1, inplace=True)
    return df, is_copied
from enum import Enum class _DataFormat(Enum): JSON = "JSON" APACHE_ARROW = "ARROW"
import base64
import pathlib
import tempfile
import typing as t
import urllib.parse
from importlib import util
from pathlib import Path
from sys import getsizeof

from .._warnings import _warn
from ..utils import _get_non_existent_file_path, _variable_decode

_has_magic_module = False
if util.find_spec("magic"):
    import magic

    _has_magic_module = True


class _ContentAccessor:
    def __init__(self, data_url_max_size: int) -> None:
        self.__content_paths: t.Dict[str, pathlib.Path] = {}
        self.__url_is_image: t.Dict[str, bool] = {}
        self.__paths: t.Dict[pathlib.Path, str] = {}
        self.__data_url_max_size = data_url_max_size
        self.__temp_dir_path = Path(tempfile.gettempdir())

    def get_path(self, path: pathlib.Path) -> str:
        url_path = self.__paths.get(path)
        if url_path is None:
            self.__paths[path] = url_path = "taipyStatic" + str(len(self.__paths))
        return url_path

    def get_content_path(
        self, url_path: str, file_name: str, bypass: t.Optional[str]
    ) -> t.Tuple[t.Union[pathlib.Path, None], bool]:
        content_path = self.__content_paths.get(url_path)
        if not content_path:
            return (None, True)
        return (content_path, bypass is not None or self.__url_is_image.get(f"{url_path}/{file_name}", False))

    def __get_mime_from_file(self, path: pathlib.Path):
        if _has_magic_module:
            try:
                return magic.from_file(str(path), mime=True)
            except Exception as e:
                _warn(f"Exception reading mime type in '{path}'", e)
        return None

    def __get_display_name(self, var_name: str) -> str:
        if not isinstance(var_name, str):
            return var_name
        if var_name.startswith("_tpC_"):
            var_name = var_name[5:]
        if var_name.startswith("tpec_"):
            var_name = var_name[5:]
        return _variable_decode(var_name)[0]

    def get_info(self, var_name: str, value: t.Any, image: bool) -> t.Union[str, t.Tuple[str], t.Any]:  # noqa: C901
        if value is None:
            return ""
        newvalue = value
        mime = None
        if not isinstance(newvalue, (str, pathlib.Path)) and (
            getsizeof(newvalue) > self.__data_url_max_size or not _has_magic_module
        ):
            # write data to file
            file_name = "TaiPyContent.bin"
            if _has_magic_module:
                try:
                    mime = magic.from_buffer(value, mime=True)
                    file_name = "TaiPyContent." + mime.split("/")[-1]
                except Exception as e:
                    _warn(f"{self.__get_display_name(var_name)} ({type(value)}) cannot be typed", e)
            file_path = _get_non_existent_file_path(self.__temp_dir_path, file_name)
            try:
                with open(file_path, "wb") as temp_file:
                    temp_file.write(value)
            except Exception as e:
                _warn(f"{self.__get_display_name(var_name)} ({type(value)}) cannot be written to file {file_path}", e)
            newvalue = file_path
        if isinstance(newvalue, (str, pathlib.Path)):
            path = pathlib.Path(newvalue) if isinstance(newvalue, str) else newvalue
            if not path.is_file():
                if (
                    var_name != "Gui.download"
                    and not str(path).startswith("http")
                    and not str(path).startswith("/")
                    and not str(path).strip().lower().startswith("<svg")
                ):
                    _warn(f"{self.__get_display_name(var_name)}: file '{value}' does not exist.")
                return str(value)
            if image:
                if not mime:
                    mime = self.__get_mime_from_file(path)
                if mime and not mime.startswith("image"):
                    _warn(f"{self.__get_display_name(var_name)}: file '{path}' mime type ({mime}) is not an image.")
                    return f"Invalid content: {mime}"
            dir_path = path.resolve().parent
            url_path = self.get_path(dir_path)
            self.__content_paths[url_path] = dir_path
            file_url = f"{url_path}/{path.name}"
            self.__url_is_image[file_url] = image
            return (urllib.parse.quote_plus(file_url, safe="/"),)
        elif _has_magic_module:
            try:
                mime = magic.from_buffer(value, mime=True)
                if not image or mime.startswith("image"):
                    return f"data:{mime};base64," + str(base64.b64encode(value), "utf-8")
                _warn(f"{self.__get_display_name(var_name)}: ({type(value)}) does not have an image mime type: {mime}.")
                return f"Invalid content: {mime}"
            except Exception as e:
                _warn(f"{self.__get_display_name(var_name)}: ({type(value)}) cannot be base64 encoded", e)
                return "Cannot be base64 encoded"
        else:
            _warn(
                "python-magic (and python-magic-bin on Windows) packages need to be installed "
                "if you want to process content as an array of bytes."
            )
            return "Cannot guess content type"
from __future__ import annotations

import typing as t
from types import SimpleNamespace

from .._warnings import _warn


class _DataScopes:
    _GLOBAL_ID = "global"

    def __init__(self) -> None:
        self.__scopes: t.Dict[str, SimpleNamespace] = {_DataScopes._GLOBAL_ID: SimpleNamespace()}
        self.__single_client = True

    def set_single_client(self, value: bool) -> None:
        self.__single_client = value

    def is_single_client(self) -> bool:
        return self.__single_client

    def get_scope(self, client_id: t.Optional[str]) -> SimpleNamespace:
        if self.__single_client:
            return self.__scopes[_DataScopes._GLOBAL_ID]
        # global context in case request is not registered or client_id is not available
        # (such as in the context of running tests)
        if not client_id:
            _warn("Empty session id, using global scope instead.")
            return self.__scopes[_DataScopes._GLOBAL_ID]
        if client_id not in self.__scopes:
            _warn(
                f"Session id {client_id} not found in data scope. Taipy will automatically create a scope for this session id but you may have to reload your page."
            )
            self.create_scope(client_id)
        return self.__scopes[client_id]

    def get_all_scopes(self) -> t.Dict[str, SimpleNamespace]:
        return self.__scopes

    def create_scope(self, id: str) -> None:
        if self.__single_client:
            return
        if id is None:
            _warn("Empty session id, might be due to unestablished WebSocket connection.")
            return
        if id not in self.__scopes:
            self.__scopes[id] = SimpleNamespace()

    def delete_scope(self, id: str) -> None:  # pragma: no cover
        if self.__single_client:
            return
        if id is None:
            _warn("Empty session id, might be due to unestablished WebSocket connection.")
            return
        if id in self.__scopes:
            del self.__scopes[id]
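To make the scoping rule concrete: each session id gets its own `SimpleNamespace`, so attribute writes in one session never leak into another. An illustrative reduction of what `get_scope()` guarantees (names below are invented for the example):

```py
# Illustrative only: per-session isolation as implemented by _DataScopes.
from types import SimpleNamespace

scopes = {"global": SimpleNamespace()}


def scope_for(client_id: str) -> SimpleNamespace:
    # Mirrors get_scope(): an unknown id gets a freshly created namespace.
    return scopes.setdefault(client_id, SimpleNamespace())


scope_for("session-a").count = 1
scope_for("session-b").count = 99
assert scope_for("session-a").count == 1  # sessions do not share attributes
```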
import typing as t from datetime import datetime from importlib import util import numpy as np import pandas as pd from .._warnings import _warn from ..gui import Gui from ..types import PropertyType from ..utils import _RE_PD_TYPE, _get_date_col_str_name from .data_accessor import _DataAccessor from .data_format import _DataFormat from .utils import _df_data_filter, _df_relayout _has_arrow_module = False if util.find_spec("pyarrow"): _has_arrow_module = True import pyarrow as pa class _PandasDataAccessor(_DataAccessor): __types = (pd.DataFrame,) __INDEX_COL = "_tp_index" __AGGREGATE_FUNCTIONS: t.List[str] = ["count", "sum", "mean", "median", "min", "max", "std", "first", "last"] @staticmethod def get_supported_classes() -> t.List[str]: return [t.__name__ for t in _PandasDataAccessor.__types] # type: ignore @staticmethod def __user_function( row: pd.Series, gui: Gui, column_name: t.Optional[str], user_function: t.Callable, function_name: str ) -> str: # pragma: no cover args = [] if column_name: args.append(row[column_name]) args.extend((row.name, row)) if column_name: args.append(column_name) try: return str(gui._call_function_with_state(user_function, args)) except Exception as e: _warn(f"Exception raised when calling user function {function_name}()", e) return "" def __is_date_column(self, data: pd.DataFrame, col_name: str) -> bool: col_types = data.dtypes[data.dtypes.index.astype(str) == col_name] return len(col_types[col_types.astype(str).str.startswith("datetime")]) > 0 # type: ignore def __build_transferred_cols( self, gui: Gui, payload_cols: t.Any, dataframe: pd.DataFrame, styles: t.Optional[t.Dict[str, str]] = None, tooltips: t.Optional[t.Dict[str, str]] = None, is_copied: t.Optional[bool] = False, new_indexes: t.Optional[np.ndarray] = None, handle_nan: t.Optional[bool] = False, ) -> pd.DataFrame: if isinstance(payload_cols, list) and len(payload_cols): col_types = dataframe.dtypes[dataframe.dtypes.index.astype(str).isin(payload_cols)] else: col_types = dataframe.dtypes cols = col_types.index.astype(str).tolist() if styles: if not is_copied: # copy the df so that we don't "mess" with the user's data dataframe = dataframe.copy() is_copied = True for k, v in styles.items(): col_applied = False func = gui._get_user_function(v) if callable(func): col_applied = self.__apply_user_function(gui, func, k if k in cols else None, v, dataframe, "tps__") if not col_applied: dataframe[v] = v cols.append(col_applied or v) if tooltips: if not is_copied: # copy the df so that we don't "mess" with the user's data dataframe = dataframe.copy() is_copied = True for k, v in tooltips.items(): col_applied = False func = gui._get_user_function(v) if callable(func): col_applied = self.__apply_user_function(gui, func, k if k in cols else None, v, dataframe, "tpt__") cols.append(col_applied or v) # deal with dates datecols = col_types[col_types.astype(str).str.startswith("datetime")].index.tolist() # type: ignore if len(datecols) != 0: if not is_copied: # copy the df so that we don't "mess" with the user's data dataframe = dataframe.copy() tz = Gui._get_timezone() for col in datecols: newcol = _get_date_col_str_name(cols, col) cols.append(newcol) re_type = _RE_PD_TYPE.match(str(col_types[col])) grps = re_type.groups() if re_type else () if len(grps) > 4 and grps[4]: dataframe[newcol] = ( dataframe[col] .dt.tz_convert("UTC") .dt.strftime(_DataAccessor._WS_DATE_FORMAT) .astype(str) .replace("nan", "NaT" if handle_nan else None) ) else: dataframe[newcol] = ( dataframe[col] .dt.tz_localize(tz) 
.dt.tz_convert("UTC") .dt.strftime(_DataAccessor._WS_DATE_FORMAT) .astype(str) .replace("nan", "NaT" if handle_nan else None) ) # remove the date columns from the list of columns cols = list(set(cols) - set(datecols)) dataframe = dataframe.iloc[new_indexes] if new_indexes is not None else dataframe dataframe = dataframe.loc[:, dataframe.dtypes[dataframe.dtypes.index.astype(str).isin(cols)].index] # type: ignore return dataframe def __apply_user_function( self, gui: Gui, user_function: t.Callable, column_name: t.Optional[str], function_name: str, data: pd.DataFrame, prefix: t.Optional[str], ): try: new_col_name = f"{prefix}{column_name}__{function_name}" if column_name else function_name data[new_col_name] = data.apply( _PandasDataAccessor.__user_function, axis=1, args=(gui, column_name, user_function, function_name), ) return new_col_name except Exception as e: _warn(f"Exception raised when invoking user function {function_name}()", e) return False def __format_data( self, data: pd.DataFrame, data_format: _DataFormat, orient: str, start: t.Optional[int] = None, rowcount: t.Optional[int] = None, data_extraction: t.Optional[bool] = None, handle_nan: t.Optional[bool] = False, ) -> t.Dict[str, t.Any]: ret: t.Dict[str, t.Any] = { "format": str(data_format.value), } if rowcount is not None: ret["rowcount"] = rowcount if start is not None: ret["start"] = start if data_extraction is not None: ret["dataExtraction"] = data_extraction # Extract data out of dictionary on front-end if data_format == _DataFormat.APACHE_ARROW: if not _has_arrow_module: raise RuntimeError("Cannot use Arrow as pyarrow package is not installed") # Convert from pandas to Arrow table = pa.Table.from_pandas(data) # Create sink buffer stream sink = pa.BufferOutputStream() # Create Stream writer writer = pa.ipc.new_stream(sink, table.schema) # Write data to table writer.write_table(table) writer.close() # End buffer stream buf = sink.getvalue() # Convert buffer to Python bytes and return ret["data"] = buf.to_pybytes() ret["orient"] = orient else: # Workaround for Python built in JSON encoder that does not yet support ignore_nan ret["data"] = data.replace([np.nan, pd.NA], [None, None]).to_dict(orient=orient) # type: ignore return ret def get_col_types(self, var_name: str, value: t.Any) -> t.Union[None, t.Dict[str, str]]: # type: ignore if isinstance(value, _PandasDataAccessor.__types): # type: ignore return {str(k): v for k, v in value.dtypes.apply(lambda x: x.name.lower()).items()} elif isinstance(value, list): ret_dict: t.Dict[str, str] = {} for i, v in enumerate(value): ret_dict.update({f"{i}/{k}": v for k, v in v.dtypes.apply(lambda x: x.name.lower()).items()}) return ret_dict return None def __get_data( # noqa: C901 self, gui: Gui, var_name: str, value: pd.DataFrame, payload: t.Dict[str, t.Any], data_format: _DataFormat, col_prefix: t.Optional[str] = "", ) -> t.Dict[str, t.Any]: columns = payload.get("columns", []) if col_prefix: columns = [c[len(col_prefix) :] if c.startswith(col_prefix) else c for c in columns] ret_payload = {"pagekey": payload.get("pagekey", "unknown page")} paged = not payload.get("alldata", False) is_copied = False # add index if not chart if paged: if _PandasDataAccessor.__INDEX_COL not in value.columns: value = value.copy() is_copied = True value[_PandasDataAccessor.__INDEX_COL] = value.index if columns and _PandasDataAccessor.__INDEX_COL not in columns: columns.append(_PandasDataAccessor.__INDEX_COL) # filtering filters = payload.get("filters") if isinstance(filters, list) and len(filters) > 0: query 
= "" vars = [] for fd in filters: col = fd.get("col") val = fd.get("value") action = fd.get("action") if isinstance(val, str): if self.__is_date_column(value, col): val = datetime.fromisoformat(val[:-1]) vars.append(val) val = f"@vars[{len(vars) - 1}]" if isinstance(val, (str, datetime)) else val right = f".str.contains({val})" if action == "contains" else f" {action} {val}" if query: query += " and " query += f"`{col}`{right}" try: value = value.query(query) is_copied = True except Exception as e: _warn(f"Dataframe filtering: invalid query '{query}' on {value.head()}", e) if paged: aggregates = payload.get("aggregates") applies = payload.get("applies") if isinstance(aggregates, list) and len(aggregates) and isinstance(applies, dict): applies_with_fn = { k: v if v in _PandasDataAccessor.__AGGREGATE_FUNCTIONS else gui._get_user_function(v) for k, v in applies.items() } for col in columns: if col not in applies_with_fn.keys(): applies_with_fn[col] = "first" try: value = value.groupby(aggregates).agg(applies_with_fn) except Exception: _warn(f"Cannot aggregate {var_name} with groupby {aggregates} and aggregates {applies}.") inf = payload.get("infinite") if inf is not None: ret_payload["infinite"] = inf # real number of rows is needed to calculate the number of pages rowcount = len(value) # here we'll deal with start and end values from payload if present if isinstance(payload["start"], int): start = int(payload["start"]) else: try: start = int(str(payload["start"]), base=10) except Exception: _warn(f'start should be an int value {payload["start"]}.') start = 0 if isinstance(payload["end"], int): end = int(payload["end"]) else: try: end = int(str(payload["end"]), base=10) except Exception: end = -1 if start < 0 or start >= rowcount: start = 0 if end < 0 or end >= rowcount: end = rowcount - 1 # deal with sort order_by = payload.get("orderby") if isinstance(order_by, str) and len(order_by): try: if value.columns.dtype.name == "int64": order_by = int(order_by) new_indexes = value[order_by].values.argsort(axis=0) if payload.get("sort") == "desc": # reverse order new_indexes = new_indexes[::-1] new_indexes = new_indexes[slice(start, end + 1)] except Exception: _warn(f"Cannot sort {var_name} on columns {order_by}.") new_indexes = slice(start, end + 1) # type: ignore else: new_indexes = slice(start, end + 1) # type: ignore value = self.__build_transferred_cols( gui, columns, value, styles=payload.get("styles"), tooltips=payload.get("tooltips"), is_copied=is_copied, new_indexes=new_indexes, handle_nan=payload.get("handlenan", False), ) dictret = self.__format_data( value, data_format, "records", start, rowcount, handle_nan=payload.get("handlenan", False) ) else: ret_payload["alldata"] = True decimator_payload: t.Dict[str, t.Any] = payload.get("decimatorPayload", {}) decimators = decimator_payload.get("decimators", []) nb_rows_max = decimator_payload.get("width") for decimator_pl in decimators: decimator = decimator_pl.get("decimator") decimator_instance = ( gui._get_user_instance(decimator, PropertyType.decimator.value) if decimator is not None else None ) if isinstance(decimator_instance, PropertyType.decimator.value): x_column, y_column, z_column = ( decimator_pl.get("xAxis", ""), decimator_pl.get("yAxis", ""), decimator_pl.get("zAxis", ""), ) chart_mode = decimator_pl.get("chartMode", "") if decimator_instance._zoom and "relayoutData" in decimator_payload and not z_column: relayoutData = decimator_payload.get("relayoutData", {}) x0 = relayoutData.get("xaxis.range[0]") x1 = 
relayoutData.get("xaxis.range[1]") y0 = relayoutData.get("yaxis.range[0]") y1 = relayoutData.get("yaxis.range[1]") value, is_copied = _df_relayout( value, x_column, y_column, chart_mode, x0, x1, y0, y1, is_copied ) if nb_rows_max and decimator_instance._is_applicable(value, nb_rows_max, chart_mode): try: value, is_copied = _df_data_filter( value, x_column, y_column, z_column, decimator=decimator_instance, payload=decimator_payload, is_copied=is_copied, ) gui._call_on_change(f"{var_name}.{decimator}.nb_rows", len(value)) except Exception as e: _warn(f"Limit rows error with {decimator} for Dataframe", e) value = self.__build_transferred_cols(gui, columns, value, is_copied=is_copied) dictret = self.__format_data(value, data_format, "list", data_extraction=True) ret_payload["value"] = dictret return ret_payload def get_data( self, gui: Gui, var_name: str, value: t.Any, payload: t.Dict[str, t.Any], data_format: _DataFormat ) -> t.Dict[str, t.Any]: if isinstance(value, list): # If is_chart data if payload.get("alldata", False): ret_payload = { "alldata": True, "value": {"multi": True}, "pagekey": payload.get("pagekey", "unknown page"), } data = [] for i, v in enumerate(value): ret = ( self.__get_data(gui, var_name, v, payload, data_format, f"{i}/") if isinstance(v, pd.DataFrame) else {} ) ret_val = ret.get("value", {}) data.append(ret_val.pop("data", None)) ret_payload.get("value", {}).update(ret_val) ret_payload["value"]["data"] = data return ret_payload else: value = value[0] if isinstance(value, _PandasDataAccessor.__types): # type: ignore return self.__get_data(gui, var_name, value, payload, data_format) return {}
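One detail worth calling out in the filtering code above: filter values are never interpolated into the query string itself; they are appended to a local `vars` list and referenced as `@vars[i]`, which lets `DataFrame.query()` resolve them safely, including datetimes. A small standalone illustration of that indirection:

```py
# Illustrative only: the `@vars[i]` indirection used by the filter loop above.
from datetime import datetime

import pandas as pd

df = pd.DataFrame({"when": pd.to_datetime(["2023-01-01", "2024-06-01"]), "v": [1, 2]})
vars = [datetime(2024, 1, 1)]  # same local name the accessor uses
print(df.query("`when` > @vars[0]"))  # keeps only the 2024 row
```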
import typing as t

import numpy as np

from ..utils import Decimator


class ScatterDecimator(Decimator):
    """A decimator designed for scatter charts.

    This algorithm fits the data points into a grid. If multiple points fall in the same grid
    cell, depending on the chart configuration, some points are removed to reduce the number
    of points being displayed.

    This class can only be used with scatter charts.
    """

    _CHART_MODES = ["markers"]

    def __init__(
        self,
        binning_ratio: t.Optional[float] = None,
        max_overlap_points: t.Optional[int] = None,
        threshold: t.Optional[int] = None,
        zoom: t.Optional[bool] = True,
    ):
        """Initialize a new `ScatterDecimator`.

        Arguments:
            binning_ratio (Optional[float]): the size of the data grid for the algorithm. The
                higher the value, the smaller the grid size.
            max_overlap_points (Optional[int]): the maximum number of points for a single cell
                within the data grid. This dictates how dense a single grid cell can be.
            threshold (Optional[int]): The minimum amount of data points before the decimation
                is applied.
            zoom (Optional[bool]): set to True to reapply the decimation when zoom or re-layout
                events are triggered.
        """
        super().__init__(threshold, zoom)
        binning_ratio = binning_ratio if binning_ratio is not None else 1
        self._binning_ratio = binning_ratio if binning_ratio > 0 else 1
        self._max_overlap_points = max_overlap_points if max_overlap_points is not None else 3

    def decimate(self, data: np.ndarray, payload: t.Dict[str, t.Any]) -> np.ndarray:
        n_rows = data.shape[0]
        mask = np.empty(n_rows, dtype=bool)
        width = payload.get("width", None)
        height = payload.get("height", None)
        if width is None or height is None:
            mask.fill(True)
            return mask
        mask.fill(False)
        grid_x, grid_y = round(width / self._binning_ratio), round(height / self._binning_ratio)
        x_col, y_col = data[:, 0], data[:, 1]
        min_x, max_x = np.amin(x_col), np.amax(x_col)
        min_y, max_y = np.amin(y_col), np.amax(y_col)
        min_max_x_diff, min_max_y_diff = max_x - min_x, max_y - min_y
        x_grid_map = np.rint((x_col - min_x) * grid_x / min_max_x_diff).astype(int)
        y_grid_map = np.rint((y_col - min_y) * grid_y / min_max_y_diff).astype(int)
        z_grid_map = None
        grid_shape = (grid_x + 1, grid_y + 1)
        if len(data[0]) == 3:
            grid_z = grid_x
            grid_shape = (grid_x + 1, grid_y + 1, grid_z + 1)  # type: ignore
            z_col = data[:, 2]
            min_z, max_z = np.amin(z_col), np.amax(z_col)
            min_max_z_diff = max_z - min_z
            z_grid_map = np.rint((z_col - min_z) * grid_z / min_max_z_diff).astype(int)
        grid = np.empty(grid_shape, dtype=int)
        grid.fill(0)
        if z_grid_map is not None:
            for i in np.arange(n_rows):
                if grid[x_grid_map[i], y_grid_map[i], z_grid_map[i]] < self._max_overlap_points:
                    grid[x_grid_map[i], y_grid_map[i], z_grid_map[i]] += 1
                    mask[i] = True
        else:
            for i in np.arange(n_rows):
                if grid[x_grid_map[i], y_grid_map[i]] < self._max_overlap_points:
                    grid[x_grid_map[i], y_grid_map[i]] += 1
                    mask[i] = True
        return mask
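A minimal usage sketch, assuming the chart control's `decimator` property (the same mechanism the pandas accessor above reads from the decimator payload) and the import path suggested by this package layout:

```py
# Sketch: wiring ScatterDecimator into a scatter chart.
import numpy as np
import pandas as pd
from taipy.gui import Gui
from taipy.gui.data.decimator import ScatterDecimator  # path assumed from this layout

data = pd.DataFrame({"x": np.random.rand(100_000), "y": np.random.rand(100_000)})
decimator = ScatterDecimator(binning_ratio=1, max_overlap_points=3)

page = "<|{data}|chart|mode=markers|x=x|y=y|decimator=decimator|>"
Gui(page).run()
```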
from .lttb import LTTB
from .minmax import MinMaxDecimator
from .rdp import RDP
from .scatter_decimator import ScatterDecimator
import typing as t

import numpy as np

from ..utils import Decimator


class MinMaxDecimator(Decimator):
    """A decimator using the MinMax algorithm.

    The MinMax algorithm is an efficient algorithm that preserves the peaks within the data.
    It can work very well with noisy signal data where data peaks need to be highlighted.

    This class can only be used with line charts.
    """

    _CHART_MODES = ["lines+markers"]

    def __init__(self, n_out: int, threshold: t.Optional[int] = None, zoom: t.Optional[bool] = True):
        """Initialize a new `MinMaxDecimator`.

        Arguments:
            n_out (int): The maximum number of points that will be displayed after decimation.
            threshold (Optional[int]): The minimum amount of data points before the decimation
                is applied.
            zoom (Optional[bool]): set to True to reapply the decimation when zoom or re-layout
                events are triggered.
        """
        super().__init__(threshold, zoom)
        self._n_out = n_out // 2

    def decimate(self, data: np.ndarray, payload: t.Dict[str, t.Any]) -> np.ndarray:
        if self._n_out >= data.shape[0]:
            return np.full(len(data), False)
        # Create a boolean mask
        x = data[:, 0]
        y = data[:, 1]
        num_bins = self._n_out
        pts_per_bin = x.size // num_bins
        # Create temp to hold the reshaped & slightly cropped y
        y_temp = y[: num_bins * pts_per_bin].reshape((num_bins, pts_per_bin))
        # use argmax/min to get column locations
        cc_max = np.argmax(y_temp, axis=1)
        cc_min = np.argmin(y_temp, axis=1)
        rr = np.arange(0, num_bins)
        # compute the flat index to where these are
        flat_max = cc_max + rr * pts_per_bin
        flat_min = cc_min + rr * pts_per_bin
        mm_mask = np.full((x.size,), False)
        mm_mask[flat_max] = True
        mm_mask[flat_min] = True
        return mm_mask
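Because `n_out` is halved internally and both a minimum and a maximum are kept per bin, roughly `n_out` rows survive. A sketch with a noisy signal, under the same assumptions as the previous example:

```py
# Sketch: keeping the peaks of a noisy signal with MinMaxDecimator.
import numpy as np
import pandas as pd
from taipy.gui import Gui
from taipy.gui.data.decimator import MinMaxDecimator  # path assumed

x = np.linspace(0, 100, 50_000)
data = pd.DataFrame({"x": x, "y": np.sin(x) + np.random.normal(scale=0.2, size=x.size)})
decimator = MinMaxDecimator(n_out=1_000)  # roughly 1,000 points reach the browser

page = "<|{data}|chart|mode=lines+markers|x=x|y=y|decimator=decimator|>"
Gui(page).run()
```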
import typing as t

import numpy as np

from ..utils import Decimator


class LTTB(Decimator):
    """A decimator using the LTTB algorithm.

    The LTTB (Largest Triangle Three Buckets) algorithm is a high-performance algorithm that
    significantly reduces the number of data points. It can work very well with time-series
    data to show trends using only a few data points.

    This class can only be used with line charts.
    """

    _CHART_MODES = ["lines+markers"]

    def __init__(self, n_out: int, threshold: t.Optional[int] = None, zoom: t.Optional[bool] = True) -> None:
        """Initialize a new `LTTB`.

        Arguments:
            n_out (int): The maximum number of points that will be displayed after decimation.
            threshold (Optional[int]): The minimum amount of data points before the decimation
                is applied.
            zoom (Optional[bool]): set to True to reapply the decimation when zoom or re-layout
                events are triggered.
        """
        super().__init__(threshold, zoom)
        self._n_out = n_out

    @staticmethod
    def _areas_of_triangles(a, bs, c):
        bs_minus_a = bs - a
        a_minus_bs = a - bs
        return 0.5 * abs((a[0] - c[0]) * (bs_minus_a[:, 1]) - (a_minus_bs[:, 0]) * (c[1] - a[1]))

    def decimate(self, data: np.ndarray, payload: t.Dict[str, t.Any]) -> np.ndarray:
        n_out = self._n_out
        if n_out >= data.shape[0]:
            return np.full(len(data), True)
        if n_out < 3:
            raise ValueError("Can only down-sample to a minimum of 3 points")
        # Split data into bins
        n_bins = n_out - 2
        data_bins = np.array_split(data[1:-1], n_bins)
        prev_a = data[0]
        start_pos = 0
        # Prepare output mask array
        # First and last points are the same as in the input.
        out_mask = np.full(len(data), False)
        out_mask[0] = True
        out_mask[len(data) - 1] = True
        # Largest Triangle Three Buckets (LTTB):
        # In each bin, find the point that makes the largest triangle
        # with the point saved in the previous bin
        # and the centroid of the points in the next bin.
        for i in range(len(data_bins)):
            this_bin = data_bins[i]
            next_bin = data_bins[i + 1] if i < n_bins - 1 else data[-1:]
            a = prev_a
            bs = this_bin
            c = next_bin.mean(axis=0)
            areas = LTTB._areas_of_triangles(a, bs, c)
            bs_pos = np.argmax(areas)
            prev_a = bs[bs_pos]
            out_mask[start_pos + bs_pos] = True
            start_pos += len(this_bin)
        return out_mask
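All the decimators share the same contract: `decimate()` takes an (n, 2) or (n, 3) array and returns a boolean row mask. Calling it directly makes that visible, though normally the chart pipeline invokes it for you:

```py
# Sketch: LTTB's mask contract, called directly for illustration.
import numpy as np
from taipy.gui.data.decimator import LTTB  # path assumed

data = np.column_stack([np.arange(10_000.0), np.random.rand(10_000)])
mask = LTTB(n_out=500).decimate(data, payload={})
reduced = data[mask]
print(reduced.shape[0], "rows kept")  # about n_out; first and last rows always survive
```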
import typing as t

import numpy as np

from ..utils import Decimator


class RDP(Decimator):
    """A decimator using the RDP algorithm.

    The RDP (Ramer-Douglas-Peucker) algorithm reduces a shape made of line segments into a
    similar shape with fewer points. This algorithm should be used if the final visual
    representation is prioritized over the performance of the application.

    This class can only be used with line charts.
    """

    _CHART_MODES = ["lines+markers"]

    def __init__(
        self,
        epsilon: t.Optional[int] = None,
        n_out: t.Optional[int] = None,
        threshold: t.Optional[int] = None,
        zoom: t.Optional[bool] = True,
    ):
        """Initialize a new `RDP`.

        Arguments:
            epsilon (Optional[int]): The epsilon value for the RDP algorithm. If this value is
                being used, the *n_out* argument is ignored.
            n_out (Optional[int]): The maximum number of points that are displayed after
                decimation. This value is ignored if the epsilon value is used.<br/>
                This process is not very efficient, so consider using `LTTB` or
                `MinMaxDecimator` if the provided data has more than 100,000 data points.
            threshold (Optional[int]): The minimum amount of data points before the decimation
                is applied.
            zoom (Optional[bool]): set to True to reapply the decimation when zoom or re-layout
                events are triggered.
        """
        super().__init__(threshold, zoom)
        self._epsilon = epsilon
        self._n_out = n_out

    @staticmethod
    def dsquared_line_points(P1, P2, points):
        """
        Calculate only squared distance, only needed for comparison
        """
        xdiff = P2[0] - P1[0]
        ydiff = P2[1] - P1[1]
        nom = (ydiff * points[:, 0] - xdiff * points[:, 1] + P2[0] * P1[1] - P2[1] * P1[0]) ** 2
        denom = ydiff**2 + xdiff**2
        return np.divide(nom, denom)

    @staticmethod
    def __rdp_epsilon(data, epsilon: int):
        # initiate mask array
        # same amount of points
        mask = np.empty(data.shape[0], dtype=bool)
        # Assume all points are valid and falsify those which are found
        mask.fill(True)
        # The stack to select start and end index
        stack: t.List[t.Tuple[int, int]] = [(0, data.shape[0] - 1)]  # type: ignore
        while stack:
            # Pop the last item
            (start, end) = stack.pop()
            # nothing to calculate if no points in between
            if end - start <= 1:
                continue
            # Calculate distance to points
            P1 = data[start]
            P2 = data[end]
            points = data[start + 1 : end]
            dsq = RDP.dsquared_line_points(P1, P2, points)
            mask_eps = dsq > epsilon**2
            if mask_eps.any():  # max point outside eps
                # Include index that was sliced out
                # Also include the start index to get absolute index
                # And not relative
                mid = np.argmax(dsq) + 1 + start
                stack.append((start, mid))  # type: ignore
                stack.append((mid, end))  # type: ignore
            else:
                # Points in between are redundant
                mask[start + 1 : end] = False
        return mask

    @staticmethod
    def __rdp_points(M, n_out):
        M_len = M.shape[0]
        if M_len <= n_out:
            mask = np.empty(M_len, dtype=bool)
            mask.fill(True)
            return mask
        weights = np.empty(M_len)
        # weights.fill(0)
        weights[0] = float("inf")
        weights[M_len - 1] = float("inf")
        stack = [(0, M_len - 1)]
        while stack:
            (start, end) = stack.pop()
            if end - start <= 1:
                continue
            dsq = RDP.dsquared_line_points(M[start], M[end], M[start + 1 : end])
            max_dist_index = np.argmax(dsq) + start + 1
            weights[max_dist_index] = np.amax(dsq)
            stack.append((start, max_dist_index))
            stack.append((max_dist_index, end))
        maxTolerance = np.sort(weights)[M_len - n_out]
        return weights >= maxTolerance

    def decimate(self, data: np.ndarray, payload: t.Dict[str, t.Any]) -> np.ndarray:
        if self._epsilon:
            return RDP.__rdp_epsilon(data, self._epsilon)
        elif self._n_out:
            return RDP.__rdp_points(data, self._n_out)
        raise RuntimeError("RDP Decimator failed to run. Fill in either the 'epsilon' or the 'n_out' value.")
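As the `decimate()` dispatch shows, `epsilon` and `n_out` are mutually exclusive modes: a distance tolerance versus a point budget. A sketch of both, under the same import-path assumption as the earlier examples:

```py
# Sketch: the two RDP modes dispatched in decimate().
import numpy as np
from taipy.gui.data.decimator import RDP  # path assumed

xs = np.linspace(0, 10, 5_000)
data = np.column_stack([xs, np.sin(xs)])

by_tolerance = RDP(epsilon=2)  # keep points deviating more than epsilon from the segment
by_budget = RDP(n_out=200)     # keep the 200 points with the largest weights

print(by_budget.decimate(data, payload={}).sum(), "points kept out of", len(data))
```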
import typing as t

from ..utils.singleton import _Singleton

if t.TYPE_CHECKING:
    from ._element import _Block


class _BuilderContextManager(object, metaclass=_Singleton):
    def __init__(self):
        self.__blocks: t.List["_Block"] = []

    def push(self, element: "_Block") -> None:
        self.__blocks.append(element)

    def pop(self) -> t.Optional["_Block"]:
        return self.__blocks.pop() if self.__blocks else None

    def peek(self) -> t.Optional["_Block"]:
        return self.__blocks[-1] if self.__blocks else None
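This tiny stack is what makes nested `with` blocks work in the Builder API: `_Element.__new__` (further below) peeks at the innermost open block and attaches new elements to it. A stripped-down, illustrative reduction of that discipline:

```py
# Illustrative only: the stack discipline behind nested `with` blocks.
stack = []


class Block:
    def __init__(self):
        self.children = []
        if stack:  # self-register with the innermost open block
            stack[-1].children.append(self)

    def __enter__(self):
        stack.append(self)
        return self

    def __exit__(self, *exc):
        stack.pop()


with Block() as outer:
    inner = Block()  # auto-attached to `outer`
assert inner in outer.children
```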
from ._api_generator import _ElementApiGenerator
from ._element import html

# separate import for "Page" class so stubgen can properly generate pyi file
from .page import Page

_ElementApiGenerator().add_default()
import typing as t

from .._renderers import _Renderer
from ._context_manager import _BuilderContextManager
from ._element import _Block, _DefaultBlock, _Element


class Page(_Renderer):
    """Page generator for the Builder API.

    This class is used to create a page generated with the Builder API.<br/>
    Instances of this class can be added to the application using `Gui.add_page()^`.

    This class is typically used as a Python Context Manager to add the elements.<br/>
    Here is how you can create a single-page application, creating the elements with code:

    ```py
    from taipy.gui import Gui
    from taipy.gui.builder import Page, button

    def do_something(state):
        pass

    with Page() as page:
        button(label="Press me", on_action=do_something)

    Gui(page).run()
    ```
    """

    def __init__(self, element: t.Optional[_Element] = None, **kwargs) -> None:
        """Initialize a new page.

        Arguments:
            element (*Element*): An optional element, defined in the `taipy.gui.builder` module,
                that is created in the page.<br/>
                The default creates a `part` where several elements can be stored.
        """
        if element is None:
            element = _DefaultBlock()
        kwargs["content"] = element
        super().__init__(**kwargs)

    # Generate JSX from Element Object
    def render(self, gui) -> str:
        if self._base_element is None:
            return "<h1>No Base Element found for Page</h1>"
        return self._base_element._render(gui)

    def add(self, *elements: _Element):
        if not isinstance(self._base_element, _Block):
            raise RuntimeError("Can't add child element to non-block element")
        for element in elements:
            if element not in self._base_element._children:
                self._base_element._children.append(element)
        return self

    def __enter__(self):
        if self._base_element is None:
            raise RuntimeError("Can't use context manager with missing block element for Page")
        if not isinstance(self._base_element, _Block):
            raise RuntimeError("Can't add child element to non-block element")
        _BuilderContextManager().push(self._base_element)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        _BuilderContextManager().pop()
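Nesting works the same way for generated block elements. A sketch, assuming the `layout` and `text` element classes that `_ElementApiGenerator` (further below) generates from viselements.json:

```py
# Sketch: nested generated elements inside a Builder Page.
import taipy.gui.builder as tgb
from taipy.gui import Gui

with tgb.Page() as page:
    with tgb.layout(columns="1 1"):  # generated block element, default property "columns"
        tgb.text("Left column")      # generated control, default property "value"
        tgb.text("Right column")

Gui(page).run()
```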
import typing as t

from .._renderers.factory import _Factory


class _BuilderFactory(_Factory):
    @staticmethod
    def create_element(gui, element_type: str, properties: t.Dict[str, t.Any]) -> t.Tuple[str, str]:
        builder_html = _Factory.call_builder(gui, element_type, properties, True)
        if builder_html is None:
            return f"<div>INVALID SYNTAX - Element is '{element_type}'", "div"
        return builder_html  # type: ignore
from __future__ import annotations import copy import typing as t from abc import ABC, abstractmethod from collections.abc import Iterable from ._context_manager import _BuilderContextManager from ._factory import _BuilderFactory if t.TYPE_CHECKING: from ..gui import Gui class _Element(ABC): """NOT DOCUMENTED""" _ELEMENT_NAME = "" _DEFAULT_PROPERTY = "" def __new__(cls, *args, **kwargs): obj = super(_Element, cls).__new__(cls) parent = _BuilderContextManager().peek() if parent is not None: parent.add(obj) return obj def __init__(self, *args, **kwargs): self._properties: t.Dict[str, t.Any] = {} if args and self._DEFAULT_PROPERTY != "": self._properties = {self._DEFAULT_PROPERTY: args[0]} self._properties.update(kwargs) self.parse_properties() def update(self, **kwargs): self._properties.update(kwargs) self.parse_properties() # Convert property value to string def parse_properties(self): self._properties = {k: _Element._parse_property(v) for k, v in self._properties.items()} # Get a deepcopy version of the properties def _deepcopy_properties(self): return copy.deepcopy(self._properties) @staticmethod def _parse_property(value: t.Any) -> t.Any: if isinstance(value, (str, dict, Iterable)): return value if hasattr(value, "__name__"): return str(getattr(value, "__name__")) return str(value) @abstractmethod def _render(self, gui: "Gui") -> str: pass class _Block(_Element): """NOT DOCUMENTED""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._children: t.List[_Element] = [] def add(self, *elements: _Element): for element in elements: if element not in self._children: self._children.append(element) return self def __enter__(self): _BuilderContextManager().push(self) return self def __exit__(self, exc_type, exc_value, traceback): _BuilderContextManager().pop() def _render(self, gui: "Gui") -> str: el = _BuilderFactory.create_element(gui, self._ELEMENT_NAME, self._deepcopy_properties()) return f"{el[0]}{self._render_children(gui)}</{el[1]}>" def _render_children(self, gui: "Gui") -> str: return "\n".join([child._render(gui) for child in self._children]) class _DefaultBlock(_Block): _ELEMENT_NAME = "part" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) class html(_Block): """A visual element defined as an HTML tag. Use this class to integrate raw HTML to your page. This element can be used as a block element. """ def __init__(self, *args, **kwargs): """Create a new `html` block. Arguments: args (any[]): A list of one or two unnamed arguments: - *args[0]* is the HTML tag name. If empty or None, this represents an HTML text node. - *args[1]* (optional) is the text of this element.<br/> Note that special HTML characters (such as '&lt;' or '&amp;') do not need to be protected. kwargs (dict[str, any]): the HTML attributes for this element.<br/> These should be valid attribute names, with valid attribute values. Examples: - To generate `<br/>`, use: ``` html("br") ``` - To generate `<h1>My page title</h1>`, use: ``` html("h1", "My page title") ``` - To generate `<h1 id="page-title">My page title</h1>`, use: ``` html("h1", "My page title", id="page-title") ``` - To generate `<p>This is a <b>Taipy GUI</b> element.</p>`, use: ``` with html("p"): html(None, "This is a ") html("b", "Taipy GUI") html(None, " element.") ``` """ super().__init__(*args, **kwargs) if not args: raise RuntimeError("Can't render html element. 
Missing html tag name.") self._ELEMENT_NAME = args[0] if args[0] else None self._content = args[1] if len(args) > 1 else "" def _render(self, gui: "Gui") -> str: if self._ELEMENT_NAME: attrs = "" if self._properties: attrs = " " + " ".join([f'{k}="{str(v)}"' for k, v in self._properties.items()]) return f"<{self._ELEMENT_NAME}{attrs}>{self._content}{self._render_children(gui)}</{self._ELEMENT_NAME}>" else: return self._content class _Control(_Element): """NOT DOCUMENTED""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def _render(self, gui: "Gui") -> str: el = _BuilderFactory.create_element(gui, self._ELEMENT_NAME, self._deepcopy_properties()) return ( f"<div>{el[0]}</{el[1]}></div>" if f"<{el[1]}" in el[0] and f"</{el[1]}" not in el[0] else f"<div>{el[0]}</div>" ) def __enter__(self): raise RuntimeError(f"Can't use Context Manager for control type '{self._ELEMENT_NAME}'") def __exit__(self, exc_type, exc_value, traceback): raise RuntimeError(f"Can't use Context Manager for control type '{self._ELEMENT_NAME}'")
import inspect import json import os import sys import types import typing as t from taipy.logger._taipy_logger import _TaipyLogger from ..utils.singleton import _Singleton from ._element import _Block, _Control if t.TYPE_CHECKING: from ..extension.library import ElementLibrary class _ElementApiGenerator(object, metaclass=_Singleton): def __init__(self): self.__module: t.Optional[types.ModuleType] = None @staticmethod def find_default_property(property_list: t.List[t.Dict[str, t.Any]]) -> str: for property in property_list: if "default_property" in property and property["default_property"] is True: return property["name"] return "" def add_default(self): current_frame = inspect.currentframe() error_message = "Cannot generate elements API for the current module" if current_frame is None: raise RuntimeError(f"{error_message}: No frame found.") if current_frame.f_back is None: raise RuntimeError(f"{error_message}: taipy-gui module not found.") module_name = current_frame.f_back.f_globals["__name__"] self.__module = module = sys.modules[module_name] with open(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "viselements.json"))) as viselements: data = json.load(viselements) if "blocks" not in data or "controls" not in data: raise RuntimeError(f"{error_message}: Invalid viselements.json file.") for blockElement in data["blocks"]: default_property = _ElementApiGenerator.find_default_property(blockElement[1]["properties"]) setattr( module, blockElement[0], _ElementApiGenerator.create_block_api(blockElement[0], blockElement[0], default_property), ) for controlElement in data["controls"]: default_property = _ElementApiGenerator.find_default_property(controlElement[1]["properties"]) setattr( module, controlElement[0], _ElementApiGenerator.create_control_api(controlElement[0], controlElement[0], default_property), ) def add_library(self, library: "ElementLibrary"): library_name = library.get_name() if self.__module is None: _TaipyLogger._get_logger().info( f"Python API for extension library '{library_name}' is not available. To fix this, import 'taipy.gui.builder' before importing the extension library." ) return library_module = getattr(self.__module, library_name, None) if library_module is None: library_module = types.ModuleType(library_name) setattr(self.__module, library_name, library_module) for element_name, element in library.get_elements().items(): setattr( library_module, element_name, _ElementApiGenerator().create_control_api( element_name, f"{library_name}.{element_name}", element.default_attribute ), ) @staticmethod def create_block_api( classname: str, element_name: str, default_property: str, ): return _ElementApiGenerator.create_element_api(classname, element_name, default_property, _Block) @staticmethod def create_control_api( classname: str, element_name: str, default_property: str, ): return _ElementApiGenerator.create_element_api(classname, element_name, default_property, _Control) @staticmethod def create_element_api( classname: str, element_name: str, default_property: str, ElementBaseClass: t.Union[t.Type[_Block], t.Type[_Control]], ): return type( classname, (ElementBaseClass,), { "__init__": lambda self, *args, **kwargs: ElementBaseClass.__init__(self, *args, **kwargs), "_ELEMENT_NAME": element_name, "_DEFAULT_PROPERTY": default_property, }, )
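`create_element_api()` above builds each public element class at import time with a three-argument `type()` call. A minimal standalone reduction of that pattern (names invented for the example):

```py
# Illustrative only: dynamic class creation as done in create_element_api().
class _Base:
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs


button = type("button", (_Base,), {"_ELEMENT_NAME": "Button", "_DEFAULT_PROPERTY": "label"})
b = button("Press me")
print(b._ELEMENT_NAME, b._DEFAULT_PROPERTY, b.args)  # Button label ('Press me',)
```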
import typing as t
from abc import ABC, abstractmethod
from os import path

from ..page import Page
from ..utils import _is_in_notebook, _varname_from_content
from ._html import _TaipyHTMLParser

if t.TYPE_CHECKING:
    from ..builder._element import _Element
    from ..gui import Gui


class _Renderer(Page, ABC):
    def __init__(self, **kwargs) -> None:
        """NOT DOCUMENTED
        Initialize a new _Renderer with the indicated content.

        Arguments:
            content (str): The text content or the path to the file holding the text to be transformed.

        If *content* is a path to a readable file, the file is read entirely as the text template.
        """
        from ..builder._element import _Element

        super().__init__(**kwargs)
        content: t.Optional[t.Union[str, _Element]] = kwargs.get("content", None)
        if content is None:
            raise ValueError("'content' argument is missing for class '_Renderer'")
        self._content = ""
        self._base_element: t.Optional[_Element] = None
        self._filepath = ""
        if isinstance(content, str):
            self.__process_content(content)
        elif isinstance(content, _Element):
            self._base_element = content
        else:
            raise ValueError(
                f"'content' argument has incorrect type '{type(content).__name__}'. This must be a string or a Builder element."
            )

    def __process_content(self, content: str) -> None:
        if path.exists(content) and path.isfile(content):
            return self.__parse_file_content(content)
        if self._frame is not None:
            frame_dir_path = path.dirname(path.abspath(self._frame.f_code.co_filename))
            content_path = path.join(frame_dir_path, content)
            if path.exists(content_path) and path.isfile(content_path):
                return self.__parse_file_content(content_path)
        self._content = content

    def __parse_file_content(self, content):
        with open(t.cast(str, content), "r") as f:
            self._content = f.read()
            # Save file path for error handling
            self._filepath = content

    def set_content(self, content: str) -> None:
        """Set a new page content.

        Reads the new page content and reinitializes the `Page^` instance to reflect the change.

        !!! important
            This function can only be used in an IPython notebook context.

        Arguments:
            content (str): The text content or the path to the file holding the text to be transformed.
                If *content* is a path to a readable file, the file is read entirely as the text template.

        Exceptions:
            RuntimeError: If this method is called outside an IPython notebook context.
        """
        if not _is_in_notebook():
            raise RuntimeError("'set_content()' must be used in an IPython notebook context")
        self.__process_content(content)

    def _get_content_detail(self, gui: "Gui") -> str:
        if self._filepath:
            return f"in file '{self._filepath}'"
        if varname := _varname_from_content(gui, self._content):
            return f"in variable '{varname}'"
        return ""

    @abstractmethod
    def render(self, gui: "Gui") -> str:
        pass


class _EmptyPage(_Renderer):
    def __init__(self) -> None:
        super().__init__(content="<PageContent />")

    def render(self, gui: "Gui") -> str:
        return self._content


class Markdown(_Renderer):
    """Page generator for *Markdown* text.

    Taipy can use Markdown text to create pages that are the base of
    user interfaces.

    You can find details on the Taipy Markdown-specific syntax and how to add
    Taipy Visual Elements in the [section on Markdown](../gui/pages/index.md#using-markdown)
    of the User Manual.
    """

    def __init__(self, content: str, **kwargs) -> None:
        """Initialize a new `Markdown` page.

        Arguments:
            content (str): The text content or the path to the file holding the Markdown text
                to be transformed.<br/>
                If *content* is a path to a readable file, the file is read as the Markdown
                template content.
        """
        kwargs["content"] = content
        super().__init__(**kwargs)

    # Generate JSX from Markdown
    def render(self, gui: "Gui") -> str:
        return gui._markdown.convert(self._content)


class Html(_Renderer):
    """Page generator for *HTML* text.

    Taipy can use HTML code to create pages that are the base of
    user interfaces.

    You can find details on HTML-specific constructs and how to add
    Taipy Visual Elements in the [section on HTML](../gui/pages/index.md#using-html)
    of the User Manual.
    """

    def __init__(self, content: str, **kwargs) -> None:
        """Initialize a new `Html` page.

        Arguments:
            content (str): The text content or the path to the file holding the HTML text to
                be transformed.<br/>
                If *content* is a path to a readable file, the file is read as the HTML
                template content.
        """
        kwargs["content"] = content
        super().__init__(**kwargs)
        self.head = None

    # Modify path routes
    def modify_taipy_base_url(self, base_url):
        self._content = self._content.replace("{{taipy_base_url}}", f"{base_url}")

    # Generate JSX from HTML
    def render(self, gui: "Gui") -> str:
        parser = _TaipyHTMLParser(gui)
        parser.feed_data(self._content)
        self.head = parser.head
        return parser.get_jsx()
import re import typing as t from datetime import datetime from ..types import PropertyType from .builder import _Builder if t.TYPE_CHECKING: from ..extension.library import ElementLibrary from ..gui import Gui class _Factory: DEFAULT_CONTROL = "text" _START_SUFFIX = ".start" _END_SUFFIX = ".end" __TAIPY_NAME_SPACE = "taipy." __CONTROL_DEFAULT_PROP_NAME = { "button": "label", "chart": "data", "content": "value", "date": "date", "date_range": "dates", "dialog": "open", "expandable": "title", "file_download": "content", "file_selector": "content", "image": "content", "indicator": "display", "input": "value", "layout": "columns", "menu": "lov", "navbar": "value", "number": "value", "pane": "open", "part": "class_name", "selector": "value", "slider": "value", "status": "value", "table": "data", "text": "value", "toggle": "value", "tree": "value", } _TEXT_ATTRIBUTES = ["format", "id", "hover_text", "raw"] __TEXT_ANCHORS = ["bottom", "top", "left", "right"] __TEXT_ANCHOR_NONE = "none" __LIBRARIES: t.Dict[str, t.List["ElementLibrary"]] = {} __CONTROL_BUILDERS = { "button": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Button", attributes=attrs, ) .set_value_and_default(with_update=False) .set_attributes( [ ("id",), ("on_action", PropertyType.function), ("active", PropertyType.dynamic_boolean, True), ("hover_text", PropertyType.dynamic_string), ] ), "chart": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Chart", attributes=attrs, ) .set_value_and_default(with_default=False, var_type=PropertyType.data) .set_attributes( [ ("id",), ("title",), ("width", PropertyType.string_or_number), ("height", PropertyType.string_or_number), ("layout", PropertyType.dynamic_dict), ("plot_config", PropertyType.dict), ("on_range_change", PropertyType.function), ("active", PropertyType.dynamic_boolean, True), ("render", PropertyType.dynamic_boolean, True), ("hover_text", PropertyType.dynamic_string), ("on_change", PropertyType.function), ("template", PropertyType.dict), ("template[dark]", PropertyType.dict, gui._get_config("chart_dark_template", None)), ("template[light]", PropertyType.dict), ] ) ._get_chart_config("scatter", "lines+markers") ._set_propagate(), "content": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="PageContent", attributes=attrs ), "date": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="DateSelector", attributes=attrs, default_value=datetime.fromtimestamp(0), ) .set_value_and_default(var_type=PropertyType.date) .set_attributes( [ ("with_time", PropertyType.boolean), ("id",), ("active", PropertyType.dynamic_boolean, True), ("editable", PropertyType.dynamic_boolean, True), ("hover_text", PropertyType.dynamic_string), ("label",), ("on_change", PropertyType.function), ("format",), ] ) ._set_propagate(), "date_range": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="DateRange", attributes=attrs, ) .set_value_and_default(var_type=PropertyType.date_range) .set_attributes( [ ("with_time", PropertyType.boolean), ("id",), ("active", PropertyType.dynamic_boolean, True), ("editable", PropertyType.dynamic_boolean, True), ("hover_text", PropertyType.dynamic_string), ("label_start",), ("label_end",), ("on_change", PropertyType.function), ("format",), ] ) ._set_propagate(), "dialog": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Dialog", 
attributes=attrs, ) .set_value_and_default(var_type=PropertyType.dynamic_boolean) ._set_partial() # partial should be set before page .set_attributes( [ ("id",), ("page",), ("title",), ("on_action", PropertyType.function), ("close_label", PropertyType.string), ("labels", PropertyType.string_list), ("active", PropertyType.dynamic_boolean, True), ("width", PropertyType.string_or_number), ("height", PropertyType.string_or_number), ("hover_text", PropertyType.dynamic_string), ] ) ._set_propagate(), "expandable": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Expandable", attributes=attrs, default_value=None ) .set_value_and_default() ._set_partial() # partial should be set before page .set_attributes( [ ("id",), ("page",), ("expanded", PropertyType.dynamic_boolean, True, True, False), ("hover_text", PropertyType.dynamic_string), ("on_change", PropertyType.function), ] ), "file_download": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="FileDownload", attributes=attrs, ) .set_value_and_default(var_name="label", with_update=False) ._set_content("content", image=False) .set_attributes( [ ("id",), ("on_action", PropertyType.function), ("active", PropertyType.dynamic_boolean, True), ("render", PropertyType.dynamic_boolean, True), ("auto", PropertyType.boolean, False), ("bypass_preview", PropertyType.boolean, True), ("name",), ("hover_text", PropertyType.dynamic_string), ] ), "file_selector": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="FileSelector", attributes=attrs, ) .set_value_and_default(var_name="label", with_update=False) ._set_file_content() .set_attributes( [ ("id",), ("on_action", PropertyType.function), ("active", PropertyType.dynamic_boolean, True), ("multiple", PropertyType.boolean, False), ("extensions",), ("drop_message",), ("hover_text", PropertyType.dynamic_string), ] ), "image": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Image", attributes=attrs, ) .set_value_and_default(var_name="label", with_update=False) ._set_content("content") .set_attributes( [ ("id",), ("on_action", PropertyType.function), ("active", PropertyType.dynamic_boolean, True), ("width",), ("height",), ("hover_text", PropertyType.dynamic_string), ] ), "indicator": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Indicator", attributes=attrs, ) .set_value_and_default(with_update=False, native_type=True) .set_attributes( [ ("id",), ("min", PropertyType.number), ("max", PropertyType.number), ("value", PropertyType.dynamic_number), ("format",), ("orientation"), ("hover_text", PropertyType.dynamic_string), ("width",), ("height",), ] ), "input": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Input", attributes=attrs, ) ._set_input_type("text", True) .set_value_and_default() ._set_propagate() .set_attributes( [ ("id",), ("active", PropertyType.dynamic_boolean, True), ("hover_text", PropertyType.dynamic_string), ("on_change", PropertyType.function), ("on_action", PropertyType.function), ("action_keys",), ("label",), ("change_delay", PropertyType.number, gui._get_config("change_delay", None)), ("multiline", PropertyType.boolean, False), ("lines_shown", PropertyType.number, 5), ] ), "layout": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Layout", attributes=attrs, default_value=None ) 
.set_value_and_default(with_default=False) .set_attributes( [ ("id",), ("columns[mobile]",), ("gap",), ] ), "menu": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="MenuCtl", attributes=attrs, ) ._get_adapter("lov") # need to be called before set_lov ._set_lov() .set_attributes( [ ("id",), ("active", PropertyType.dynamic_boolean, True), ("label"), ("width"), ("width[mobile]",), ("on_action", PropertyType.function), ("inactive_ids", PropertyType.dynamic_list), ("hover_text", PropertyType.dynamic_string), ] ) ._set_propagate(), "navbar": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="NavBar", attributes=attrs, default_value=None ) ._get_adapter("lov", multi_selection=False) # need to be called before set_lov ._set_lov() .set_attributes( [ ("id",), ("active", PropertyType.dynamic_boolean, True), ("hover_text", PropertyType.dynamic_string), ] ), "number": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Input", attributes=attrs, default_value=0, ) ._set_input_type("number") .set_value_and_default(var_type=PropertyType.dynamic_number) ._set_propagate() .set_attributes( [ ("id",), ("active", PropertyType.dynamic_boolean, True), ("hover_text", PropertyType.dynamic_string), ("on_change", PropertyType.function), ("on_action", PropertyType.function), ("label",), ("change_delay", PropertyType.number, gui._get_config("change_delay", None)), ] ), "pane": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Pane", attributes=attrs, default_value=None ) .set_value_and_default(var_type=PropertyType.dynamic_boolean) ._set_partial() # partial should be set before page .set_attributes( [ ("id",), ("page",), ("anchor", PropertyType.string, "left"), ("on_close", PropertyType.function), ("persistent", PropertyType.boolean, False), ("active", PropertyType.dynamic_boolean, True), ("width", PropertyType.string_or_number, "30vw"), ("height", PropertyType.string_or_number, "30vh"), ("hover_text", PropertyType.dynamic_string), ("on_change", PropertyType.function), ] ) ._set_propagate(), "part": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Part", attributes=attrs, default_value=None ) ._set_partial() # partial should be set before page .set_attributes( [ ("id",), ("page", PropertyType.dynamic_string), ("render", PropertyType.dynamic_boolean, True), ("height", PropertyType.dynamic_string), ("content", PropertyType.toHtmlContent), ] ), "selector": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Selector", attributes=attrs, default_value=None ) .set_value_and_default(with_default=False, var_type=PropertyType.lov_value) ._get_adapter("lov") # need to be called before set_lov ._set_lov() .set_attributes( [ ("active", PropertyType.dynamic_boolean, True), ("dropdown", PropertyType.boolean, False), ("filter", PropertyType.boolean), ("height", PropertyType.string_or_number), ("hover_text", PropertyType.dynamic_string), ("id",), ("value_by_id", PropertyType.boolean), ("multiple", PropertyType.boolean), ("width", PropertyType.string_or_number), ("on_change", PropertyType.function), ("label",), ] ) ._set_propagate(), "slider": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Slider", attributes=attrs, default_value=0, ) .set_value_and_default(native_type=True, var_type=PropertyType.slider_value) .set_attributes( [ ("active", 
PropertyType.dynamic_boolean, True), ("height"), ("hover_text", PropertyType.dynamic_string), ("id",), ("value_by_id", PropertyType.boolean), ("max", PropertyType.number, 100), ("min", PropertyType.number, 0), ("orientation"), ("width", PropertyType.string, "300px"), ("on_change", PropertyType.function), ("continuous", PropertyType.boolean, True), ("lov", PropertyType.lov), ("change_delay", PropertyType.number, gui._get_config("change_delay", None)), ] ) ._set_labels() ._set_string_with_check("text_anchor", _Factory.__TEXT_ANCHORS + [_Factory.__TEXT_ANCHOR_NONE], "bottom") ._set_propagate(), "status": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Status", attributes=attrs, ) .set_value_and_default(with_update=False) .set_attributes( [ ("id",), ("without_close", PropertyType.boolean, False), ("hover_text", PropertyType.dynamic_string), ] ), "table": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Table", attributes=attrs, ) .set_value_and_default(with_default=False, var_type=PropertyType.data) ._get_dataframe_attributes() .set_attributes( [ ("page_size", PropertyType.number, "100"), ("allow_all_rows", PropertyType.boolean), ("show_all", PropertyType.boolean), ("auto_loading", PropertyType.boolean), ("width", PropertyType.string_or_number, "100%"), ("height", PropertyType.string_or_number, "80vh"), ("id",), ("active", PropertyType.dynamic_boolean, True), ("editable", PropertyType.dynamic_boolean, True), ("on_edit", PropertyType.function), ("on_delete", PropertyType.function), ("on_add", PropertyType.function), ("on_action", PropertyType.function), ("nan_value",), ("filter", PropertyType.boolean), ("hover_text", PropertyType.dynamic_string), ("size",), ] ) ._set_propagate() ._get_list_attribute("selected", PropertyType.number) ._set_table_pagesize_options(), "text": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Field", attributes=attrs, ) .set_value_and_default(with_update=False) ._set_dataType() .set_attributes( [ ("format",), ("id",), ("hover_text", PropertyType.dynamic_string), ("raw", PropertyType.boolean, False), ("mode", PropertyType.string), ] ), "toggle": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="Toggle", attributes=attrs, default_value=None ) .set_value_and_default(with_default=False, var_type=PropertyType.lov_value) ._get_adapter("lov", multi_selection=False) # need to be called before set_lov ._set_lov() .set_attributes( [ ("active", PropertyType.dynamic_boolean, True), ("hover_text", PropertyType.dynamic_string), ("id",), ("label",), ("value_by_id", PropertyType.boolean), ("unselected_value", PropertyType.string, ""), ("allow_unselect", PropertyType.boolean), ("on_change", PropertyType.function), ] ) ._set_kind() ._set_propagate(), "tree": lambda gui, control_type, attrs: _Builder( gui=gui, control_type=control_type, element_name="TreeView", attributes=attrs, ) .set_value_and_default(with_default=False, var_type=PropertyType.lov_value) .set_attributes( [ ("active", PropertyType.dynamic_boolean, True), ("expanded", PropertyType.boolean_or_list, True), ("filter", PropertyType.boolean), ("hover_text", PropertyType.dynamic_string), ("height", PropertyType.string_or_number), ("id",), ("value_by_id", PropertyType.boolean), ("multiple", PropertyType.boolean), ("width", PropertyType.string_or_number), ("on_change", PropertyType.function), ("select_leafs_only", PropertyType.boolean), ("row_height", 
PropertyType.string), ("lov", PropertyType.lov), ] ) ._set_propagate(), } # TODO: process \" in property value _PROPERTY_RE = re.compile(r"\s+([a-zA-Z][\.a-zA-Z_$0-9]*(?:\[(?:.*?)\])?)=\"((?:(?:(?<=\\)\")|[^\"])*)\"") @staticmethod def set_library(library: "ElementLibrary"): from ..extension.library import Element, ElementLibrary if isinstance(library, ElementLibrary) and isinstance(library.get_name(), str) and library.get_elements(): elements = library.get_elements() for name, element in elements.items(): if isinstance(element, Element): element.check(name) fact_lib = _Factory.__LIBRARIES.get(library.get_name()) if fact_lib is None: _Factory.__LIBRARIES.update({library.get_name(): [library]}) else: fact_lib.append(library) @staticmethod def get_default_property_name(control_name: str) -> t.Optional[str]: name = ( control_name[: -len(_Factory._START_SUFFIX)] if control_name.endswith(_Factory._START_SUFFIX) else control_name[: -len(_Factory._END_SUFFIX)] if control_name.endswith(_Factory._END_SUFFIX) else control_name ) name = name[len(_Factory.__TAIPY_NAME_SPACE) :] if name.startswith(_Factory.__TAIPY_NAME_SPACE) else name prop = _Factory.__CONTROL_DEFAULT_PROP_NAME.get(name) if prop is None: _, _, element = _Factory.__get_library_element(name) if element: prop = element.default_attribute return prop @staticmethod def __get_library_element(name: str): parts = name.split(".") if len(parts) > 1: element_name = ".".join(parts[1:]) for lib in _Factory.__LIBRARIES.get(parts[0], []): elts = lib.get_elements() if isinstance(elts, dict): element = elts.get(element_name) if element: return lib, element_name, element else: element_name = name for libs in list(_Factory.__LIBRARIES.values()): for lib in libs: elts = lib.get_elements() if isinstance(elts, dict): element = elts.get(element_name) if element: return lib, element_name, element return None, None, None @staticmethod def call_builder( gui: "Gui", name: str, all_properties: t.Optional[t.Dict[str, t.Any]] = None, is_html: t.Optional[bool] = False ) -> t.Optional[t.Union[t.Any, t.Tuple[str, str]]]: name = name[len(_Factory.__TAIPY_NAME_SPACE) :] if name.startswith(_Factory.__TAIPY_NAME_SPACE) else name builder = _Factory.__CONTROL_BUILDERS.get(name) built = None if builder is None: lib, element_name, element = _Factory.__get_library_element(name) if lib: from ..extension.library import Element if isinstance(element, Element): return element._call_builder(element_name, gui, all_properties, lib, is_html) else: built = builder(gui, name, all_properties) if isinstance(built, _Builder): return built._build_to_string() if is_html else built.el return None
import contextlib import json import numbers import time as _time import typing as t import xml.etree.ElementTree as etree from datetime import date, datetime, time from inspect import isclass from urllib.parse import quote from .._warnings import _warn from ..partial import Partial from ..types import PropertyType, _get_taipy_type from ..utils import ( _date_to_string, _get_broadcast_var_name, _get_client_var_name, _get_data_type, _get_expr_var_name, _getscopeattr, _getscopeattr_drill, _is_boolean, _is_boolean_true, _MapDict, _to_camel_case, ) from ..utils.chart_config_builder import _CHART_NAMES, _build_chart_config from ..utils.table_col_builder import _enhance_columns, _get_name_indexed_property from ..utils.types import _TaipyBase, _TaipyData from .json import _TaipyJsonEncoder from .utils import _add_to_dict_and_get, _get_columns_dict, _get_tuple_val if t.TYPE_CHECKING: from ..gui import Gui class _Builder: """ Constructs an XML node that can be rendered as a React node. This class can only be instantiated internally by Taipy. """ __keys: t.Dict[str, int] = {} __BLOCK_CONTROLS = ["dialog", "expandable", "pane", "part"] __TABLE_COLUMNS_DEPS = [ "data", "columns", "date_format", "number_format", "nan_value", "width", "filter", "editable", "group_by", "apply", "style", "tooltip", ] def __init__( self, gui: "Gui", control_type: str, element_name: str, attributes: t.Optional[t.Dict[str, t.Any]], hash_names: t.Dict[str, str] = {}, default_value="<Empty>", lib_name: str = "taipy", ): from ..gui import Gui from .factory import _Factory self.el = etree.Element(element_name) self.__control_type = control_type self.__element_name = element_name self.__lib_name = lib_name self.__attributes = attributes or {} self.__hashes = hash_names.copy() self.__update_vars: t.List[str] = [] self.__gui: Gui = gui self.__default_property_name = _Factory.get_default_property_name(control_type) or "" default_property_value = self.__attributes.get(self.__default_property_name, None) if default_property_value is None and default_value is not None: self.__attributes[self.__default_property_name] = default_value # Bind properties dictionary to attributes if condition is matched (will leave the binding for function at the builder ) if "properties" in self.__attributes: (prop_dict, prop_hash) = _Builder.__parse_attribute_value(gui, self.__attributes["properties"]) if prop_hash is None: prop_hash = prop_dict prop_hash = self.__gui._bind_var(prop_hash) if hasattr(self.__gui._bindings(), prop_hash): prop_dict = _getscopeattr(self.__gui, prop_hash) if isinstance(prop_dict, (dict, _MapDict)): # Iterate through prop_dict and append to self.attributes var_name, _ = gui._get_real_var_name(prop_hash) for k, v in prop_dict.items(): (val, key_hash) = _Builder.__parse_attribute_value(gui, v) self.__attributes[k] = ( f"{{None if ({var_name}) is None else ({var_name}).get('{k}')}}" if key_hash is None else v ) else: _warn(f"{self.__control_type}.properties ({prop_hash}) must be a dict.") # Bind potential function and expressions in self.attributes self.__hashes.update(_Builder._get_variable_hash_names(gui, self.__attributes, hash_names)) # set classname self.__set_class_names() # define a unique key self.set_attribute("key", _Builder._get_key(self.__element_name)) @staticmethod def __parse_attribute_value(gui: "Gui", value) -> t.Tuple: if isinstance(value, str) and gui._is_expression(value): hash_value = gui._evaluate_expr(value) try: func = gui._get_user_function(hash_value) if callable(func): return (func, hash_value) return 
(_getscopeattr_drill(gui, hash_value), hash_value) except AttributeError: _warn(f"Expression '{value}' cannot be evaluated.") return (value, None) @staticmethod def _get_variable_hash_names( gui: "Gui", attributes: t.Dict[str, t.Any], hash_names: t.Dict[str, str] = {} ) -> t.Dict[str, str]: hashes = {} # Bind potential function and expressions in self.attributes for k, v in attributes.items(): val = v hashname = hash_names.get(k) if hashname is None: if callable(v): if v.__name__ == "<lambda>": hashname = _get_expr_var_name(v.__code__) gui._bind_var_val(hashname, v) else: hashname = _get_expr_var_name(v.__name__) elif isinstance(v, str): # need to unescape the double quotes that were escaped during preprocessing (val, hashname) = _Builder.__parse_attribute_value(gui, v.replace('\\"', '"')) if val is not None or hashname: attributes[k] = val if hashname: hashes[k] = hashname return hashes @staticmethod def __to_string(x: t.Any) -> str: return str(x) @staticmethod def _get_key(name: str) -> str: key_index = _Builder.__keys.get(name, 0) _Builder.__keys[name] = key_index + 1 return f"{name}.{key_index}" @staticmethod def _reset_key() -> None: _Builder.__keys = {} def __get_list_of_(self, name: str): lof = self.__attributes.get(name) if isinstance(lof, str): lof = list(lof.split(";")) return lof def get_name_indexed_property(self, name: str) -> t.Dict[str, t.Any]: """ TODO-undocumented Returns all properties defined as <property name>[<named index>] as a dict. Arguments: name (str): The property name. """ return _get_name_indexed_property(self.__attributes, name) def __get_boolean_attribute(self, name: str, default_value=False): boolattr = self.__attributes.get(name, default_value) return _is_boolean_true(boolattr) if isinstance(boolattr, str) else bool(boolattr) def set_boolean_attribute(self, name: str, value: bool): """ TODO-undocumented Defines a React Boolean attribute (attr={true|false}). Arguments: name (str): The property name. value (bool): the boolean value. """ return self.__set_react_attribute(_to_camel_case(name), value) def set_dict_attribute(self, name: str, default_value: t.Optional[t.Dict[str, t.Any]] = None): """ TODO-undocumented Defines a React attribute as a stringified json dict. The original property can be a dict or a string formed as <key 1>:<value 1>;<key 2>:<value 2>. Arguments: name (str): The property name. default value (dict): used if no value is specified. """ dict_attr = self.__attributes.get(name) if dict_attr is None: dict_attr = default_value if dict_attr is not None: if isinstance(dict_attr, str): vals = [x.strip().split(":") for x in dict_attr.split(";")] dict_attr = {val[0].strip(): val[1].strip() for val in vals if len(val) > 1} if isinstance(dict_attr, (dict, _MapDict)): self.__set_json_attribute(_to_camel_case(name), dict_attr) else: _warn(f"{self.__element_name}: {name} should be a dict: '{str(dict_attr)}'.") return self def set_dynamic_dict_attribute(self, name: str, default_value: t.Optional[t.Dict[str, t.Any]] = None): """ TODO-undocumented Defines a React attribute as a stringified json dict. The original property can be a dict or a string formed as <key 1>:<value 1>;<key 2>:<value 2>. Arguments: name (str): The property name. default value (dict): used if no value is specified. 
""" dict_attr = self.__attributes.get(name) if dict_attr is None: dict_attr = default_value if dict_attr is not None: if isinstance(dict_attr, str): vals = [x.strip().split(":") for x in dict_attr.split(";")] dict_attr = {val[0].strip(): val[1].strip() for val in vals if len(val) > 1} if isinstance(dict_attr, (dict, _MapDict)): self.__set_json_attribute(_to_camel_case("default_" + name), dict_attr) else: _warn(f"{self.__element_name}: {name} should be a dict: '{str(dict_attr)}'.") if dict_hash := self.__hashes.get(name): dict_hash = self.__get_typed_hash_name(dict_hash, PropertyType.dynamic_dict) prop_name = _to_camel_case(name) self.__update_vars.append(f"{prop_name}={dict_hash}") self.__set_react_attribute(prop_name, dict_hash) return self def __set_json_attribute(self, name, value): return self.set_attribute(name, json.dumps(value, cls=_TaipyJsonEncoder)) def __set_list_of_(self, name: str): lof = self.__get_list_of_(name) if not isinstance(lof, (list, tuple)): if lof is not None: _warn(f"{self.__element_name}: {name} should be a list.") return self return self.__set_json_attribute(_to_camel_case(name), lof) def set_number_attribute(self, name: str, default_value: t.Optional[str] = None, optional: t.Optional[bool] = True): """ TODO-undocumented Defines a React number attribute (attr={<number>}). Arguments: name (str): The property name. default_value (optional(str)): the default value as a string. optional (bool): Default to True, the property is required if False. """ value = self.__attributes.get(name, default_value) if value is None: if not optional: _warn(f"Property {name} is required for control {self.__control_type}.") return self if isinstance(value, str): try: val = float(value) except ValueError: raise ValueError(f"Property {name} expects a number for control {self.__control_type}") elif isinstance(value, numbers.Number): val = value # type: ignore else: raise ValueError( f"Property {name} expects a number for control {self.__control_type}, received {type(value)}" ) return self.__set_react_attribute(_to_camel_case(name), val) def __set_string_attribute( self, name: str, default_value: t.Optional[str] = None, optional: t.Optional[bool] = True ): strattr = self.__attributes.get(name, default_value) if strattr is None: if not optional: _warn(f"Property {name} is required for control {self.__control_type}.") return self return self.set_attribute(_to_camel_case(name), str(strattr)) def __set_dynamic_string_attribute( self, name: str, default_value: t.Optional[str] = None, with_update: t.Optional[bool] = False, dynamic_property_name: t.Optional[str] = None, ): str_val = self.__attributes.get(name, default_value) if str_val is not None: self.set_attribute( _to_camel_case(f"default_{name}" if dynamic_property_name is None else name), str(str_val) ) if hash_name := self.__hashes.get(name): prop_name = _to_camel_case(name if dynamic_property_name is None else dynamic_property_name) if with_update: self.__update_vars.append(f"{prop_name}={hash_name}") self.__set_react_attribute(prop_name, hash_name) return self def __set_function_attribute( self, name: str, default_value: t.Optional[str] = None, optional: t.Optional[bool] = True ): strattr = self.__attributes.get(name, default_value) if strattr is None: if not optional: _warn(f"Property {name} is required for control {self.__control_type}.") return self elif callable(strattr): strattr = self.__hashes.get(name) if strattr is None: return self elif strattr: strattr = str(strattr) func = self.__gui._get_user_function(strattr) if func == 
strattr: _warn(f"{self.__control_type}.{name}: {strattr} is not a function.") return self.set_attribute(_to_camel_case(name), strattr) if strattr else self def __set_string_or_number_attribute(self, name: str, default_value: t.Optional[t.Any] = None): attr = self.__attributes.get(name, default_value) if attr is None: return self if isinstance(attr, numbers.Number): return self.__set_react_attribute(_to_camel_case(name), attr) else: return self.set_attribute(_to_camel_case(name), attr) def __set_react_attribute(self, name: str, value: t.Any): return self.set_attribute(name, "{!" + (str(value).lower() if isinstance(value, bool) else str(value)) + "!}") def _get_adapter(self, var_name: str, property_name: t.Optional[str] = None, multi_selection=True): # noqa: C901 property_name = var_name if property_name is None else property_name lov = self.__get_list_of_(var_name) if isinstance(lov, list): adapter = self.__attributes.get("adapter") if adapter and isinstance(adapter, str): adapter = self.__gui._get_user_function(adapter) if adapter and not callable(adapter): _warn("'adapter' property value is invalid.") adapter = None var_type = self.__attributes.get("type") if isclass(var_type): var_type = var_type.__name__ # type: ignore if not isinstance(var_type, str): elt = None if len(lov) == 0: value = self.__attributes.get("value") if isinstance(value, list): if len(value) > 0: elt = value[0] else: elt = value else: elt = lov[0] var_type = self.__gui._get_unique_type_adapter(type(elt).__name__) if adapter is None: adapter = self.__gui._get_adapter_for_type(var_type) elif var_type == str.__name__ and callable(adapter): var_type += ( _get_expr_var_name(str(adapter.__code__)) if adapter.__name__ == "<lambda>" else _get_expr_var_name(adapter.__name__) ) if lov_name := self.__hashes.get(var_name): if adapter is None: adapter = self.__gui._get_adapter_for_type(lov_name) else: self.__gui._add_type_for_var(lov_name, var_type) if value_name := self.__hashes.get("value"): if adapter is None: adapter = self.__gui._get_adapter_for_type(value_name) else: self.__gui._add_type_for_var(value_name, var_type) if adapter is not None: self.__gui._add_adapter_for_type(var_type, adapter) # type: ignore ret_list = [] if len(lov) > 0: for elt in lov: ret = self.__gui._run_adapter( t.cast(t.Callable, adapter), elt, adapter.__name__ if callable(adapter) else "adapter" ) # type: ignore if ret is not None: ret_list.append(ret) self.__attributes[f"default_{property_name}"] = ret_list ret_list = [] value = self.__attributes.get("value") val_list = value if isinstance(value, list) else [value] for val in val_list: ret = self.__gui._run_adapter( t.cast(t.Callable, adapter), val, adapter.__name__ if callable(adapter) else "adapter", id_only=True ) # type: ignore if ret is not None: ret_list.append(ret) if multi_selection: self.__set_default_value("value", ret_list) else: ret_val = ret_list[0] if len(ret_list) else "" if ret_val == "-1" and self.__attributes.get("unselected_value") is not None: ret_val = str(self.__attributes.get("unselected_value", "")) self.__set_default_value("value", ret_val) return self def __filter_attribute_names(self, names: t.Iterable[str]): return [k for k in self.__attributes if k in names or any(k.startswith(n + "[") for n in names)] def __get_holded_name(self, key: str): name = self.__hashes.get(key) if name: v = self.__attributes.get(key) if isinstance(v, _TaipyBase): return name[: len(v.get_hash()) + 1] return name def __filter_attributes_hashes(self, keys: t.List[str]): hash_names = [k for k in 
self.__hashes if k in keys] attr_names = [k for k in keys if k not in hash_names] return ( {k: v for k, v in self.__attributes.items() if k in attr_names}, {k: self.__get_holded_name(k) for k in self.__hashes if k in hash_names}, ) def __build_rebuild_fn(self, fn_name: str, attribute_names: t.Iterable[str]): rebuild = self.__attributes.get("rebuild", False) rebuild_hash = self.__hashes.get("rebuild") if rebuild_hash or rebuild: attributes, hashes = self.__filter_attributes_hashes(self.__filter_attribute_names(attribute_names)) rebuild_name = f"bool({self.__gui._get_real_var_name(rebuild_hash)[0]})" if rebuild_hash else "None" try: self.__gui._set_building(True) return self.__gui._evaluate_expr( "{" + f'{fn_name}({rebuild}, {rebuild_name}, "{quote(json.dumps(attributes))}", "{quote(json.dumps(hashes))}", {", ".join([f"{k}={v2}" for k, v2 in {v: self.__gui._get_real_var_name(v)[0] for v in hashes.values()}.items()])})' + "}" ) finally: self.__gui._set_building(False) return None def _get_dataframe_attributes(self) -> "_Builder": date_format = _add_to_dict_and_get(self.__attributes, "date_format", "MM/dd/yyyy") data = self.__attributes.get("data") data_hash = self.__hashes.get("data", "") col_types = self.__gui._accessors._get_col_types(data_hash, _TaipyData(data, data_hash)) col_dict = _get_columns_dict( data, self.__attributes.get("columns", {}), col_types, date_format, self.__attributes.get("number_format") ) rebuild_fn_hash = self.__build_rebuild_fn( self.__gui._get_rebuild_fn_name("_tbl_cols"), _Builder.__TABLE_COLUMNS_DEPS ) if rebuild_fn_hash: self.__set_react_attribute("columns", rebuild_fn_hash) if col_dict is not None: _enhance_columns(self.__attributes, self.__hashes, col_dict, self.__element_name) self.__set_json_attribute("defaultColumns", col_dict) if line_style := self.__attributes.get("style"): if callable(line_style): value = self.__hashes.get("style") elif isinstance(line_style, str): value = line_style.strip() else: value = None if value in col_types.keys(): _warn(f"{self.__element_name}: style={value} must not be a column name.") elif value: self.set_attribute("lineStyle", value) if tooltip := self.__attributes.get("tooltip"): if callable(tooltip): value = self.__hashes.get("tooltip") elif isinstance(tooltip, str): value = tooltip.strip() else: value = None if value in col_types.keys(): _warn(f"{self.__element_name}: tooltip={value} must not be a column name.") elif value: self.set_attribute("tooltip", value) return self def _get_chart_config(self, default_type: str, default_mode: str): self.__attributes["_default_type"] = default_type self.__attributes["_default_mode"] = default_mode rebuild_fn_hash = self.__build_rebuild_fn( self.__gui._get_rebuild_fn_name("_chart_conf"), _CHART_NAMES + ("_default_type", "_default_mode", "data") ) if rebuild_fn_hash: self.__set_react_attribute("config", rebuild_fn_hash) # read column definitions data = self.__attributes.get("data") data_hash = self.__hashes.get("data", "") col_types = self.__gui._accessors._get_col_types(data_hash, _TaipyData(data, data_hash)) config = _build_chart_config(self.__gui, self.__attributes, col_types) self.__set_json_attribute("defaultConfig", config) self._set_chart_selected(max=len(config.get("traces", ""))) self.__set_refresh_on_update() return self def _set_string_with_check(self, var_name: str, values: t.List[str], default_value: t.Optional[str] = None): value = self.__attributes.get(var_name, default_value) if value is not None: value = str(value).lower() self.__attributes[var_name] = value if value not 
in values: _warn(f"{self.__element_name}: {var_name}={value} should be in {values}.") else: self.__set_string_attribute(var_name, default_value) return self def __set_list_attribute( self, name: str, hash_name: t.Optional[str], val: t.Any, elt_type: t.Type, dynamic=True ) -> t.List[str]: if not hash_name and isinstance(val, str): val = [elt_type(t.strip()) for t in val.split(";")] if isinstance(val, list): if hash_name and dynamic: self.__set_react_attribute(name, hash_name) return [f"{name}={hash_name}"] else: self.__set_json_attribute(name, val) elif val is not None: _warn(f"{self.__element_name}: {name} should be a list of {elt_type}.") return [] def _set_chart_selected(self, max=0): name = "selected" default_sel = self.__attributes.get(name) idx = 1 name_idx = f"{name}[{idx}]" sel = self.__attributes.get(name_idx) while idx <= max: if sel is not None or default_sel is not None: self.__update_vars.extend( self.__set_list_attribute( f"{name}{idx - 1}", self.__hashes.get(name_idx if sel is not None else name), sel if sel is not None else default_sel, int, ) ) idx += 1 name_idx = f"{name}[{idx}]" sel = self.__attributes.get(name_idx) def _get_list_attribute(self, name: str, list_type: PropertyType): varname = self.__hashes.get(name) if varname is None: list_val = self.__attributes.get(name) if isinstance(list_val, str): list_val = list(list_val.split(";")) if isinstance(list_val, list): # TODO catch the cast exception if list_type.value == PropertyType.number.value: list_val = [int(v) for v in list_val] else: list_val = [int(v) for v in list_val] else: if list_val is not None: _warn(f"{self.__element_name}: {name} should be a list.") list_val = [] self.__set_react_attribute(_to_camel_case(name), list_val) else: self.__set_react_attribute(_to_camel_case(name), varname) return self def __set_class_names(self): self.set_attribute("libClassName", self.__lib_name + "-" + self.__control_type.replace("_", "-")) return self.__set_dynamic_string_attribute("class_name", dynamic_property_name="dynamic_class_name") def _set_dataType(self): value = self.__attributes.get("value") return self.set_attribute("dataType", _get_data_type(value)) def _set_file_content(self, var_name: str = "content"): if hash_name := self.__hashes.get(var_name): self.__set_update_var_name(hash_name) else: _warn(f"file_selector: {var_name} should be bound.") return self def _set_content(self, var_name: str = "content", image=True): content = self.__attributes.get(var_name) hash_name = self.__hashes.get(var_name) if content is None and hash_name is None: return self value = self.__gui._get_content(hash_name or var_name, content, image) if hash_name: hash_name = self.__get_typed_hash_name(hash_name, PropertyType.image if image else PropertyType.content) if hash_name: self.__set_react_attribute( var_name, _get_client_var_name(hash_name), ) return self.set_attribute(_to_camel_case(f"default_{var_name}"), value) def _set_lov(self, var_name="lov", property_name: t.Optional[str] = None): property_name = var_name if property_name is None else property_name self.__set_list_of_(f"default_{property_name}") if hash_name := self.__hashes.get(var_name): hash_name = self.__get_typed_hash_name(hash_name, PropertyType.lov) self.__update_vars.append(f"{property_name}={hash_name}") self.__set_react_attribute(property_name, hash_name) return self def __set_dynamic_string_list(self, var_name: str, default_value: t.Any): hash_name = self.__hashes.get(var_name) loi = self.__attributes.get(var_name) if loi is None: loi = default_value if 
isinstance(loi, str): loi = [s.strip() for s in loi.split(";") if s.strip()] if isinstance(loi, list): self.__set_json_attribute(_to_camel_case(f"default_{var_name}"), loi) if hash_name: self.__update_vars.append(f"{var_name}={hash_name}") self.__set_react_attribute(var_name, hash_name) return self def __set_dynamic_number_attribute(self, var_name: str, default_value: t.Any): hash_name = self.__hashes.get(var_name) numVal = self.__attributes.get(var_name) if numVal is None: numVal = default_value if isinstance(numVal, str): try: numVal = float(numVal) except Exception as e: _warn(f"{self.__element_name}: {var_name} cannot be transformed into a number", e) numVal = 0 if isinstance(numVal, numbers.Number): self.__set_react_attribute(_to_camel_case(f"default_{var_name}"), numVal) elif numVal is not None: _warn(f"{self.__element_name}: {var_name} value is not valid ({numVal}).") if hash_name: hash_name = self.__get_typed_hash_name(hash_name, PropertyType.number) self.__update_vars.append(f"{var_name}={hash_name}") self.__set_react_attribute(var_name, hash_name) return self def __set_default_value( self, var_name: str, value: t.Optional[t.Any] = None, native_type: bool = False, var_type: t.Optional[PropertyType] = None, ): if value is None: value = self.__attributes.get(var_name) default_var_name = _to_camel_case(f"default_{var_name}") if isinstance(value, (datetime, date, time)): return self.set_attribute(default_var_name, _date_to_string(value)) elif isinstance(value, str): return self.set_attribute(default_var_name, value) elif native_type and isinstance(value, numbers.Number): return self.__set_react_attribute(default_var_name, value) elif value is None: return self.__set_react_attribute(default_var_name, "null") elif var_type == PropertyType.lov_value: # Done by _get_adapter return self elif isclass(var_type) and issubclass(var_type, _TaipyBase): # type: ignore return self.__set_default_value(var_name, t.cast(t.Callable, var_type)(value, "").get()) else: return self.__set_json_attribute(default_var_name, value) def __set_update_var_name(self, hash_name: str): return self.set_attribute("updateVarName", hash_name) def set_value_and_default( self, var_name: t.Optional[str] = None, with_update=True, with_default=True, native_type=False, var_type: t.Optional[PropertyType] = None, default_val: t.Any = None, ): """ TODO-undocumented Sets the value associated with the default property. Arguments: var_name (str): The property name (default to default property name). with_update (optional(bool)): Should the attribute be dynamic (default True). with_default (optional(bool)): Should a default attribute be set (default True). native_type (optional(bool)): If var_type == dynamic_number, parse the value to number. var_type (optional(PropertyType)): the property type (default to string). default_val (optional(Any)): the default value. 
""" var_name = self.__default_property_name if var_name is None else var_name if var_type == PropertyType.slider_value: if self.__attributes.get("lov"): var_type = PropertyType.lov_value native_type = False else: var_type = ( PropertyType.dynamic_lo_numbers if isinstance(self.__attributes.get("value"), list) else PropertyType.dynamic_number ) native_type = True if var_type == PropertyType.dynamic_boolean: return self.set_attributes([(var_name, var_type, bool(default_val), with_update)]) if hash_name := self.__hashes.get(var_name): hash_name = self.__get_typed_hash_name(hash_name, var_type) self.__set_react_attribute( _to_camel_case(var_name), _get_client_var_name(hash_name), ) if with_update: self.__set_update_var_name(hash_name) if with_default: if native_type: val = self.__attributes.get(var_name) if native_type and isinstance(val, str): with contextlib.suppress(Exception): val = float(val) self.__set_default_value(var_name, val, native_type=native_type) else: self.__set_default_value(var_name, var_type=var_type) else: value = self.__attributes.get(var_name) if value is not None: if native_type: if isinstance(value, str): with contextlib.suppress(Exception): value = float(value) if isinstance(value, (int, float)): return self.__set_react_attribute(_to_camel_case(var_name), value) self.set_attribute(_to_camel_case(var_name), value) return self def _set_labels(self, var_name: str = "labels"): if value := self.__attributes.get(var_name): if _is_boolean_true(value): return self.__set_react_attribute(_to_camel_case(var_name), True) elif isinstance(value, (dict, _MapDict)): return self.set_dict_attribute(var_name) return self def _set_partial(self): if self.__control_type not in _Builder.__BLOCK_CONTROLS: return self if partial := self.__attributes.get("partial"): if self.__attributes.get("page"): _warn(f"{self.__element_name} control: page and partial should not be both defined.") if isinstance(partial, Partial): self.__attributes["page"] = partial._route self.__set_react_attribute("partial", partial._route) self.__set_react_attribute("defaultPartial", True) return self def _set_propagate(self): val = self.__get_boolean_attribute("propagate", self.__gui._config.config.get("propagate")) return self if val else self.set_boolean_attribute("propagate", False) def __set_refresh_on_update(self): if self.__update_vars: self.set_attribute("updateVars", ";".join(self.__update_vars)) return self def _set_table_pagesize_options(self, default_size=[50, 100, 500]): page_size_options = self.__attributes.get("page_size_options", default_size) if isinstance(page_size_options, str): try: page_size_options = [int(s.strip()) for s in page_size_options.split(";")] except Exception as e: _warn(f"{self.__element_name}: page_size_options value is invalid ({page_size_options})", e) if isinstance(page_size_options, list): self.__set_json_attribute("pageSizeOptions", page_size_options) else: _warn(f"{self.__element_name}: page_size_options should be a list.") return self def _set_input_type(self, type_name: str, allow_password=False): if allow_password and self.__get_boolean_attribute("password", False): return self.set_attribute("type", "password") return self.set_attribute("type", type_name) def _set_kind(self): if self.__attributes.get("theme", False): self.set_attribute("kind", "theme") return self def __get_typed_hash_name(self, hash_name: str, var_type: t.Optional[PropertyType]) -> str: if taipy_type := _get_taipy_type(var_type): expr = self.__gui._get_expr_from_hash(hash_name) hash_name = 
self.__gui._evaluate_bind_holder(taipy_type, expr) return hash_name def __set_dynamic_bool_attribute(self, name: str, def_val: t.Any, with_update: bool, update_main=True): hash_name = self.__hashes.get(name) val = self.__get_boolean_attribute(name, def_val) default_name = f"default_{name}" if hash_name is not None else name if val != def_val: self.set_boolean_attribute(default_name, val) if hash_name is not None: hash_name = self.__get_typed_hash_name(hash_name, PropertyType.dynamic_boolean) self.__set_react_attribute(_to_camel_case(name), _get_client_var_name(hash_name)) if with_update: if update_main: self.__set_update_var_name(hash_name) else: self.__update_vars.append(f"{_to_camel_case(name)}={hash_name}") def __set_dynamic_property_without_default( self, name: str, property_type: PropertyType, optional: t.Optional[bool] = False ): hash_name = self.__hashes.get(name) if hash_name is None: if not optional: _warn(f"{self.__element_name}.{name} should be bound.") else: hash_name = self.__get_typed_hash_name(hash_name, property_type) self.__update_vars.append(f"{_to_camel_case(name)}={hash_name}") self.__set_react_attribute(_to_camel_case(name), _get_client_var_name(hash_name)) return self def __set_html_content(self, name: str, property_name: str, property_type: PropertyType): hash_name = self.__hashes.get(name) if not hash_name: return self front_var = self.__get_typed_hash_name(hash_name, property_type) self.set_attribute( _to_camel_case(f"default_{property_name}"), self.__gui._get_user_content_url( None, { "variable_name": front_var, self.__gui._HTML_CONTENT_KEY: str(_time.time()), }, ), ) return self.__set_react_attribute(_to_camel_case(property_name), _get_client_var_name(front_var)) def set_attributes(self, attributes: t.List[tuple]): # noqa: C901 """ TODO-undocumented Sets the attributes from the property with type and default value. Arguments: attributes (list(tuple)): The list of attributes as (property name, property type, default value). 
""" for attr in attributes: if not isinstance(attr, tuple): attr = (attr,) var_type = _get_tuple_val(attr, 1, PropertyType.string) if var_type == PropertyType.boolean: def_val = _get_tuple_val(attr, 2, False) val = self.__get_boolean_attribute(attr[0], def_val) if val != def_val: self.set_boolean_attribute(attr[0], val) elif var_type == PropertyType.dynamic_boolean: self.__set_dynamic_bool_attribute( attr[0], _get_tuple_val(attr, 2, False), _get_tuple_val(attr, 3, False), _get_tuple_val(attr, 4, True), ) elif var_type == PropertyType.number: self.set_number_attribute(attr[0], _get_tuple_val(attr, 2, None)) elif var_type == PropertyType.dynamic_number: self.__set_dynamic_number_attribute(attr[0], _get_tuple_val(attr, 2, None)) elif var_type == PropertyType.string: self.__set_string_attribute(attr[0], _get_tuple_val(attr, 2, None), _get_tuple_val(attr, 3, True)) elif var_type == PropertyType.dynamic_string: self.__set_dynamic_string_attribute( attr[0], _get_tuple_val(attr, 2, None), _get_tuple_val(attr, 3, False) ) elif var_type == PropertyType.string_list: self.__set_list_attribute( attr[0], self.__hashes.get(attr[0]), self.__attributes.get(attr[0]), str, False ) elif var_type == PropertyType.function: self.__set_function_attribute(attr[0], _get_tuple_val(attr, 2, None), _get_tuple_val(attr, 3, True)) elif var_type == PropertyType.react: prop_name = _to_camel_case(attr[0]) if hash_name := self.__hashes.get(attr[0]): self.__update_vars.append(f"{prop_name}={hash_name}") self.__set_react_attribute(prop_name, hash_name) else: self.__set_react_attribute(prop_name, self.__attributes.get(attr[0], _get_tuple_val(attr, 2, None))) elif var_type == PropertyType.broadcast: self.__set_react_attribute( _to_camel_case(attr[0]), _get_broadcast_var_name(_get_tuple_val(attr, 2, None)) ) elif var_type == PropertyType.string_or_number: self.__set_string_or_number_attribute(attr[0], _get_tuple_val(attr, 2, None)) elif var_type == PropertyType.dict: self.set_dict_attribute(attr[0], _get_tuple_val(attr, 2, None)) elif var_type == PropertyType.dynamic_dict: self.set_dynamic_dict_attribute(attr[0], _get_tuple_val(attr, 2, None)) elif var_type == PropertyType.dynamic_list: self.__set_dynamic_string_list(attr[0], _get_tuple_val(attr, 2, None)) elif var_type == PropertyType.boolean_or_list: if _is_boolean(self.__attributes.get(attr[0])): self.__set_dynamic_bool_attribute(attr[0], _get_tuple_val(attr, 2, False), True, update_main=False) else: self.__set_dynamic_string_list(attr[0], _get_tuple_val(attr, 2, None)) elif var_type == PropertyType.data: self.__set_dynamic_property_without_default(attr[0], var_type) elif var_type == PropertyType.lov: self._get_adapter(attr[0]) # need to be called before set_lov self._set_lov(attr[0]) elif var_type == PropertyType.lov_value: self.__set_dynamic_property_without_default( attr[0], var_type, _get_tuple_val(attr, 2, None) == "optional" ) elif var_type == PropertyType.toHtmlContent: self.__set_html_content(attr[0], "page", var_type) elif isclass(var_type) and issubclass(var_type, _TaipyBase): if hash_name := self.__hashes.get(attr[0]): prop_name = _to_camel_case(attr[0]) expr = self.__gui._get_expr_from_hash(hash_name) hash_name = self.__gui._evaluate_bind_holder(var_type, expr) self.__update_vars.append(f"{prop_name}={hash_name}") self.__set_react_attribute(prop_name, hash_name) self.__set_refresh_on_update() return self def set_attribute(self, name: str, value: t.Any): """ TODO-undocumented Sets an attribute. Arguments: name (str): The name of the attribute. 
value (Any): The value of the attribute (must be json serializable). """ self.el.set(name, value) return self def get_element(self): """ TODO-undocumented Returns the xml.etree.ElementTree.Element """ return self.el def _build_to_string(self): el_str = str(etree.tostring(self.el, encoding="utf8").decode("utf8")) el_str = el_str.replace("<?xml version='1.0' encoding='utf8'?>\n", "") el_str = el_str.replace("/>", ">") return el_str, self.__element_name
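# Illustrative sketch (not from the Taipy sources above): the serialization
# trick _build_to_string relies on, runnable with the stdlib alone. The XML
# declaration is stripped and self-closing tags are reopened ("/>" -> ">")
# so the fragment reads as JSX-like markup; the element name is invented.
import xml.etree.ElementTree as etree

el = etree.Element("Taipy_text")
el.set("defaultValue", "Hello")
el.set("key", "taipy_text.0")

el_str = etree.tostring(el, encoding="utf8").decode("utf8")
el_str = el_str.replace("<?xml version='1.0' encoding='utf8'?>\n", "")
el_str = el_str.replace("/>", ">")
print(el_str)  # <Taipy_text defaultValue="Hello" key="taipy_text.0" >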
import typing as t

from .._warnings import _warn
from ..types import NumberTypes
from ..utils import _RE_PD_TYPE, _get_date_col_str_name, _MapDict


def _add_to_dict_and_get(dico: t.Dict[str, t.Any], key: str, value: t.Any) -> t.Any:
    if key not in dico.keys():
        dico[key] = value
    return dico[key]


def _get_tuple_val(attr: tuple, index: int, default_val: t.Any) -> t.Any:
    return attr[index] if len(attr) > index else default_val


def _get_columns_dict_from_list(
    col_list: t.Union[t.List[str], t.Tuple[str]], col_types_keys: t.List[str], value: t.Any
):
    col_dict = {}
    idx = 0
    for col in col_list:
        if col in col_types_keys:
            col_dict[col] = {"index": idx}
            idx += 1
        elif col:
            _warn(
                f'Error column "{col}" is not present in the Dataframe "{value.head(0) if hasattr(value, "head") else value}".'
            )
    return col_dict


def _get_columns_dict(  # noqa: C901
    value: t.Any,
    columns: t.Union[str, t.List[str], t.Tuple[str], t.Dict[str, t.Any], _MapDict],
    col_types: t.Optional[t.Dict[str, str]] = None,
    date_format: t.Optional[str] = None,
    number_format: t.Optional[str] = None,
    opt_columns: t.Optional[t.Set[str]] = None,
):
    if col_types is None:
        return None
    # normalized list of known column names; every branch below relies on it
    col_types_keys = [str(c) for c in col_types.keys()]
    col_dict: t.Optional[dict] = None
    if isinstance(columns, str):
        col_dict = _get_columns_dict_from_list([s.strip() for s in columns.split(";")], col_types_keys, value)
    elif isinstance(columns, (list, tuple)):
        col_dict = _get_columns_dict_from_list(columns, col_types_keys, value)
    elif isinstance(columns, _MapDict):
        col_dict = columns._dict.copy()
    elif isinstance(columns, dict):
        col_dict = columns.copy()
    if not isinstance(col_dict, dict):
        _warn("Error: columns attributes should be a string, a list, a tuple or a dict.")
        col_dict = {}
    nb_cols = len(col_dict)
    if nb_cols == 0:
        for col in col_types_keys:
            col_dict[col] = {"index": nb_cols}
            nb_cols += 1
    else:
        col_dict = {str(k): v for k, v in col_dict.items()}
        if opt_columns:
            for col in opt_columns:
                if col in col_types_keys and col not in col_dict:
                    col_dict[col] = {"index": nb_cols}
                    nb_cols += 1
    idx = 0
    for col, ctype in col_types.items():
        col = str(col)
        if col in col_dict:
            re_type = _RE_PD_TYPE.match(ctype)
            grps = re_type.groups() if re_type else ()
            ctype = grps[0] if grps else ctype
            col_dict[col]["type"] = ctype
            col_dict[col]["dfid"] = col
            if len(grps) > 4 and grps[4]:
                col_dict[col]["tz"] = grps[4]
            idx = _add_to_dict_and_get(col_dict[col], "index", idx) + 1
            if ctype == "datetime":
                if date_format:
                    _add_to_dict_and_get(col_dict[col], "format", date_format)
                col_dict[_get_date_col_str_name(col_types.keys(), col)] = col_dict.pop(col)  # type: ignore
            elif number_format and ctype in NumberTypes:
                _add_to_dict_and_get(col_dict[col], "format", number_format)
    return col_dict
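# Illustrative sketch (not from the Taipy sources above): a standalone mirror
# of _get_columns_dict_from_list, showing how a semicolon-separated "columns"
# value becomes the index mapping used above. Column names are invented.
def columns_from_list(col_list, known_cols):
    col_dict, idx = {}, 0
    for col in col_list:
        if col in known_cols:
            col_dict[col] = {"index": idx}  # display position in the table
            idx += 1
        elif col:
            print(f'column "{col}" is not in the dataframe')
    return col_dict


print(columns_from_list([s.strip() for s in "Date;Total;Oops".split(";")], ["Date", "Total", "Rating"]))
# {'Date': {'index': 0}, 'Total': {'index': 1}}  -- plus a warning for "Oops"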
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from __future__ import annotations from datetime import date, datetime, time from json import JSONEncoder from pathlib import Path from flask.json.provider import DefaultJSONProvider from .._warnings import _warn from ..icon import Icon from ..utils import _date_to_string, _MapDict, _TaipyBase def _default(o): if isinstance(o, Icon): return o._to_dict() if isinstance(o, _MapDict): return o._dict if isinstance(o, _TaipyBase): return o.get() if isinstance(o, (datetime, date, time)): return _date_to_string(o) if isinstance(o, Path): return str(o) try: raise TypeError(f"Object of type {o.__class__.__name__} is not JSON serializable") except Exception as e: _warn("Exception in JSONEncoder", e) return None class _TaipyJsonEncoder(JSONEncoder): def default(self, o): return _default(o) class _TaipyJsonProvider(DefaultJSONProvider): default = staticmethod(_default) # type: ignore sort_keys = False
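# Illustrative sketch (not from the Taipy sources above): the encoder is used
# by passing it as `cls` to json.dumps, so Icon, _MapDict, date/time, and Path
# values serialize cleanly. A stdlib-only stand-in for the same pattern:
import json
from datetime import datetime
from json import JSONEncoder


class _DemoEncoder(JSONEncoder):
    def default(self, o):
        if isinstance(o, datetime):
            return o.isoformat()  # _TaipyJsonEncoder delegates to _date_to_string()
        return super().default(o)  # raises TypeError, as _default() does


print(json.dumps({"when": datetime(2023, 1, 1)}, cls=_DemoEncoder))
# {"when": "2023-01-01T00:00:00"}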
from markdown.treeprocessors import Treeprocessor from ..builder import _Builder class _Postprocessor(Treeprocessor): @staticmethod def extend(md, gui, priority): instance = _Postprocessor(md) md.treeprocessors.register(instance, "taipy", priority) instance._gui = gui def run(self, root): MD_PARA_CLASSNAME = "md-para" for p in root.iter(): if p.tag == "p": classes = p.get("class") classes = f"{MD_PARA_CLASSNAME} {classes}" if classes else MD_PARA_CLASSNAME p.set("class", classes) p.tag = "div" if p != root: p.set("key", _Builder._get_key(p.tag)) return root
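# Illustrative sketch (not from the Taipy sources above): what the tree
# processor does to rendered Markdown -- every <p> becomes a
# <div class="md-para ..."> (the unique key attribute is omitted here).
import xml.etree.ElementTree as etree

root = etree.fromstring("<div><p>hello</p><p class='big'>world</p></div>")
for p in root.iter():
    if p.tag == "p":
        classes = p.get("class")
        p.set("class", f"md-para {classes}" if classes else "md-para")
        p.tag = "div"
print(etree.tostring(root, encoding="unicode"))
# <div><div class="md-para">hello</div><div class="md-para big">world</div></div>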
from markdown.inlinepatterns import InlineProcessor from .factory import _MarkdownFactory class _ControlPattern(InlineProcessor): __PATTERN = _MarkdownFactory._TAIPY_START + r"([a-zA-Z][\.a-zA-Z_$0-9]*)(.*?)" + _MarkdownFactory._TAIPY_END @staticmethod def extend(md, gui, priority): instance = _ControlPattern(_ControlPattern.__PATTERN, md) md.inlinePatterns.register(instance, "taipy", priority) instance._gui = gui def handleMatch(self, m, data): return _MarkdownFactory.create_element(self._gui, m.group(1), m.group(2)), m.start(0), m.end(0)
import re from markdown.blockprocessors import BlockProcessor from .factory import _MarkdownFactory class _StartBlockProcessor(BlockProcessor): __RE_FENCE_START = re.compile( _MarkdownFactory._TAIPY_START + r"([a-zA-Z][\.a-zA-Z_$0-9]*)\.start(.*?)" + _MarkdownFactory._TAIPY_END ) # start line __RE_OTHER_FENCE = re.compile( _MarkdownFactory._TAIPY_START + r"([a-zA-Z][\.a-zA-Z_$0-9]*)\.(start|end)(.*?)" + _MarkdownFactory._TAIPY_END ) # start or end tag @staticmethod def extend(md, gui, priority): instance = _StartBlockProcessor(md.parser) md.parser.blockprocessors.register(instance, "taipy", priority) instance._gui = gui def test(self, parent, block): return re.match(_StartBlockProcessor.__RE_FENCE_START, block) def run(self, parent, blocks): original_block = blocks[0] original_match = re.search(_StartBlockProcessor.__RE_FENCE_START, original_block) blocks[0] = re.sub(_StartBlockProcessor.__RE_FENCE_START, "", blocks[0], 1) tag = original_match.group(1) queue = [tag] # Find block with ending fence for block_num, block in enumerate(blocks): matches = re.findall(_StartBlockProcessor.__RE_OTHER_FENCE, block) for match in matches: if queue[-1] == match[0] and match[1] == "end": queue.pop() elif match[1] == "start": queue.append(match[0]) if not queue: # remove end fence blocks[block_num] = re.sub( _MarkdownFactory._TAIPY_START + tag + r"\.end(.*?)" + _MarkdownFactory._TAIPY_END, "", block, 1, ) # render fenced area inside a new div e = _MarkdownFactory.create_element(self._gui, original_match.group(1), original_match.group(2)) parent.append(e) # parse inside blocks self.parser.parseBlocks(e, blocks[: block_num + 1]) # remove used blocks del blocks[: block_num + 1] return True # or could have had no return statement # No closing marker! Restore and do nothing blocks[0] = original_block return False # equivalent to our test() routine returning False
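# Illustrative sketch (not from the Taipy sources above): the nesting check in
# run() is a plain stack -- push on "start", pop on a matching "end", balanced
# when the stack empties. Standalone version with invented tag events:
events = [("layout", "start"), ("part", "start"), ("part", "end"), ("layout", "end")]
queue = [events[0][0]]
for tag, kind in events[1:]:
    if kind == "end" and queue and queue[-1] == tag:
        queue.pop()
    elif kind == "start":
        queue.append(tag)
print("balanced" if not queue else f"unclosed: {queue}")  # balanced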
from typing import Any from markdown.extensions import Extension from .blocproc import _StartBlockProcessor from .control import _ControlPattern from .postproc import _Postprocessor from .preproc import _Preprocessor class _TaipyMarkdownExtension(Extension): config = {"gui": ["", "Gui object for extension"]} def extendMarkdown(self, md): from ...gui import Gui gui = self.config["gui"][0] if not isinstance(gui, Gui): raise RuntimeError("Gui instance is not bound to Markdown Extension") md.registerExtension(self) _Preprocessor.extend(md, gui, 210) _ControlPattern.extend(md, gui, 205) _StartBlockProcessor.extend(md, gui, 175) _Postprocessor.extend(md, gui, 200)
import typing as t from ..factory import _Factory class _MarkdownFactory(_Factory): # Taipy Markdown tags _TAIPY_START = "TaIpY:" _TAIPY_END = ":tAiPy" _TAIPY_BLOCK_TAGS = ["layout", "part", "expandable", "dialog", "pane"] @staticmethod def create_element(gui, control_type: str, all_properties: str) -> t.Union[t.Any, str]: # Create properties dict from all_properties property_pairs = _Factory._PROPERTY_RE.findall(all_properties) properties = {property[0]: property[1] for property in property_pairs} builder_md = _Factory.call_builder(gui, control_type, properties) if builder_md is None: return f"<|INVALID SYNTAX - Control is '{control_type}'|>" return builder_md
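# Illustrative sketch (not from the Taipy sources above): create_element()
# splits the property string with _Factory._PROPERTY_RE (defined in the
# factory module above). Standalone demo of what findall() yields for a
# preprocessed fragment; the property values are invented.
import re

PROPERTY_RE = re.compile(r"\s+([a-zA-Z][\.a-zA-Z_$0-9]*(?:\[(?:.*?)\])?)=\"((?:(?:(?<=\\)\")|[^\"])*)\"")
print(PROPERTY_RE.findall(' label="Select the City" lov="{cities}" multiple="True"'))
# [('label', 'Select the City'), ('lov', '{cities}'), ('multiple', 'True')]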
import re import typing as t from typing import Any, List, Tuple from markdown.preprocessors import Preprocessor as MdPreprocessor from ..._warnings import _warn from ..builder import _Builder from .factory import _MarkdownFactory if t.TYPE_CHECKING: from ...gui import Gui class _Preprocessor(MdPreprocessor): # ---------------------------------------------------------------------- # Finds, in the Markdown text, control declaration constructs: # <|<some value>|> # or # <|<some value>|<control_type>|> # or # <|<some value>|<control_type>|<prop_name[=propvalue]>> # or # <|<control_type>|<prop_name[=propvalue]>> # # These constructs are converted a fragment that the ControlPattern # processes to create the components that get generated. # <control_type> prop_name="prop_value" ... # Note that if a value is provided before the control_type, it is set # as the default property value for that control type. # The default control type is 'text'. # ---------------------------------------------------------------------- # Control in Markdown __CONTROL_RE = re.compile(r"<\|(.*?)\|>") # Opening tag __OPENING_TAG_RE = re.compile(r"<([0-9a-zA-Z\_\.]*)\|((?:(?!\|>).)*)\s*$") # Closing tag __CLOSING_TAG_RE = re.compile(r"^\s*\|([0-9a-zA-Z\_\.]*)>") # Link in Markdown __LINK_RE = re.compile(r"(\[[^\]]*?\]\([^\)]*?\))") # Split properties and control type __SPLIT_RE = re.compile(r"(?<!\\\\)\|") # Property syntax: '<prop_name>[=<prop_value>]' # If <prop_value> is omitted: # '<prop_name>' is equivalent to '<prop_name>=true' # 'not <prop_name>' is equivalent to '<prop_name>=false' # 'not', 'dont', 'don't' are equivalent in this context # Note 1: 'not <prop_name>=<prop_value>' is an invalid syntax # Note 2: Space characters after the equal sign are significative __PROPERTY_RE = re.compile(r"((?:don'?t|not)\s+)?([a-zA-Z][\.a-zA-Z_$0-9]*(?:\[(?:.*?)\])?)\s*(?:=(.*))?$") _gui: "Gui" @staticmethod def extend(md, gui, priority): instance = _Preprocessor(md) md.preprocessors.register(instance, "taipy", priority) instance._gui = gui def _make_prop_pair(self, prop_name: str, prop_value: str) -> Tuple[str, str]: # Un-escape pipe character in property value return (prop_name, prop_value.replace("\\|", "|")) def run(self, lines: List[str]) -> List[str]: new_lines = [] tag_queue = [] for line_count, line in enumerate(lines, start=1): new_line = "" last_index = 0 # Opening tags m = _Preprocessor.__OPENING_TAG_RE.search(line) if m is not None: tag = "part" properties: List[Tuple[str, str]] = [] if m.group(2): tag, properties = self._process_control(m.group(2), line_count, tag) if tag in _MarkdownFactory._TAIPY_BLOCK_TAGS: tag_queue.append((tag, line_count, m.group(1) or None)) new_line_delimeter = "\n" if line.startswith("<|") else "\n\n" line = ( line[: m.start()] + new_line_delimeter + _MarkdownFactory._TAIPY_START + tag + _MarkdownFactory._START_SUFFIX ) for property in properties: prop_value = property[1].replace('"', '\\"') line += f' {property[0]}="{prop_value}"' line += _MarkdownFactory._TAIPY_END + new_line_delimeter else: _warn(f"Invalid tag name '{tag}' in line {line_count}.") # Other controls for m in _Preprocessor.__CONTROL_RE.finditer(line): control_name, properties = self._process_control(m.group(1), line_count) new_line += line[last_index : m.start()] control_text = _MarkdownFactory._TAIPY_START + control_name for property in properties: prop_value = property[1].replace('"', '\\"') control_text += f' {property[0]}="{prop_value}"' control_text += _MarkdownFactory._TAIPY_END new_line += control_text last_index = 
m.end() new_line = line if last_index == 0 else new_line + line[last_index:] # Add key attribute to links line = new_line new_line = "" last_index = 0 for m in _Preprocessor.__LINK_RE.finditer(line): new_line += line[last_index : m.end()] new_line += "{: key=" + _Builder._get_key("link") + "}" last_index = m.end() new_line = line if last_index == 0 else new_line + line[last_index:] # Look for a closing tag m = _Preprocessor.__CLOSING_TAG_RE.search(new_line) if m is not None: if len(tag_queue): open_tag, open_tag_line_count, open_tag_identifier = tag_queue.pop() close_tag_identifier = m.group(1) if close_tag_identifier and not open_tag_identifier: _warn( f"Missing opening '{open_tag}' tag identifier '{close_tag_identifier}' in line {open_tag_line_count}." ) if open_tag_identifier and not close_tag_identifier: _warn( f"Missing closing '{open_tag}' tag identifier '{open_tag_identifier}' in line {line_count}." ) if close_tag_identifier and open_tag_identifier and close_tag_identifier != open_tag_identifier: _warn( f"Unmatched '{open_tag}' tag identifier in line {open_tag_line_count} and line {line_count}." ) new_line = ( new_line[: m.start()] + _MarkdownFactory._TAIPY_START + open_tag + _MarkdownFactory._END_SUFFIX + _MarkdownFactory._TAIPY_END + "\n" + new_line[m.end() :] ) else: new_line = ( new_line[: m.start()] + f"<div>No matching opened tag on line {line_count}</div>" + new_line[m.end() :] ) _warn(f"Line {line_count} has an unmatched closing tag.") # append the new line new_lines.append(new_line) # Issue #337: add an empty string at the beginning of new_lines list if there is not one # so that markdown extension would be able to render properly if new_lines and new_lines[0] != "": new_lines.insert(0, "") # Check for tags left unclosed (but close them anyway) for tag, line_no, _ in tag_queue: new_lines.append( _MarkdownFactory._TAIPY_START + tag + _MarkdownFactory._END_SUFFIX + _MarkdownFactory._TAIPY_END ) _warn(f"Opened tag {tag} in line {line_no} is not closed.") return new_lines def _process_control( self, prop_string: str, line_count: int, default_control_name: str = _MarkdownFactory.DEFAULT_CONTROL ) -> Tuple[str, List[Tuple[str, str]]]: fragments = [f for f in _Preprocessor.__SPLIT_RE.split(prop_string) if f] control_name = None default_prop_name = None default_prop_value = None properties: List[Tuple[str, Any]] = [] for fragment in fragments: if control_name is None and _MarkdownFactory.get_default_property_name(fragment): control_name = fragment elif control_name is None and default_prop_value is None: default_prop_value = fragment elif prop_match := _Preprocessor.__PROPERTY_RE.match(fragment): not_prefix = prop_match.group(1) prop_name = prop_match.group(2) val = prop_match.group(3) if not_prefix and val: _warn(f"Negated property {prop_name} value ignored at {line_count}.") prop_value = "True" if not_prefix: prop_value = "False" elif val: prop_value = val properties.append(self._make_prop_pair(prop_name, prop_value)) elif len(fragment) > 1 and fragment[0] == "{" and fragment[-1] == "}": properties.append(self._make_prop_pair(fragment[1:-1], fragment)) else: _warn(f"Bad Taipy property format at line {line_count}: '{fragment}'.") if control_name is None: if properties and all(attribute != properties[0][0] for attribute in _MarkdownFactory._TEXT_ATTRIBUTES): control_name = properties[0][0] properties = properties[1:] _warn(f'Unrecognized control {control_name} at line {line_count}: "<|{prop_string}|>".') else: control_name = default_control_name if default_prop_value is not None: 
default_prop_name = _MarkdownFactory.get_default_property_name(control_name) # Set property only if it is not already defined if default_prop_name and default_prop_name not in [x[0] for x in properties]: properties.insert(0, self._make_prop_pair(default_prop_name, default_prop_value)) return control_name, properties
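# Illustrative sketch (not from the Taipy sources above): how _process_control
# reads a single <|...|> fragment with __PROPERTY_RE (pattern copied from the
# preprocessor above) -- a bare name means True, a "not "/"don't " prefix means
# False, and "=..." carries an explicit value.
import re

PROPERTY_RE = re.compile(r"((?:don'?t|not)\s+)?([a-zA-Z][\.a-zA-Z_$0-9]*(?:\[(?:.*?)\])?)\s*(?:=(.*))?$")
for fragment in ("multiple", "not multiple", "label=Select the City"):
    m = PROPERTY_RE.match(fragment)
    not_prefix, name, val = m.group(1), m.group(2), m.group(3)
    print(name, "=", "False" if not_prefix else (val if val else "True"))
# multiple = True
# multiple = False
# label = Select the City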
from .parser import _TaipyHTMLParser
import typing as t from ..factory import _Factory class _HtmlFactory(_Factory): @staticmethod def create_element(gui, namespace: str, control_type: str, all_properties: t.Dict[str, str]) -> t.Tuple[str, str]: builder_html = _Factory.call_builder(gui, f"{namespace}.{control_type}", all_properties, True) if builder_html is None: return f"<div>INVALID SYNTAX - Control is '{namespace}:{control_type}'</div>", "div" return builder_html # type: ignore
import re import typing as t from html.parser import HTMLParser from ..._warnings import _warn from .factory import _HtmlFactory class _TaipyHTMLParser(HTMLParser): __TAIPY_NAMESPACE_RE = re.compile(r"([a-zA-Z\_]+):([a-zA-Z\_]*)") def __init__(self, gui): super().__init__() self._gui = gui self.body = "" self.head = [] self.taipy_tag = None self.tag_mapping = {} self.is_body = True self.head_tag = None self._line_count = 0 self._tag_queue = [] # @override def handle_starttag(self, tag, props) -> None: self._tag_queue.append((tag, self._line_count)) if tag == "html": return if self.head_tag is not None: self.head.append(self.head_tag) self.head_tag = None if self.taipy_tag is not None: self.parse_taipy_tag() if tag == "head": self.is_body = False elif tag == "body": self.is_body = True elif m := self.__TAIPY_NAMESPACE_RE.match(tag): self.taipy_tag = _TaipyTag(m.group(1), m.group(2), props) elif not self.is_body: head_props = {prop[0]: prop[1] for prop in props} self.head_tag = {"tag": tag, "props": head_props, "content": ""} else: self.append_data(str(self.get_starttag_text())) # @override def handle_data(self, data: str) -> None: data = data.strip() if data and self.taipy_tag is not None and self.taipy_tag.set_value(data): self.parse_taipy_tag() elif not self.is_body and self.head_tag is not None: self.head_tag["content"] = data else: self.append_data(data) # @override def handle_endtag(self, tag) -> None: if not self._tag_queue: _warn(f"Closing '{tag}' at line {self._line_count} is missing an opening tag.") else: opening_tag, opening_tag_line = self._tag_queue.pop() if opening_tag != tag: _warn( f"Opening tag '{opening_tag}' at line {opening_tag_line} has no matching closing tag '{tag}' at line {self._line_count}." ) if tag in ["head", "body", "html"]: return if self.taipy_tag is not None: self.parse_taipy_tag() if not self.is_body: self.head.append(self.head_tag) self.head_tag = None elif tag in self.tag_mapping: self.append_data(f"</{self.tag_mapping[tag]}>") else: self.append_data(f"</{tag}>") def append_data(self, data: str) -> None: if self.is_body: self.body += data def parse_taipy_tag(self) -> None: tp_string, tp_element_name = self.taipy_tag.parse(self._gui) self.append_data(tp_string) self.tag_mapping[f"{self.taipy_tag.namespace}:{self.taipy_tag.control_type}"] = tp_element_name self.taipy_tag = None def get_jsx(self) -> str: return self.body def feed_data(self, data: str): data_lines = data.split("\n") for line, data_line in enumerate(data_lines): self._line_count = line + 1 self.feed(data_line) while self._tag_queue: opening_tag, opening_tag_line = self._tag_queue.pop() _warn(f"Opening tag '{opening_tag}' at line {opening_tag_line} has no matching closing tag.") class _TaipyTag(object): def __init__(self, namespace: str, tag_name: str, properties: t.List[t.Tuple[str, str]]) -> None: self.namespace = namespace self.control_type = tag_name self.properties = {prop[0]: prop[1] for prop in properties} self.has_set_value = False def set_value(self, value: str) -> bool: if self.has_set_value: return False property_name = _HtmlFactory.get_default_property_name(f"{self.namespace}.{self.control_type}") # Set property only if it is not already defined if property_name and property_name not in self.properties.keys(): self.properties[property_name] = value self.has_set_value = True return True def parse(self, gui) -> t.Tuple[str, str]: for k, v in self.properties.items(): self.properties[k] = v if v is not None else "true" # allow usage of 'class' property in html taipy tag if "class" in 
self.properties and "class_name" not in self.properties: self.properties["class_name"] = self.properties["class"] return _HtmlFactory.create_element(gui, self.namespace, self.control_type, self.properties)
import pandas as pd from taipy import Gui # ---- READ EXCEL ---- df = pd.read_excel( io="data/supermarkt_sales.xlsx", engine="openpyxl", sheet_name="Sales", skiprows=3, usecols="B:R", nrows=1000, ) # Add 'hour' column to dataframe df["hour"] = pd.to_datetime(df["Time"], format="%H:%M:%S").dt.hour # initialization of variables cities = list(df["City"].unique()) types = list(df["Customer_type"].unique()) genders = list(df["Gender"].unique()) city = cities customer_type = types gender = genders layout = {"margin": {"l": 220}} # Markdown for the entire page ## NOTE: {: .orange} references a color from main.css use to style my text ## <text| ## |text> ## "text" here is just a name given to my part/my section ## it has no meaning in the code page = """<|toggle|theme|> <|layout|columns=20 80|gap=30px| <sidebar| ## Please **filter**{: .orange} here: <|{city}|selector|lov={cities}|multiple|label=Select the City|dropdown|on_change=on_filter|width=100%|> <|{customer_type}|selector|lov={types}|multiple|label=Select the Customer Type|dropdown|on_change=on_filter|width=100%|> <|{gender}|selector|lov={genders}|multiple|label=Select the Gender|dropdown|on_change=on_filter|width=100%|> |sidebar> <main_page| # πŸ“Š **Sales**{: .orange} Dashboard <|layout|columns=1 1 1| <total_sales| ## **Total**{: .orange} sales: ### US $ <|{int(df_selection["Total"].sum())}|> |total_sales> <average_rating| ## **Average**{: .orange} Rating: ### <|{round(df_selection["Rating"].mean(), 1)}|> <|{"⭐" * int(round(round(df_selection["Rating"].mean(), 1), 0))}|> |average_rating> <average_sale| ## Average Sales Per **Transaction**{: .orange}: ### US $ <|{round(df_selection["Total"].mean(), 2)}|> |average_sale> |> <br/> Display df_selection in an expandable <|Sales Table|expandable|expanded=False| <|{df_selection}|table|width=100%|page_size=5|rebuild|class_name=table|> |> <charts| <|{sales_by_hour}|chart|x=Hour|y=Total|type=bar|title=Sales by Hour|color=#ff462b|> <|{sales_by_product_line}|chart|x=Total|y=Product|type=bar|orientation=h|title=Sales by Product|layout={layout}|color=#ff462b|> |charts> |main_page> |> Code from [Coding is Fun](https://github.com/Sven-Bo) Get the Taipy Code [here](https://github.com/Avaiga/demo-sales-dashboard) and the original code [here](https://github.com/Sven-Bo/streamlit-sales-dashboard) """ def filter(city, customer_type, gender): df_selection = df[ df["City"].isin(city) & df["Customer_type"].isin(customer_type) & df["Gender"].isin(gender) ] # SALES BY PRODUCT LINE [BAR CHART] sales_by_product_line = ( df_selection[["Product line", "Total"]] .groupby(by=["Product line"]) .sum()[["Total"]] .sort_values(by="Total") ) sales_by_product_line["Product"] = sales_by_product_line.index # SALES BY HOUR [BAR CHART] sales_by_hour = ( df_selection[["hour", "Total"]].groupby(by=["hour"]).sum()[["Total"]] ) sales_by_hour["Hour"] = sales_by_hour.index return df_selection, sales_by_product_line, sales_by_hour def on_filter(state): state.df_selection, state.sales_by_product_line, state.sales_by_hour = filter( state.city, state.customer_type, state.gender ) if __name__ == "__main__": # initialize dataframes df_selection, sales_by_product_line, sales_by_hour = filter( city, customer_type, gender ) # run the app Gui(page).run()
from taipy.gui import Markdown import numpy as np import json from data.data import data type_selector = ['Absolute', 'Relative'] selected_type = type_selector[0] def initialize_world(data): data_world = data.groupby(["Country/Region", 'Date'])\ .sum()\ .reset_index() with open("data/pop.json","r") as f: pop = json.load(f) data_world['Population'] = data_world['Country/Region'].map(lambda x: pop.get(x, [None, 0])[1]) data_world = data_world.dropna()\ .reset_index() data_world['Deaths/100k'] = data_world.loc[:,'Deaths']/data_world.loc[:,'Population']*100000 data_world_pie_absolute = data_world[['Country/Region', 'Deaths', 'Recovered', 'Confirmed']].groupby(["Country/Region"])\ .max()\ .sort_values(by='Deaths', ascending=False)[:20]\ .reset_index() data_world_pie_relative = data_world[['Country/Region', 'Deaths/100k']].groupby(["Country/Region"])\ .max()\ .sort_values(by='Deaths/100k', ascending=False)[:20]\ .reset_index() country_absolute = data_world_pie_absolute['Country/Region'].unique().tolist() country_relative = data_world_pie_relative.loc[:,'Country/Region'].unique().tolist() data_world_evolution_absolute = data_world[data_world['Country/Region'].str.contains('|'.join(country_absolute),regex=True)] data_world_evolution_absolute = data_world_evolution_absolute.pivot(index='Date', columns='Country/Region', values='Deaths')\ .reset_index() data_world_evolution_relative = data_world[data_world['Country/Region'].str.contains('|'.join(country_relative),regex=True)] data_world_evolution_relative = data_world_evolution_relative.pivot(index='Date', columns='Country/Region', values='Deaths/100k')\ .reset_index() return data_world, data_world_pie_absolute, data_world_pie_relative, data_world_evolution_absolute, data_world_evolution_relative data_world,\ data_world_pie_absolute, data_world_pie_relative,\ data_world_evolution_absolute, data_world_evolution_relative = initialize_world(data) data_world_evolution_absolute_properties = {"x":"Date"} cols = [col for col in data_world_evolution_absolute.columns if col != "Date"] for i in range(len(cols)): data_world_evolution_absolute_properties[f'y[{i}]'] = cols[i] data_world_evolution_relative_properties = {"x":"Date"} cols = [col for col in data_world_evolution_relative.columns if col != "Date"] for i in range(len(cols)): data_world_evolution_relative_properties[f'y[{i}]'] = cols[i] world_md = Markdown("pages/world/world.md")
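# Illustrative sketch (not from the Taipy sources above): the loops above
# build "name-indexed" chart properties (y[0], y[1], ...), later regrouped by
# the builder through _get_name_indexed_property. The dict they produce, with
# invented column names:
cols = ["France", "Italy", "Spain"]
props = {"x": "Date"}
for i, col in enumerate(cols):
    props[f"y[{i}]"] = col
print(props)
# {'x': 'Date', 'y[0]': 'France', 'y[1]': 'Italy', 'y[2]': 'Spain'}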
from taipy.gui import Markdown, notify import datetime as dt selected_data_node = None selected_scenario = None selected_date = None default_result = {"Date": [dt.datetime(2020,10,1)], "Deaths": [0], "ARIMA": [0], "Linear Regression": [0]} def on_submission_change(state, submitable, details): if details['submission_status'] == 'COMPLETED': state.refresh('selected_scenario') notify(state, "success", "Predictions ready!") print("Predictions ready!") elif details['submission_status'] == 'FAILED': notify(state, "error", "Submission failed!") print("Submission failed!") else: notify(state, "info", "In progress...") print("In progress...") def on_change_params(state): if state.selected_date.year < 2020 or state.selected_date.year > 2021: notify(state, "error", "Invalid date! Must be between 2020 and 2021") state.selected_date = dt.datetime(2020,10,1) return state.selected_scenario.date.write(state.selected_date.replace(tzinfo=None)) state.selected_scenario.country.write(state.selected_country) notify(state, "success", "Scenario parameters changed!") state['Country'].on_change_country(state) def on_change(state, var_name, var_value): if var_name == 'selected_scenario' and var_value: state.selected_date = state.selected_scenario.date.read() state.selected_country = state.selected_scenario.country.read() predictions_md = Markdown("pages/predictions/predictions.md")
from taipy.gui import Gui
from math import cos, exp

value = 10

page = """
# Taipy *Demo*

Value: <|{value}|text|>

<|{value}|slider|on_change=on_slider|>

<|{data}|chart|>
"""


def compute_data(decay: int) -> list:
    return [cos(i / 6) * exp(-i * decay / 600) for i in range(100)]


def on_slider(state):
    state.data = compute_data(state.value)


data = compute_data(value)

Gui(page).run(use_reloader=True, port=5002)
from taipy.gui import Gui import taipy as tp from pages.country.country import country_md from pages.world.world import world_md from pages.map.map import map_md from pages.predictions.predictions import predictions_md, selected_scenario from pages.root import root, selected_country, selector_country from config.config import Config pages = { '/':root, "Country":country_md, "World":world_md, "Map":map_md, "Predictions":predictions_md } gui_multi_pages = Gui(pages=pages) if __name__ == '__main__': tp.Core().run() gui_multi_pages.run(title="Covid Dashboard")
import yfinance as yf
from taipy.gui import Gui
from taipy.gui.data.decimator import MinMaxDecimator, RDP, LTTB

# yfinance only accepts fixed period values ("1d", "5d", ..., "10y", "ytd", "max");
# "100Y" is not one of them, so use "max" to fetch the full history
df_AAPL = yf.Ticker("AAPL").history(interval="1d", period="max")
df_AAPL["DATE"] = df_AAPL.index.astype("int64").astype(float)

n_out = 500
decimator_instance = MinMaxDecimator(n_out=n_out)

decimate_data_count = len(df_AAPL)

page = """
# Decimator

From a data length of <|{len(df_AAPL)}|> to <|{n_out}|>

## Without decimator

<|{df_AAPL}|chart|x=DATE|y=Open|>

## With decimator

<|{df_AAPL}|chart|x=DATE|y=Open|decimator=decimator_instance|>
"""

gui = Gui(page)
gui.run(port=5026)
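# NOTE: RDP and LTTB are imported above but unused. A minimal sketch of swapping
# in LTTB (Largest-Triangle-Three-Buckets), which tends to preserve the visual
# shape of a series where MinMaxDecimator preserves extreme spikes; same df_AAPL.
lttb_instance = LTTB(n_out=n_out)

page_lttb = """
## With LTTB decimator

<|{df_AAPL}|chart|x=DATE|y=Open|decimator=lttb_instance|>
"""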
# Main Application import os import re from taipy.gui import Gui, notify, navigate import pandas as pd from datetime import datetime import chardet from utils import ( contains_related_word, categorize_columns_by_datatype, generate_prompts, all_chart_types, ) from similar_columns import replace_values_in_string import csv import os from llm_utils import ( prompt_localllm_fsl, prompt_localllm_fsl_plot, ) MAX_FILE_SIZE_MB = 22 # Maximum allowed file size in MB LOCAL_LLM_URL = "http://20.234.124.198:5000/generate_code" ORIGINAL_DATA_PATH = "sales_data_sample.csv" original_data = pd.read_csv(ORIGINAL_DATA_PATH, sep=",", encoding="ISO-8859-1") original_data["ORDERDATE"] = pd.to_datetime(original_data["ORDERDATE"]) original_data = original_data.sort_values(by="ORDERDATE") df = pd.DataFrame(original_data) df.columns = df.columns.str.upper() default_data = original_data.copy() data = df processed_data = original_data.copy() user_input = "" content = None data_path = "" render_examples = True show_tips = True past_prompts = [] plot_result = "" suggested_prompts = [""] * 5 sample_user_inputs = [ "What are the 5 most profitable cities?", "Plot in a bar chart sales of the 5 most profitable cities", "Plot sales by product line in a pie chart", "Plot in a pie chart sales by country", "Display in a bar chart sales by product line", ] show_suggested_prompts = False prompt_mode = True data_mode = False show_modified_data = True edit_table = pd.DataFrame() debug_log = "" expandPromptHelp = False CONTEXT_PATH = "context_data.csv" context_data = pd.read_csv(CONTEXT_PATH, sep=";") context = "" for instruction, code in zip(context_data["instruction"], context_data["code"]): example = f"{instruction}\n{code}\n" context += example # Categorize columns by type for the prompt builder categorized_columns = categorize_columns_by_datatype(df) float_columns = categorized_columns["float_columns"] int_columns = categorized_columns["int_columns"] string_columns = categorized_columns["string_columns"] date_columns = categorized_columns["date_columns"] float_int_columns = float_columns + int_columns date_string_columns = date_columns + string_columns date_string_columns_toggle = date_string_columns.copy() selected_chart_types = "" selected_date_string_columns = "" selected_float_int_columns = "" def reset_prompt_builder(state) -> None: """ Resets the list of possible values for the prompt builder """ state.categorized_columns = categorize_columns_by_datatype(state.data) divide_columns(state) state.selected_chart_types = "" state.selected_date_string_columns = "" state.selected_float_int_columns = "" def divide_columns(state) -> None: """ Divides columns by type for the prompt builder """ state.float_columns = state.categorized_columns["float_columns"] state.int_columns = state.categorized_columns["int_columns"] state.string_columns = state.categorized_columns["string_columns"] state.date_columns = state.categorized_columns["date_columns"] state.float_int_columns = state.float_columns + state.int_columns state.date_string_columns = state.date_columns + state.string_columns state.date_string_columns_toggle = state.date_string_columns.copy() def plot(state) -> None: """ Prompts local starcoder to modify or plot data Args: state (State): Taipy GUI state """ state.p.update_content(state, "") response = prompt_localllm_fsl_plot( state.data.head(), state.user_input, 32, LOCAL_LLM_URL ) code = re.split("\n", response[0])[0] code = f"<{code}" if not code.endswith("|>"): code += "|>" # state.plot_result = plot_prompt(API_URL, 
    #                         headers, context, state, state.user_input)
    output_code = replace_values_in_string(code, state.data.columns.tolist())
    state.plot_result = output_code
    print(f"Plot Code: {state.plot_result}")
    state.debug_log = state.debug_log + f"; Generated Taipy Code: {state.plot_result}"
    state.p.update_content(state, state.plot_result)
    notify(state, "success", "Plot Updated!")


def uppercase_field_labels(code):
    # Use a regular expression to find the text enclosed between [' and ']
    # and uppercase it
    pattern = r"\['(.*?)'\]"
    modified_code = re.sub(pattern, lambda match: f"['{match.group(1).upper()}']", code)
    return modified_code


def modify_data(state) -> None:
    """
    Prompts the local StarCoder model to modify or plot data
    """
    notify(state, "info", "Running query...")
    reset_data(state)
    state.content = None
    current_time = datetime.now().strftime("%H:%M")
    state.past_prompts = [current_time + "\n" + state.user_input] + state.past_prompts
    print(f"User Input: {state.user_input}")
    response = prompt_localllm_fsl(state.data, state.user_input, 64, LOCAL_LLM_URL)
    # code = re.split('|', response[0])[0]
    code = response[0].split("|")[0]
    code = uppercase_field_labels(code)
    plot_index = code.find(".plot")
    if plot_index != -1:
        code = code[:plot_index]

    # Create a dictionary for globals and locals to use in the exec() function
    globals_dict = {}
    locals_dict = {"df": state.data}  # Include 'df' if it's not already available

    # Execute the code as a string
    import_code = "import pandas as pd;"
    # If code does not start with "df = ", add it
    if not code.startswith("df = "):
        code = "df = " + code
    print(f"Data Code: {code}")
    state.debug_log = f"Generated Pandas Code: {code}"
    try:
        exec(import_code + code, globals_dict, locals_dict)
        pandas_output = locals_dict["df"]
    except Exception as e:
        on_exception(state, "modify_data", e)
        return

    # Parse if output is DataFrame, Series, string...
if isinstance(pandas_output, pd.DataFrame): state.data = pandas_output notify(state, "success", "Data successfully modified!") elif isinstance(pandas_output, pd.Series): state.data = pd.DataFrame(pandas_output).reset_index() notify(state, "success", "Data successfully modified!") # If int, str, float, bool, list elif isinstance(pandas_output, (int, str, float, bool, list)): state.data = pd.DataFrame([pandas_output]) notify(state, "success", "Data successfully modified!") # Everything else else: state.data = state.data state.show_modified_data = True # If user asked for a plot if contains_related_word(state.user_input): state.show_modified_data = True plot(state) def on_exception(state, function_name: str, ex: Exception) -> None: """ Catches exceptions and notifies user in Taipy GUI Args: state (State): Taipy GUI state function_name (str): Name of function where exception occured ex (Exception): Exception """ notify(state, "error", f"An error occured in {function_name}: {ex}") def reset_data(state) -> None: """ Resets data to original data, resets plot """ state.data = state.default_data.copy() def example(state, id, _) -> None: """ Runs an example prompt """ _index = int(id.split("example")[1]) state.user_input = state.sample_user_inputs[_index] modify_data(state) def suggest_prompt(state, id, _) -> None: """ Runs an suggest prompt """ _index = int(id.split("suggest")[1]) state.user_input = state.suggested_prompts[_index] modify_data(state) def remove_spaces_and_convert_to_numeric(value): if isinstance(value, str): return pd.to_numeric(value.replace(" ", ""), errors="coerce") return value def read_data(file_path: str): """ Read csv file from a path and remove spaces from columns with numeric values Args: file_path: Path to csv file """ try: # Check the file size file_size_mb = os.path.getsize(file_path) / (1024 * 1024) # in MB if file_size_mb > MAX_FILE_SIZE_MB: print( f"File size exceeds {MAX_FILE_SIZE_MB}MB. Please choose a smaller file." 
            )
            return "Max_File"

        # Detect the file encoding
        with open(file_path, "rb") as file:
            result = chardet.detect(file.read())
            detected_encoding = result["encoding"]

        # Detect the delimiter using csv.Sniffer
        try:
            with open(file_path, "r", encoding=detected_encoding) as file:
                sniffer = csv.Sniffer()
                sample_data = file.read(1024)  # Read a sample of the data
                delimiter = sniffer.sniff(sample_data).delimiter
        except Exception as e:
            print(f"Error detecting delimiter: {e}")
            delimiter = ","

        output_csv_file_path = "modified_file.csv"
        rows = []

        # Open the input CSV file for reading and the output CSV file for writing
        with open(file_path, "r") as input_file, open(
            output_csv_file_path, "w"
        ) as output_file:
            # Iterate through each line in the input file
            csv_reader = csv.reader(input_file)

            # Iterate through each row in the CSV file, skipping junk rows
            # (rows with more than two empty cells) until a header is found
            found_header = False
            for row in csv_reader:
                found = 0
                for cell in row:
                    if cell == "":
                        found = found + 1
                if found_header:
                    rows.append(row)
                elif found <= 2:
                    found_header = True
                    rows.append(row)

            # Write the retained rows to the output CSV file
            csv_writer = csv.writer(output_file)
            for row in rows:
                csv_writer.writerow(row)

        # Read the data using detected encoding and delimiter
        df = pd.read_csv(
            output_csv_file_path,
            encoding=detected_encoding,
            delimiter=delimiter,
            on_bad_lines="skip",
        )

        # Remove spaces in numeric columns (e.g. "1 234 567")
        columns_with_spaces = []
        for column in df.columns:
            if df[column].dtype == "object":  # Check if the column contains text
                if df[column].str.contains(r"\d{1,3}(?: \d{3})+").any():
                    columns_with_spaces.append(column)

        for column in columns_with_spaces:
            df[column] = df[column].apply(remove_spaces_and_convert_to_numeric)
        return df

    except Exception as e:
        print(f"Error reading data: {e}")
        return None


def data_upload(state) -> None:
    """
    Changes dataset to uploaded dataset
    Generate prompt suggestions
    """
    state.p.update_content(state, "")
    state.suggested_prompts = []
    state.show_tips = False
    content = read_data(state.data_path)
    # read_data returns the string "Max_File" when the file is too large
    if isinstance(content, str):
        notify(state, "error", f"File size exceeds {MAX_FILE_SIZE_MB}MB.")
        return None
    state.default_data = content
    df = pd.DataFrame(state.default_data)
    df.columns = df.columns.str.upper()

    # Get the list of columns grouped by data type
    categorized_columns = categorize_columns_by_datatype(df)

    # Prompt builder
    state.categorized_columns = categorize_columns_by_datatype(df)
    divide_columns(state)

    prompts = generate_prompts(state.categorized_columns, 5)
    state.suggested_prompts = prompts

    # Convert specified columns to datetime
    for column in categorized_columns["date_columns"]:
        df[column] = pd.to_datetime(df[column], errors="coerce")

    # Convert specified columns to string
    for column in categorized_columns["string_columns"]:
        df[column] = df[column].astype("string")

    state.data = df
    state.processed_data = state.default_data.copy()
    state.render_examples = False
    state.show_suggested_prompts = True
    show_columns_fix(state)


def reset_app(state) -> None:
    """
    Resets app to original state
    """
    state.p.update_content(state, "")
    state.default_data = original_data.copy()
    reset_data(state)
    state.user_input = ""
    state.content = None
    state.data_path = ""
    state.render_examples = True
    state.show_tips = True
    state.past_prompts = []
    state.plot_result = ""
    state.suggested_prompts = [""] * 5
    state.show_suggested_prompts = False
    state.prompt_mode = True
    state.data_mode = False
    state.show_modified_data = True
    state.edit_table = pd.DataFrame()
    state.processed_data = original_data.copy()
    show_columns_fix(state)
    reset_prompt_builder(state)
    state.categorized_columns = categorize_columns_by_datatype(df)
    navigate(state, force=True)


def show_prompt(state, id, action) -> None:
    """
    Selects the active page between "Prompt" and "Data Processing"
    """
    show_columns_fix(state)
    if "show_prompt_button" in id:
        state.prompt_mode = True
        state.data_mode = False
    if "show_data_processing_button" in id:
        state.prompt_mode = False
        state.data_mode = True


def show_columns_fix(state):
    """
    On Data Processing Page, generate the title and data type text fields
    """
    # Get the titles and data types from the header
    try:
        df = pd.DataFrame(state.processed_data)
        title_row = df.columns.tolist()
        data_types = df.dtypes.tolist()
        state.edit_table = pd.DataFrame(
            [title_row, [reverse_types_dict[str(d)] for d in data_types]],
            columns=title_row,
        )
        state.partial_columns_fix.update_content(
            state,
            """<|{edit_table}|table|show_all|on_edit=on_edit|width=100%|class_name=edit_table|>
*Accepted values for types are: int, float, str, date, bool*{: .text-small}
            """,
        )
        categorized_columns = categorize_columns_by_datatype(state.data)
        prompts = generate_prompts(categorized_columns, 5)
        state.suggested_prompts = prompts
    except Exception as e:
        print(f"Error reading data: {e}")
        return None


def on_edit(state, var_name, action, payload):
    index = payload["index"]
    col = payload["col"]
    value = payload["value"]
    col = state.edit_table.columns.get_loc(col)
    if index == 0:
        on_title_change(state, index, col, value)
    elif index == 1:
        on_datatype_change(state, index, col, value)
    more_prompt(state)
    reset_prompt_builder(state)
    state.default_data = state.data.copy()


def on_title_change(state, index, col, value):
    """
    Changes the title of a column as requested by the user
    """
    df = pd.DataFrame(state.processed_data)
    df.rename(columns={df.columns[col]: value}, inplace=True)
    # Write the renamed frame back so the change sticks
    state.processed_data = df
    state.data = state.processed_data.copy()
    show_columns_fix(state)


types_dict = {
    "int": "int64",
    "float": "float64",
    "str": "string",
    "date": "datetime64[ns]",
    "bool": "bool",
}
reverse_types_dict = {
    "int64": "int",
    "float64": "float",
    "string": "str",
    "datetime64[ns]": "date",
    "bool": "bool",
    "object": "object",
}


def on_datatype_change(state, index, col, value):
    """
    Changes the data type of a column as requested by the user
    """
    # Check if value is in types_dict
    if value not in types_dict:
        notify(
            state, "error", "The only accepted values are: int, float, str, date, bool"
        )
        return
    value = types_dict[value]
    df = pd.DataFrame(state.processed_data)
    if value in ["int64", "float64"]:
        notify(state, "info", "Non-numeric values will be removed")
        df.iloc[:, col] = pd.to_numeric(df.iloc[:, col], errors="coerce")
        df = df.dropna()
    df.iloc[:, col] = df.iloc[:, col].astype(value)
    # Write the converted frame back so the change sticks
    state.processed_data = df
    state.data = state.processed_data.copy()
    show_columns_fix(state)


def more_prompt(state) -> None:
    """
    Generates more prompt suggestions
    """
    df = pd.DataFrame(state.processed_data)
    categorized_columns = categorize_columns_by_datatype(df)
    prompts = generate_prompts(categorized_columns, 5)
    state.suggested_prompts = prompts


def build_prompt(state) -> None:
    """
    Generates a prompt using the prompt builder
    """
    if state.selected_date_string_columns != "":
        state.user_input = f"Plot a {state.selected_chart_types} of {state.selected_float_int_columns} by {state.selected_date_string_columns}"
    else:
        state.user_input = (
            f"Plot a {state.selected_chart_types} of {state.selected_float_int_columns}"
        )
    modify_data(state)


def on_select_change(state) -> None:
    """
    Restricts the possible values for the prompt builder according to datatype
    """
    if state.selected_chart_types == "histogram":
        state.date_string_columns_toggle = \
[] state.selected_date_string_columns = "" elif state.selected_chart_types == "scatter plot": state.date_string_columns_toggle = ( state.date_string_columns + state.float_int_columns ) else: state.date_string_columns_toggle = state.date_string_columns page = """ <|layout|columns=300px 1| <|part|render=True|class_name=sidebar| # Talk To **Taipy**{: .color-primary} # {: .logo-text} <|Reset App|button|on_action=reset_app|class_name=fullwidth plain|id=reset_app_button|> ### Previous activities ### {: .h5 .mt2 .mb-half} <|tree|lov={past_prompts[:5]}|class_name=past_prompts_list|multiple|> |> <|part|render=True|class_name=p2| <|part|class_name=tabs pl1 pr1| <|part|render={prompt_mode}| <|Prompt|button|on_action=show_prompt|id=show_prompt_button|class_name=tab active|> <|Data Preprocessing|button|on_action=show_prompt|id=show_data_processing_button|class_name=tab|> |> <|part|render={data_mode}| <|Prompt|button|on_action=show_prompt|id=show_prompt_button|class_name=tab|> <|Data Preprocessing|button|on_action=show_prompt|id=show_data_processing_button|class_name=tab active|> |> |> <|part|render={prompt_mode}| <|card ### Prompt ### {: .h4 .mt0 .mb-half} <|{user_input}|input|on_action=modify_data|class_name=fullwidth|label=Enter your prompt here|id=prompt|change_delay=550|> <|Need help for building a prompt?|expandable|expanded={expandPromptHelp}|class_name=prompt-help mt0| #### Prompt suggestions #### {: .h6 .mt1 .mb-half} <|part|render={show_tips}| <|{sample_user_inputs[0]}|button|on_action=example|class_name=button_link|id=example0|> <|{sample_user_inputs[1]}|button|on_action=example|class_name=button_link|id=example1|> <|{sample_user_inputs[2]}|button|on_action=example|class_name=button_link|id=example2|> <|{sample_user_inputs[3]}|button|on_action=example|class_name=button_link|id=example3|> <|{sample_user_inputs[4]}|button|on_action=example|class_name=button_link|id=example4|> |> <|part|render={show_suggested_prompts}| <|{suggested_prompts[0]}|button|on_action=suggest_prompt|class_name=button_link|id=suggest0|> <|{suggested_prompts[1]}|button|on_action=suggest_prompt|class_name=button_link|id=suggest1|> <|{suggested_prompts[2]}|button|on_action=suggest_prompt|class_name=button_link|id=suggest2|> <|{suggested_prompts[3]}|button|on_action=suggest_prompt|class_name=button_link|id=suggest3|> <|{suggested_prompts[4]}|button|on_action=suggest_prompt|class_name=button_link|id=suggest4|> <|More prompts|button|on_action=more_prompt|id=more_prompt_button|> |> #### Prompt builder ### {: .h6 .mt1 .mb-half} <|layout|columns=auto 1 auto 1 auto 1 auto|class_name=align-columns-center <| Plot a |> <|{selected_chart_types}|selector|lov={all_chart_types}|dropdown=True|on_change=on_select_change|class_name=fullwidth|id=chart_type_select|> <| of |> <|{selected_float_int_columns}|selector|lov={float_int_columns}|dropdown=True|on_change=on_select_change|class_name=fullwidth|id=float_int_select|> <| by |> <|{selected_date_string_columns}|selector|lov={date_string_columns_toggle}|dropdown=True|on_change=on_select_change|class_name=fullwidth|id=date_string_select|> <|Build|button|on_action=build_prompt|class_name=button_link|class_name=plain|> |> |> |> <|part|class_name=card mt1| <|part|render=False| ### Original Data Table ### {: .h4 .mt0 .mb-half} <|{original_data}|table|width=100%|page_size=5|rebuild|class_name=table|> <center> <|{content}|image|width=50%|> </center> |> <|part|render={show_modified_data}| <|Original Data Table|expandable|expanded=False| 
<|{default_data}|table|width=100%|page_size=5|rebuild|class_name=table|> |> <br /> ### Modified Data Table ### {: .h5 .mt0 .mb-half} <|{data}|table|width=100%|page_size=5|rebuild|class_name=table|> |> ### Graphs/Charts ### {: .h5 .mt1 .mb-half} <|part|partial={p}|> |> <|Debug Logs|expandable|expanded=True| <|{debug_log}|text|> |> |> <|part|render={data_mode}| <|card <|layout|columns=1 auto|class_name=align-columns-center ### Data Preprocessing ### {: .h4 .mt0 .mb-half} <|{data_path}|file_selector|on_action=data_upload|label=Upload your CSV file|class_name=plain|> |> #### Edit column names and data types ### {: .h6 .mt1 .mb-half} <|part|partial={partial_columns_fix}|> |> <|part|class_name=card mt1| ### Data Table ### {: .h4 .mt0 .mb-half} <|{data}|table|width=100%|page_size=5|rebuild|> |> |> <br /> Any issues or suggestions? Mail them to: **support@taipy.io**{: .color-primary} We only store the prompts you enter for the sole purpose of improving our product and counting daily active users. We do not store any of your data. For more information, please read our [Privacy Policy](https://www.taipy.io/privacy-policy/) |> |> """ gui = Gui(page) partial_columns_fix = gui.add_partial("") p = gui.add_partial("") gui.run(title="Talk To Taipy", margin="0rem", debug=True, use_reloader=True, port=5039)
# Create an app to upload a csv and display it in a table from taipy.gui import Gui import pandas as pd data = [] data_path = "" def data_upload(state): state.data = pd.read_csv(state.data_path) page = """ <|{data_path}|file_selector|on_action=data_upload|> <|{data}|table|> """ Gui(page).run()
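# NOTE: a slightly hardened variant of the uploader above (sketch): the same
# file_selector flow, but failures to parse the CSV are reported to the user
# with notify() instead of crashing the callback; `data_upload_safe` is ours.
from taipy.gui import notify


def data_upload_safe(state):
    try:
        state.data = pd.read_csv(state.data_path)
    except Exception as e:  # e.g. pd.errors.ParserError, UnicodeDecodeError
        notify(state, "error", f"Could not read CSV: {e}")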
import socket import pickle import math from threading import Thread from taipy.gui import Gui, State, invoke_callback, get_state_id import numpy as np import pandas as pd init_lat = 49.247 init_long = 1.377 factory_lat = 49.246 factory_long = 1.369 diff_lat = abs(init_lat - factory_lat) * 15 diff_long = abs(init_long - factory_long) * 15 lats_unique = np.arange(init_lat - diff_lat, init_lat + diff_lat, 0.001) longs_unique = np.arange(init_long - diff_long, init_long + diff_long, 0.001) countdown = 20 periods = 0 line_data = pd.DataFrame({"Time": [], "Max AQI": []}) drone_data = pd.DataFrame( { "Drone ID": [43, 234, 32, 23, 5, 323, 12, 238, 21, 84], "Battery Level": [ "86%", "56%", "45%", "12%", "85%", "67%", "34%", "78%", "90%", "100%", ], "AQI": [40, 34, 24, 22, 33, 45, 23, 34, 23, 34], "Status": [ "Moving", "Measuring", "Measuring", "Stopped", "Measuring", "Moving", "Moving", "Measuring", "Measuring", "Measuring", ], } ) HOST = "127.0.0.1" PORT = 65432 layout_map = { "mapbox": { "style": "open-street-map", "center": {"lat": init_lat, "lon": init_long}, "zoom": 13, }, "dragmode": "false", "margin": {"l": 0, "r": 0, "b": 0, "t": 0}, } layout_line = { "title": "Max Measured AQI over Time", "yaxis": {"range": [0, 150]}, } options = { "opacity": 0.8, "colorscale": "Bluered", "zmin": 0, "zmax": 140, "colorbar": {"title": "AQI"}, "hoverinfo": "none", } config = {"scrollZoom": False, "displayModeBar": False} def pollution(lat: float, long: float): """ Return pollution level in percentage Pollution should be centered around the factory Pollution should decrease with distance to factory Pollution should have an added random component Args: - lat: latitude - long: longitude Returns: - pollution level """ global countdown return 80 * (0.5 + 0.5 * math.sin(countdown / 20)) * math.exp( -(0.8 * (lat - factory_lat) ** 2 + 0.2 * (long - factory_long) ** 2) / 0.00005 ) + np.random.randint(0, 50) lats = [] longs = [] pollutions = [] times = [] max_pollutions = [] for lat in lats_unique: for long in longs_unique: lats.append(lat) longs.append(long) pollutions.append(pollution(lat, long)) data_province_displayed = pd.DataFrame( { "Latitude": lats, "Longitude": longs, "Pollution": pollutions, } ) max_pollution = data_province_displayed["Pollution"].max() # Socket handler def client_handler(gui: Gui, state_id_list: list): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((HOST, PORT)) s.listen() conn, _ = s.accept() while True: if data := conn.recv(1024 * 1024): pollutions = pickle.loads(data) print(f"Data received: {pollutions[:5]}") if hasattr(gui, "_server") and state_id_list: invoke_callback( gui, state_id_list[0], update_pollutions, [pollutions], ) else: print("Connection closed") break # Gui declaration state_id_list = [] Gui.add_shared_variable("pollutions") def on_init(state: State): state_id = get_state_id(state) if (state_id := get_state_id(state)) is not None and state_id != "": state_id_list.append(state_id) update_pollutions(state, pollutions) def update_pollutions(state: State, val): state.pollutions = val state.data_province_displayed = pd.DataFrame( { "Latitude": lats, "Longitude": longs, "Pollution": state.pollutions, } ) # Add an hour to the time state.periods = state.periods + 1 state.max_pollutions = state.max_pollutions + [max(state.pollutions)] state.times = pd.date_range( "2020-11-04", periods=len(state.max_pollutions), freq="H" ) state.line_data = pd.DataFrame( { "Time": state.times, "Max AQI": state.max_pollutions, } ) page = """ 
<|{data_province_displayed}|chart|type=densitymapbox|plot_config={config}|options={options}|lat=Latitude|lon=Longitude|layout={layout_map}|z=Pollution|mode=markers|class_name=map|height=40vh|> <|layout|columns=1 2 2| <|part|class_name=card| **Max Measured AQI:**<br/><br/><br/> <|{int(data_province_displayed["Pollution"].max())}|indicator|value={int(data_province_displayed["Pollution"].max())}|min=140|max=0|> <br/><br/> **Average Measured AQI:**<br/><br/><br/> <|{int(data_province_displayed["Pollution"].mean())}|indicator|value={int(data_province_displayed["Pollution"].mean())}|min=140|max=0|> |> <|part|class_name=card| <|{drone_data}|table|show_all=True|> |> <|part|class_name=card| <|{line_data[-30:]}|chart|type=lines|x=Time|y=Max AQI|layout={layout_line}|height=40vh|> |> |> """ gui = Gui(page=page) t = Thread( target=client_handler, args=( gui, state_id_list, ), ) t.start() gui.run(run_browser=False)
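# NOTE: the dashboard above only implements the receiving side of the socket.
# A minimal companion producer (sketch; `send_fake_measurements` is ours, not
# part of the app) just connects to HOST:PORT and sends a pickled list of
# values, which client_handler() unpickles and pushes to every client state.
import pickle
import random
import socket
import time


def send_fake_measurements(host="127.0.0.1", port=65432, n_points=100):
    # n_points must match the length of the lat/long grid used by the dashboard
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((host, port))
        while True:
            values = [random.uniform(0, 140) for _ in range(n_points)]
            s.sendall(pickle.dumps(values))
            time.sleep(5)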
""" A page of the application. Page content is imported from the Drift.md file. Please refer to https://docs.taipy.io/en/latest/manuals/gui/pages for more details. """ import taipy as tp from taipy.gui import Markdown import pandas as pd from taipy.gui import notify from configuration.config import scenario_cfg Drift = Markdown("pages/Drift/Drift.md") def merge_data(ref_data: pd.DataFrame, compare_data: pd.DataFrame): """ Merges the reference and comparison data into a single dataframe. The Dataframe is prepared for plotting. Args: ref_data: The reference data. compare_data: The comparison data. Returns: plot_data: The dataset for other columns. sex_data: The dataset for sex distribution. """ bp_data = [ {"Blood Pressure": list(ref_data["blood_pressure"])}, {"Blood Pressure": list(compare_data["blood_pressure"])}, ] # Count the Male and Female rows in ref and compare male_ref = ref_data[ref_data["sex"] == "Male"].shape[0] male_compare = compare_data[compare_data["sex"] == "Male"].shape[0] female_ref = ref_data[ref_data["sex"] == "Female"].shape[0] female_compare = compare_data[compare_data["sex"] == "Female"].shape[0] sex_data = pd.DataFrame( { "Dataset": ["Ref", "Compare"], "Male": [male_ref, male_compare], "Female": [female_ref, female_compare], } ) return bp_data, sex_data def on_ref_change(state): state.ref_data = pd.read_csv("data/" + state.ref_selected + ".csv") state.scenario.reference_data.write(state.ref_data) state.bp_data, state.sex_data = merge_data(state.ref_data, state.compare_data) def on_compare_change(state): state.compare_data = pd.read_csv("data/" + state.compare_selected + ".csv") state.scenario.compare_data.write(state.compare_data) state.bp_data, state.sex_data = merge_data(state.ref_data, state.compare_data) bp_options = [ # First data set displayed as green-ish, and 5 bins { "marker": {"color": "#4A4", "opacity": 0.8}, "nbinsx": 10, }, # Second data set displayed as red-ish, and 25 bins { "marker": {"color": "#A33", "opacity": 0.8, "text": "Compare Data"}, "nbinsx": 10, }, ] bp_layout = { # Overlay the two histograms "barmode": "overlay", "title": "Blood Pressure Distribution (Green = Reference, Red = Compare)", "showlegend": False, } def on_submission_status_change(state, submittable, details): submission_status = details.get("submission_status") if submission_status == "COMPLETED": notify(state, "success", "Drift Detection Completed") state.refresh("scenario")
from taipy.gui import Gui import numpy as np item1 = "None" lov = [1, 2, 3] page = """ <|{item1}|selector|lov={lov}|> """ Gui(page).run()
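# NOTE: a minimal sketch of reacting to the selection above: Taipy invokes a
# global on_change(state, var_name, var_value) whenever a bound variable
# changes, so the selector needs no extra wiring.
def on_change(state, var_name, var_value):
    if var_name == "item1":
        print(f"Selected: {var_value}")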
from taipy.gui import Gui
from math import sin

# Taipy binds module-level variables; Gui.run() takes no `state` argument
amp = 1


def compute_data(amp):
    x = [i / 10 for i in range(100)]
    return {"x": x, "Amplitude": [sin(v) * amp for v in x]}


def update(state):
    state.data = compute_data(state.amp)


data = compute_data(amp)

page = """
Amplitude: <|{amp}|slider|on_change=update|>

<|{data}|chart|x=x|y=Amplitude|>
"""

Gui(page).run()
import numpy as np from taipy.gui import Markdown from data.data import data marker_map = {"color":"Deaths", "size": "Size", "showscale":True, "colorscale":"Viridis"} layout_map = { "dragmode": "zoom", "mapbox": { "style": "open-street-map", "center": { "lat": 38, "lon": -90 }, "zoom": 3} } options = {"unselected":{"marker":{"opacity":0.5}}} def initialize_map(data): data['Province/State'] = data['Province/State'].fillna(data["Country/Region"]) data_province = data.groupby(["Country/Region", 'Province/State', 'Longitude', 'Latitude'])\ .max() data_province_displayed = data_province[data_province['Deaths']>10].reset_index() data_province_displayed['Size'] = np.sqrt(data_province_displayed.loc[:,'Deaths']/data_province_displayed.loc[:,'Deaths'].max())*80 + 3 data_province_displayed['Text'] = data_province_displayed.loc[:,'Deaths'].astype(str) + ' deaths </br> ' + data_province_displayed.loc[:,'Province/State'] return data_province_displayed data_province_displayed = initialize_map(data) map_md = Markdown("pages/map/map.md")
import requests
import json
import base64
import os

# GitHub API setup: read the token from the environment instead of hard-coding
# a credential in the source
token = os.environ.get("GITHUB_TOKEN", "")
headers = {'Authorization': f'token {token}'}

# Function to recursively list files in a repository
def list_files_in_repo(repo_full_name, path=''):
    url = f'https://api.github.com/repos/{repo_full_name}/contents/{path}'
    response = requests.get(url, headers=headers)
    files = response.json()

    file_paths = []
    for file in files:
        if file['type'] == 'file' and (file['name'].endswith('.py') or file['name'].endswith('.md')):
            file_paths.append(file['path'])
        elif file['type'] == 'dir':
            file_paths.extend(list_files_in_repo(repo_full_name, file['path']))
    return file_paths

# Function to get the decoded content of a file
def get_file_content(repo, file_path):
    url = f'https://api.github.com/repos/{repo}/contents/{file_path}'
    response = requests.get(url, headers=headers)
    content = response.json().get('content', '')
    # The contents API returns the file body base64-encoded
    return base64.b64decode(content).decode('utf-8', errors='ignore') if content else ''

# Function to save content to JSONL
def save_to_jsonl(contents, filename='output.jsonl'):
    with open(filename, 'w') as file:
        for content in contents:
            json_record = json.dumps({"text": content})
            file.write(json_record + '\n')

# Function to search repositories
def search_repos(query):
    url = f'https://api.github.com/search/repositories?q={query}'
    response = requests.get(url, headers=headers)
    return response.json()['items']

# Main process
repositories = search_repos('Taipy')
file_contents = []

for repo in repositories:
    file_paths = list_files_in_repo(repo['full_name'])
    for file_path in file_paths:
        content = get_file_content(repo['full_name'], file_path)
        file_contents.append(content)

save_to_jsonl(file_contents)
from taipy.gui import Gui
from math import sin, cos, exp, pi

# Taipy binds module-level variables; Gui.run() takes no `state` argument
frequency = 1
decay = 0.01


def compute_data(frequency, decay):
    x = [i / 10 for i in range(100)]
    return {
        "x": x,
        "Sine": [sin(v * frequency * 2 * pi) * exp(-v * decay) for v in x],
        "Cosine": [cos(v * frequency * 2 * pi) * exp(-v * decay) for v in x],
    }


def update(state):
    state.data = compute_data(state.frequency, state.decay)


data = compute_data(frequency, decay)

page = """
# Sine and Cosine Functions

Frequency: <|{frequency}|slider|min=0|max=10|step=0.1|on_change=update|>

Decay: <|{decay}|slider|min=0|max=1|step=0.01|on_change=update|>

<|{data}|chart|x=x|y[1]=Sine|y[2]=Cosine|>
"""

Gui(page).run(use_reloader=True)
import numpy as np
import pandas as pd
from taipy.gui import Markdown

from data.data import data

selected_country = 'France'
data_country_date = None

representation_selector = ['Cumulative', 'Density']
selected_representation = representation_selector[0]

layout = {'barmode': 'stack', "hovermode": "x"}
options = {"unselected": {"marker": {"opacity": 0.5}}}


def initialize_case_evolution(data, selected_country='France'):
    # Aggregate the dataframe so the regional breakdown, unused here, is collapsed
    data_country_date = data.groupby(["Country/Region", 'Date'])\
        .sum()\
        .reset_index()
    # Keep only the selected country (France by default)
    data_country_date = data_country_date.loc[data_country_date['Country/Region'] == selected_country]
    return data_country_date


data_country_date = initialize_case_evolution(data)
pie_chart = pd.DataFrame({
    "labels": ["Deaths", "Recovered", "Confirmed"],
    "values": [data_country_date.iloc[-1, 6], data_country_date.iloc[-1, 5], data_country_date.iloc[-1, 4]],
})


def convert_density(state):
    if state.selected_representation == 'Density':
        df_temp = state.data_country_date.copy()
        df_temp['Deaths'] = df_temp['Deaths'].diff().fillna(0)
        df_temp['Recovered'] = df_temp['Recovered'].diff().fillna(0)
        df_temp['Confirmed'] = df_temp['Confirmed'].diff().fillna(0)
        state.data_country_date = df_temp
    else:
        state.data_country_date = initialize_case_evolution(data, state.selected_country)


def on_change_country(state):
    # `state` holds all the GUI variables (state.selected_country,
    # state.data_country_date, ...); updating them through it refreshes the GUI
    print("Chosen country: ", state.selected_country)
    state.data_country_date = initialize_case_evolution(data, state.selected_country)
    state.pie_chart = pd.DataFrame({
        "labels": ["Deaths", "Recovered", "Confirmed"],
        "values": [state.data_country_date.iloc[-1, 6], state.data_country_date.iloc[-1, 5], state.data_country_date.iloc[-1, 4]],
    })
    convert_density(state)


country_md = Markdown("pages/country/country.md")
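# NOTE: sketch of the cumulative-to-daily conversion that convert_density()
# performs inline, factored into a helper (the name `to_daily` is ours) so the
# diff/fillna logic can be reused or unit-tested on its own.
def to_daily(df, cols=("Deaths", "Recovered", "Confirmed")):
    out = df.copy()
    for c in cols:
        out[c] = out[c].diff().fillna(0)
    return out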
import requests
import json
import base64
import logging
import os

# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# GitHub API setup: read the token from the environment instead of hard-coding
# a credential in the source
token = os.environ.get("GITHUB_TOKEN", "")
headers = {'Authorization': f'token {token}'}

# Function to search repositories
def search_repos(query):
    url = f'https://api.github.com/search/repositories?q={query}'
    response = requests.get(url, headers=headers)
    return response.json()['items']

# Function to recursively list files in a repository
def list_files_in_repo(repo_full_name, path=''):
    url = f'https://api.github.com/repos/{repo_full_name}/contents/{path}'
    response = requests.get(url, headers=headers)
    files = response.json()

    file_paths = []
    for file in files:
        if file['type'] == 'file' and (file['name'].endswith('.py') or file['name'].endswith('.md')):
            file_paths.append(file['path'])
        elif file['type'] == 'dir':
            file_paths.extend(list_files_in_repo(repo_full_name, file['path']))
    return file_paths

# Function to get the decoded content of a file
def get_file_content(repo, file_path):
    url = f'https://api.github.com/repos/{repo}/contents/{file_path}'
    response = requests.get(url, headers=headers)
    content = response.json().get('content', '')
    # The contents API returns the file body base64-encoded
    return base64.b64decode(content).decode('utf-8', errors='ignore') if content else ''

# Function to save content to JSONL
def save_to_jsonl(contents, filename='output.jsonl'):
    with open(filename, 'w') as file:
        for content in contents:
            json_record = json.dumps({"text": content})
            file.write(json_record + '\n')

# Main process
repositories = search_repos('Taipy')
logging.info(f'Found {len(repositories)} repositories with "Taipy"')

file_contents = []
for repo in repositories:
    logging.info(f'Processing repository: {repo["full_name"]}')
    try:
        file_paths = list_files_in_repo(repo['full_name'])
        logging.info(f'Found {len(file_paths)} .py and .md files in {repo["full_name"]}')
        for file_path in file_paths:
            content = get_file_content(repo['full_name'], file_path)
            file_contents.append(content)
            logging.info(f'Added content from {file_path}')
    except Exception as e:
        logging.error(f'Error processing repository {repo["full_name"]}: {e}')

save_to_jsonl(file_contents)
logging.info('Finished processing all repositories')
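# NOTE: both scrapers above issue unthrottled requests, and the GitHub API
# rate-limits aggressively. A hedged sketch of a retry wrapper (the helper name
# `get_with_backoff` is ours): on 403 with an exhausted quota, sleep until the
# X-RateLimit-Reset epoch reported by the API, then retry once.
import time

import requests


def get_with_backoff(url, headers):
    response = requests.get(url, headers=headers)
    if response.status_code == 403 and response.headers.get("X-RateLimit-Remaining") == "0":
        reset_at = int(response.headers.get("X-RateLimit-Reset", time.time() + 60))
        time.sleep(max(0, reset_at - time.time()) + 1)
        response = requests.get(url, headers=headers)
    return response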
""" Taipy app to generate mandelbrot fractals """ from taipy import Gui import numpy as np from PIL import Image import matplotlib.pyplot as plt WINDOW_SIZE = 500 cm = plt.cm.get_cmap("viridis") def generate_mandelbrot( center: int = WINDOW_SIZE / 2, dx_range: int = 1000, dx_start: float = -0.12, dy_range: float = 1000, dy_start: float = -0.82, iterations: int = 50, max_value: int = 200, i: int = 0, ) -> str: mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE)) for y in range(WINDOW_SIZE): for x in range(WINDOW_SIZE): dx = (x - center) / dx_range + dx_start dy = (y - center) / dy_range + dy_start a = dx b = dy for t in range(iterations): d = (a * a) - (b * b) + dx b = 2 * (a * b) + dy a = d h = d > max_value if h is True: mat[x, y] = t colored_mat = cm(mat / mat.max()) im = Image.fromarray((colored_mat * 255).astype(np.uint8)) path = f"mandelbrot_{i}.png" im.save(path) return path def generate(state): state.i = state.i + 1 state.path = generate_mandelbrot( dx_start=-state.dx_start / 100, dy_start=(state.dy_start - 100) / 100, iterations=state.iterations, i=state.i, ) i = 0 dx_start = 11 dy_start = 17 iterations = 50 path = generate_mandelbrot( dx_start=-dx_start / 100, dy_start=(dy_start - 100) / 100, ) page = """ # Mandelbrot Generator <|layout|columns=35 65| Display image from path <|{path}|image|width=500px|height=500px|class_name=img|> Iterations:<br /> Create a slider to select iterations <|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|><br /> X Position:<br /> <|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br /> Y Position:<br /> Slider dx_start <|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br /> |> """ Gui(page).run(title="Mandelbrot Generator")
from taipy.gui import Markdown import numpy as np from data.data import data selector_country = list(np.sort(data['Country/Region'].astype(str).unique())) selected_country = 'France' root = Markdown("pages/root.md")
from taipy.gui import Gui
from math import cos, exp

value = 10

page = """
# Taipy *Demo*

Value: <|{value}|text|>

<|{value}|slider|>

<|{compute_data(value)}|chart|>
"""


def compute_data(decay: int) -> list:
    return [cos(i / 6) * exp(-i * decay / 600) for i in range(100)]


Gui(page).run(use_reloader=True, port=5003)
# Import from standard library
import logging
import random
import re

# Import from 3rd party libraries
from taipy.gui import Gui, State, notify

# Import modules
import oai

# Configure logger
logging.basicConfig(format="\n%(asctime)s\n%(message)s", level=logging.INFO, force=True)


def error_prompt_flagged(state, prompt):
    """Notify user that a prompt has been flagged."""
    notify(state, "error", "Prompt flagged as inappropriate.")
    logging.info(f"Prompt flagged as inappropriate: {prompt}")


def error_too_many_requests(state):
    """Notify user that too many requests have been made."""
    notify(
        state,
        "error",
        "Too many requests. Please wait a few seconds before generating another text or image.",
    )
    logging.info(f"Session request limit reached: {state.n_requests}")
    state.n_requests = 1


# Define functions
def generate_text(state):
    """Generate Tweet text."""
    state.tweet = ""
    state.image = None

    # Check the number of requests done by the user
    if state.n_requests >= 5:
        error_too_many_requests(state)
        return

    # Check if the user has put a topic
    if state.topic == "":
        notify(state, "error", "Please enter a topic")
        return

    # Create the prompt, including the style only when one was provided
    if state.style != "":
        state.prompt = (
            f"Write a {state.mood} Tweet about {state.topic} in less than 120 characters "
            f"and with the style of {state.style}:\n\n\n\n"
        )
    else:
        state.prompt = f"Write a {state.mood} Tweet about {state.topic} in less than 120 characters:\n\n"

    # openai configured and check if text is flagged
    openai = oai.Openai()
    flagged = openai.moderate(state.prompt)

    if flagged:
        error_prompt_flagged(state, f"Prompt: {state.prompt}\n")
        return
    else:
        # Generate the tweet
        state.n_requests += 1
        state.tweet = openai.complete(state.prompt).strip().replace('"', "")

    # Notify the user in console and in the GUI
    logging.info(
        f"Topic: {state.topic}\nMood: {state.mood}\nStyle: {state.style}\n"
        f"Tweet: {state.tweet}"
    )
    notify(state, "success", "Tweet created!")


def generate_image(state):
    """Generate Tweet image."""
    notify(state, "info", "Generating image...")

    # Check the number of requests done by the user
    if state.n_requests >= 5:
        error_too_many_requests(state)
        return

    state.image = None

    # Creates the prompt
    prompt_wo_hashtags = re.sub("#[A-Za-z0-9_]+", "", state.prompt)
    processing_prompt = (
        "Create a detailed but brief description of an image that captures "
        f"the essence of the following text:\n{prompt_wo_hashtags}\n\n"
    )

    # Openai configured and check if text is flagged
    openai = oai.Openai()
    flagged = openai.moderate(processing_prompt)

    if flagged:
        error_prompt_flagged(state, processing_prompt)
        return
    else:
        state.n_requests += 1

        # Generate the prompt that will create the image
        processed_prompt = (
            openai.complete(prompt=processing_prompt, temperature=0.5, max_tokens=40)
            .strip()
            .replace('"', "")
            .split(".")[0]
            + "."
        )

        # Generate the image
        state.image = openai.image(processed_prompt)

    # Notify the user in console and in the GUI
    logging.info(f"Tweet: {state.prompt}\nImage prompt: {processed_prompt}")
    notify(state, "success", "Image created!")


def feeling_lucky(state):
    """Generate a feeling-lucky tweet."""
    with open("moods.txt") as f:
        sample_moods = f.read().splitlines()
    state.topic = "an interesting topic"
    state.mood = random.choice(sample_moods)
    state.style = ""
    generate_text(state)


# Variables
tweet = ""
prompt = ""
n_requests = 0
topic = "AI"
mood = "inspirational"
style = "elonmusk"
image = None


# Called whenever there is a problem
def on_exception(state, function_name: str, ex: Exception):
    logging.error(f"Problem {ex} \nin {function_name}")
    notify(state, "error", f"Problem {ex} \nin {function_name}")


def update_documents(state: State, docs: list[dict]) -> None:
    """
    Updates a partial with a list of documents

    Args:
        state: The state of the GUI
        docs: A list of documents
    """
    updated_part = ""
    for doc in docs:
        title = doc["title"]
        summary = doc["summary"]
        link = doc["link"]
        updated_part += f"""
<a href="{link}" target="_blank">
<h3>{title}</h3>
</a>
<p>{summary}</p>
<br/>
"""
    state.p.update_content(state, updated_part)


# Markdown for the entire page
## <text|
## |text>
## "text" here is just a name given to my part/my section
## it has no meaning in the code
page = """
<|container|
# **Generate**{: .color-primary} Tweets

This mini-app generates Tweets using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALLΒ·E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).

<br/>

<|layout|columns=1 1 1|gap=30px|class_name=card|
<topic|
## **Topic**{: .color-primary} (or hashtag)
<|{topic}|input|label=Topic (or hashtag)|>
|topic>

<mood|
## **Mood**{: .color-primary}
<|{mood}|input|label=Mood (e.g. inspirational, funny, serious) (optional)|>
|mood>

<style|
## Twitter **account**{: .color-primary}
<|{style}|input|label=Twitter account handle to style-copy recent Tweets (optional)|>
|style>

<|Generate text|button|on_action=generate_text|label=Generate text|>
<|Feeling lucky|button|on_action=feeling_lucky|label=Feeling Lucky|>
|>

<br/>

---

<br/>

### Generated **Tweet**{: .color-primary}

<|{tweet}|input|multiline|label=Resulting tweet|class_name=fullwidth|>

<center><|Generate image|button|on_action=generate_image|label=Generate image|active={prompt!="" and tweet!=""}|></center>

<image|part|render={prompt != "" and tweet != "" and image is not None}|class_name=card|
### **Image**{: .color-primary} from Dall-e
<center><|{image}|image|height=400px|></center>
|image>

<br/>

**Code from [@kinosal](https://twitter.com/kinosal)**

Original code can be found [here](https://github.com/kinosal/tweet)
|>
"""

if __name__ == "__main__":
    Gui(page).run(dark_mode=False, port=5089)
import pymongo from dotenv import load_dotenv import os from taipy.gui import notify import pandas as pd load_dotenv() client = pymongo.MongoClient(os.getenv("MONGO_URI")) db = client["GoShop"] collection_product = db["products"] def insert_one_collection(document): return collection_product.insert_one(document) def all_prodcuts(): cursor = collection_product.find() list_cur = list(cursor) df = pd.DataFrame(list_cur) return df
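# NOTE: sketch of the missing write-back path for the same collection/schema
# used above ({"pid": int, ..., "isavailable": bool}); update_one and delete_one
# are standard pymongo calls, and the helper names are our hypothetical additions.
def mark_product_sold(pid: int):
    return collection_product.update_one({"pid": pid}, {"$set": {"isavailable": False}})


def delete_product(pid: int):
    return collection_product.delete_one({"pid": pid})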
from taipy.gui import Gui, Markdown, navigate
from pages.addproduct import addproduct_md
from pages.home import home_md
from pages.developer import developer_md

favicon_url = "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcStQrffFMV3jCG2wB7o7Bs1VwUJ3Z0sWQhbzA&usqp=CAU"

# root_md = "<|menu|label=Menu|lov={[('home', Icon('https://static.vecteezy.com/system/resources/thumbnails/000/616/494/small/home-06.jpg','home')), ('addproduct', 'addproduct')]}|on_action=on_menu|>"


def on_menu(state, action, info):
    page = info["args"][0]
    navigate(state, to=page)


pages = {
    "/": "<center><|navbar|></center>",
    "home": home_md,
    "addproduct": addproduct_md,
    "team": developer_md,
}

Gui(pages=pages).run(title="Rapid~Receipt", favicon=favicon_url, run_browser=False, use_reloader=True, port=5555)
from taipy.gui import Markdown, notify
from database import all_prodcuts
import pandas

products = all_prodcuts()
del products["_id"]

# Count the two groups first, then map the booleans to display labels;
# DataFrame.replace() returns a new series, so it must be assigned back
yes = int((products["isavailable"] == True).sum())
no = int((products["isavailable"] == False).sum())
products["isavailable"] = products["isavailable"].replace({True: "Yes", False: "No"})

data = {
    "Status": ["Sold", "Available"],
    "Count": [yes, no]
}

# for count vs products
# names_count = {}
# for name in products["name"]:
#     if name in names_count:
#         names_count[name] += 1
#     else:
#         names_count[name] = 1

# pairs = []
# for name, count in names_count.items():
#     for i in range(count):
#         pairs.append([name, count])

# data_p_vs_c = pandas.DataFrame(pairs, columns=["Products", "Quantity"])

home_md = Markdown("""
<|toggle|theme|>
<|container|
# <center>**Rapid~Receipt**{: .color-primary} |Welcome Admin</center>

<|layout|columns=2 1|gap=30px|hover_text=true|
<|
### List of products
<|Table|expandable|
<|{products}|table|width=100%|>
|>
|>

<|
### Availability of products
<|{data}|chart|type=pie|values=Count|labels=Status|>
|>
|>

### Product sales
|>
""")
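# NOTE: the commented-out "count vs products" block above can be collapsed into
# pandas' built-in value_counts (sketch, using the same `products` frame):
data_p_vs_c = (
    products["name"].value_counts()
    .rename_axis("Products").reset_index(name="Quantity")
)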
from taipy.gui import Markdown, notify, navigate
from database import insert_one_collection, all_prodcuts

image1 = "https://www.identixweb.com/wp-content/uploads/2022/01/Add-Customization-for-Custom-Products.png"
image2 = "https://img.freepik.com/free-vector/online-wishes-list-concept-illustration_114360-3900.jpg"

name = ""
price = ""
expiry = ""
pid = ""


def submit_action(state):
    # Every field, including the name, must be filled in
    if state.pid == "" or state.price == "" or state.expiry == "" or state.name == "":
        notify(state, 'error', "Product data is invalid")
        return
    product = {
        "pid": int(state.pid),
        "name": state.name,
        "price": state.price,
        "expiry": state.expiry,
        "isavailable": True,
    }
    insert_one_collection(product)
    notify(state, 'info', "Product inserted")
    navigate(state, to="/home")


addproduct_md = Markdown("""
<|toggle|theme|>
<|container|
## Add **Product**{: .color-primary} πŸ“¦
<|
<|50 50|layout|class_name= card|
<|
<|layout|columns= 1 1|
<|
#### Product Id:
<|{pid}|input|>
|>

<|
#### Product Name:
<|{name}|input|>
|>
|>
<br/><br/>
<br/>
<|
<|layout|columns = 1 1 |
<|
#### Product Price:
<|{price}|label=Price|input|>
|>

<|
#### Product Expiry:
<|{expiry}|input|>
|>
|>
<br/>
<|submit|button|on_action=submit_action|>
|>
|>
<|{image2}|image|height=500px|width=500px|>
|>
|>
|>
""")
from taipy.gui import Markdown

img1 = "https://www.identixweb.com/wp-content/uploads/2022/01/Add-Customization-for-Custom-Products.png"

developer_md = Markdown("""
<|toggle|theme|>
## Our Team **Mahi**{: .color-primary}
<|container|
<|layout|columns= 1 1 1 |gap=30px|
<|
<|{img1}|image|width=100%|>
Suruchi
|>

<|
<|{img1}|image|width=100%|>
Shoaib
|>

<|
<|{img1}|image|width=100%|>
Shivam
|>
|>
|>
""")
# Base packages
import pandas as pd

# Taipy functions
import taipy as tp
from taipy.gui import Gui, Icon

# Import the configuration
from config.config import scenario_cfg
from taipy.core.config.config import Config

import os

# Imported to create a temporary file
import pathlib

# This path is used to create a temporary file that lets the user download a
# table from the Datasources page
tempdir = pathlib.Path(".tmp")
tempdir.mkdir(exist_ok=True)
PATH_TO_TABLE = str(tempdir / "table.csv")

###############################################################################
# Clean the data storage
###############################################################################
Config.configure_global_app(clean_entities_enabled=True)
tp.clean_all_entities()

##############################################################################################################################
# Run the scenario
##############################################################################################################################
def create_first_scenario(scenario_cfg):
    global scenario
    scenario = tp.create_scenario(scenario_cfg)
    tp.submit(scenario)


create_first_scenario(scenario_cfg)

##############################################################################################################################
# Initialization - read the values from the scenario
##############################################################################################################################
forecast_values_baseline = scenario.pipelines['pipeline_baseline'].forecast_dataset.read()
forecast_values = scenario.pipelines['pipeline_model'].forecast_dataset.read()

test_dataset = scenario.pipelines['pipeline_baseline'].test_dataset.read()
train_dataset = scenario.pipelines['pipeline_preprocessing'].train_dataset.read()
roc_dataset = scenario.pipelines['pipeline_baseline'].roc_data.read()

test_dataset.columns = [str(column).upper() for column in test_dataset.columns]

# For data visualization with histograms and scatter plots
select_x = test_dataset.drop('EXITED', axis=1).columns.tolist()
x_selected = select_x[0]

select_y = select_x
y_selected = select_y[1]

##############################################################################################################################
# Initialization - create the datasets that recap the results used by the charts
##############################################################################################################################
from pages.main_dialog import *

values_baseline = scenario.pipelines['pipeline_baseline'].results.read()
values_model = scenario.pipelines['pipeline_model'].results.read()

values = values_baseline.copy()

forecast_series = values['Forecast']
true_series = values['Historical']

scatter_dataset_pred = creation_scatter_dataset_pred(test_dataset, forecast_series)
histo_full_pred = creation_histo_full_pred(test_dataset, forecast_series)

histo_full = creation_histo_full(test_dataset)
scatter_dataset = creation_scatter_dataset(test_dataset)

features_table = scenario.pipelines['pipeline_train_baseline'].feature_importance.read()

# Pipeline comparison - generic code that fetches the right pipelines
pipelines_to_compare = [pipeline for pipeline in scenario.pipelines
                        if 'train' not in pipeline and 'preprocessing' not in pipeline]

accuracy_graph, f1_score_graph, score_auc_graph = compare_models_baseline(scenario, pipelines_to_compare)  # comes from compare_models.py

##############################################################################################################################
# Initialization - create the pie charts showing the accuracy and class distribution of the displayed model
##############################################################################################################################

# Compute the metrics for the 'baseline' model
(number_of_predictions, accuracy, f1_score, score_auc,
 number_of_good_predictions, number_of_false_predictions,
 fp_, tp_, fn_, tn_) = c_update_metrics(scenario, 'pipeline_baseline')

# Pie charts
pie_plotly = pd.DataFrame({"values": [number_of_good_predictions, number_of_false_predictions],
                           "labels": ["Correct predictions", "False predictions"]})

distrib_class = pd.DataFrame({"values": [len(values[values["Historical"] == 0]),
                                         len(values[values["Historical"] == 1])],
                              "labels": ["Stayed", "Exited"]})

##############################################################################################################################
# Initialization - create the false/positive/negative/true tables to be displayed
##############################################################################################################################
score_table = pd.DataFrame({"Score": ["Predicted stayed", "Predicted exited"],
                            "Stayed": [tn_, fp_],
                            "Exited": [fn_, tp_]})

pie_confusion_matrix = pd.DataFrame({"values": [tp_, tn_, fp_, fn_],
                                     "labels": ["True Positive", "True Negative", "False Positive", "False Negative"]})

##############################################################################################################################
# Initialization - create the graphical user interface (state)
##############################################################################################################################

# The list of pages that will be shown in the menu at the left of the page
menu_lov = [("Data Visualization", Icon('images/histogram_menu.svg', 'Data Visualization')),
            ("Model Manager", Icon('images/model.svg', 'Model Manager')),
            ("Compare Models", Icon('images/compare.svg', 'Compare Models')),
            ('Databases', Icon('images/Datanode.svg', 'Databases'))]

width_plotly = "450px"
height_plotly = "450px"

page_markdown = """
<|toggle|theme|>
<|menu|label=Menu|lov={menu_lov}|on_action=menu_fct|>

<|part|render={page == 'Data Visualization'}|
""" + dv_data_visualization_md + """
|>

<|part|render={page == 'Model Manager'}|
""" + mm_model_manager_md + """
|>

<|part|render={page == 'Compare Models'}|
""" + cm_compare_models_md + """
|>

<|part|render={page == 'Databases'}|
""" + db_databases_md + """
|>
"""

# The initial page is the "Data Visualization" page
page = "Data Visualization"


def menu_fct(state, var_name: str, fct, var_value):
    """Function called when the menu control changes

    Args:
        state: Taipy state object
        var_name (str): name of the changed variable
        var_value (obj): value of the changed variable
    """
    # Change the value of the state.page variable to render the right page
    try:
        state.page = var_value['args'][0]
    except:
        print("Warning : No args were found")
        pass


# Function for the prediction table: bad predictions are red and good
# predictions are green (CSS classes)
def get_style(state, index, row):
    return 'red' if row['Historical'] != row['Forecast'] else 'green'

##############################################################################################################################
# Create the whole markdown
##############################################################################################################################

# dialog_md is in main_dialog.py
# the others are found in the dialogs folder
entire_markdown = page_markdown + dialog_md

# Object used to create the pages
gui = Gui(page=entire_markdown, css_file='main')

dialog_partial_roc = gui.add_partial(dialog_roc)
partial_scatter = gui.add_partial(creation_of_dialog_scatter(x_selected))
partial_histo = gui.add_partial(creation_of_dialog_histogram(x_selected))
partial_scatter_pred = gui.add_partial(creation_of_dialog_scatter_pred(x_selected))
partial_histo_pred = gui.add_partial(creation_of_dialog_histogram_pred(x_selected))


def update_partial_charts(state):
    """This function updates the four partials containing charts and selectors.
    Partials are mini-pages that can be reloaded at runtime with the functions
    below; they are reloaded here to change the content of the charts.

    Args:
        state: object containing all the variables used in the GUI
    """
    state.partial_scatter.update_content(state, creation_of_dialog_scatter(state.x_selected, state))
    state.partial_histo.update_content(state, creation_of_dialog_histogram(state.x_selected, state))
    state.partial_scatter_pred.update_content(state, creation_of_dialog_scatter_pred(state.x_selected, state))
    state.partial_histo_pred.update_content(state, creation_of_dialog_histogram_pred(state.x_selected, state))

##############################################################################################################################
# Update the displayed variables
##############################################################################################################################

def update_variables(state, pipeline):
    """This function updates the different variables and dataframes used in the
    application.

    Args:
        state: object containing all the variables used in the GUI
        pipeline (str): name of the pipeline used to update the variables
    """
    global scenario
    pipeline_str = 'pipeline_' + pipeline

    state.values = scenario.pipelines[pipeline_str].results.read()

    state.forecast_series = state.values['Forecast']
    state.true_series = state.values["Historical"]

    (state.number_of_predictions, state.accuracy, state.f1_score, state.score_auc,
     number_of_good_predictions, number_of_false_predictions,
     fp_, tp_, fn_, tn_) = c_update_metrics(scenario, pipeline_str)

    update_charts(state, pipeline_str, number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_)


def update_charts(state, pipeline_str, number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_):
    """This function updates all the charts of the GUI.
    Args:
        state: object containing all the variables used in the GUI
        pipeline_str (str): the name of the displayed pipeline
        number_of_good_predictions (int): number of good predictions
        number_of_false_predictions (int): number of false predictions
        fp_ (int): number of false positives
        tp_ (int): number of true positives
        fn_ (int): number of false negatives
        tn_ (int): number of true negatives
    """
    state.roc_dataset = scenario.pipelines[pipeline_str].roc_data.read()

    if 'model' in pipeline_str:
        state.features_table = scenario.pipelines['pipeline_train_model'].feature_importance.read()
    elif 'baseline' in pipeline_str:
        state.features_table = scenario.pipelines['pipeline_train_baseline'].feature_importance.read()

    state.score_table = pd.DataFrame({"Score": ["Predicted stayed", "Predicted exited"],
                                      "Stayed": [tn_, fp_],
                                      "Exited": [fn_, tp_]})

    state.pie_confusion_matrix = pd.DataFrame({"values": [tp_, tn_, fp_, fn_],
                                               "labels": ["True Positive", "True Negative", "False Positive", "False Negative"]})

    state.scatter_dataset_pred = creation_scatter_dataset_pred(test_dataset, state.forecast_series)
    state.histo_full_pred = creation_histo_full_pred(test_dataset, state.forecast_series)

    # Pie charts
    state.pie_plotly = pd.DataFrame({"values": [number_of_good_predictions, number_of_false_predictions],
                                     "labels": ["Correct predictions", "False predictions"]})

    state.distrib_class = pd.DataFrame({"values": [len(state.values[state.values["Historical"]==0]),
                                                   len(state.values[state.values["Historical"]==1])],
                                        "labels": ["Stayed", "Exited"]})

##############################################################################################################################
# on_change function
##############################################################################################################################
# the other callbacks are found in the relevant folders of frontend/dialogs
def on_change(state, var_name, var_value):
    """Called whenever a variable changes in the GUI.

    Args:
        state: object containing all the variables used in the GUI
        var_name (str): the name of the changed variable
        var_value (obj): the new value of the changed variable
    """
    if var_name in ('x_selected', 'y_selected'):
        update_partial_charts(state)
    if var_name == 'mm_algorithm_selected':
        if var_value == 'Baseline':
            update_variables(state, 'baseline')
        if var_value == 'ML':
            update_variables(state, 'model')

    if (var_name in ('mm_algorithm_selected', 'db_table_selected') and state.page == 'Databases')\
            or (var_name == 'page' and var_value == 'Databases'):
        # When on the 'Databases' page, a temporary csv file has to be created
        handle_temp_csv_path(state)

    if var_name == 'page' and var_value != 'Databases':
        delete_temp_csv()


def delete_temp_csv():
    """Deletes the temporary csv file."""
    if os.path.exists(PATH_TO_TABLE):
        os.remove(PATH_TO_TABLE)


def handle_temp_csv_path(state):
    """Checks whether the temporary csv file exists and deletes it if it does;
    then creates the temporary csv file for the currently selected table.
    Args:
        state: object containing all the variables used in the GUI
    """
    if os.path.exists(PATH_TO_TABLE):
        os.remove(PATH_TO_TABLE)
    if state.db_table_selected == 'Test Dataset':
        state.test_dataset.to_csv(PATH_TO_TABLE, sep=';')
    if state.db_table_selected == 'Confusion Matrix':
        state.score_table.to_csv(PATH_TO_TABLE, sep=';')
    if state.db_table_selected == "Training Dataset":
        train_dataset.to_csv(PATH_TO_TABLE, sep=';')
    if state.db_table_selected == "Forecast Dataset":
        values.to_csv(PATH_TO_TABLE, sep=';')

##############################################################################################################################
# Running the GUI
##############################################################################################################################
if __name__ == '__main__':
    gui.run(title="Churn classification",
            host='0.0.0.0',
            port=os.environ.get('PORT', '5050'),
            dark_mode=False)
else:
    app = gui.run(title="Churn classification", dark_mode=False, run_server=False)
import datetime as dt

from algos.algos import *
from taipy import Scope, Frequency, Config

##############################################################################################################################
# Creation of the datanodes
##############################################################################################################################
# How the data source is accessed
path_to_csv = 'data/churn.csv'  # path to the source csv file

# Input data node reading the csv file
initial_dataset = Config.configure_data_node(id="initial_dataset",
                                             path=path_to_csv,
                                             storage_type="csv",
                                             has_header=True)

date_cfg = Config.configure_data_node(id="date", default_data="None")

# Data node containing the preprocessed data
preprocessed_dataset = Config.configure_data_node(id="preprocessed_dataset",
                                                  cacheable=True,
                                                  validity_period=dt.timedelta(days=1))

# Data node containing the training data
train_dataset = Config.configure_data_node(id="train_dataset",
                                           cacheable=True,
                                           validity_period=dt.timedelta(days=1))

# Data nodes containing the fitted models
trained_model = Config.configure_data_node(id="trained_model",
                                           cacheable=True,
                                           validity_period=dt.timedelta(days=1))

trained_model_baseline = Config.configure_data_node(id="trained_model_baseline",
                                                    cacheable=True,
                                                    validity_period=dt.timedelta(days=1))

# Data node containing the test data
test_dataset = Config.configure_data_node(id="test_dataset",
                                          cacheable=True,
                                          validity_period=dt.timedelta(days=1))

# Pipeline-scoped data nodes holding the per-pipeline outputs
forecast_dataset = Config.configure_data_node(id="forecast_dataset",
                                              scope=Scope.PIPELINE,
                                              cacheable=True,
                                              validity_period=dt.timedelta(days=1))

roc_data = Config.configure_data_node(id="roc_data",
                                      scope=Scope.PIPELINE,
                                      cacheable=True,
                                      validity_period=dt.timedelta(days=1))

score_auc = Config.configure_data_node(id="score_auc",
                                       scope=Scope.PIPELINE,
                                       cacheable=True,
                                       validity_period=dt.timedelta(days=1))

metrics = Config.configure_data_node(id="metrics",
                                     scope=Scope.PIPELINE,
                                     cacheable=True,
                                     validity_period=dt.timedelta(days=1))

feature_importance_cfg = Config.configure_data_node(id="feature_importance", scope=Scope.PIPELINE)

results = Config.configure_data_node(id="results",
                                     scope=Scope.PIPELINE,
                                     cacheable=True,
                                     validity_period=dt.timedelta(days=1))

##############################################################################################################################
# Creation of the tasks
##############################################################################################################################
# While running a function, a task creates the link between the input data nodes and the output data nodes
# Initial dataset --> preprocess dataset --> preprocessed dataset
task_preprocess_dataset = Config.configure_task(id="preprocess_dataset",
                                                input=[initial_dataset, date_cfg],
                                                function=preprocess_dataset,
                                                output=preprocessed_dataset)

# Preprocessed dataset --> create train/test data --> train dataset, test dataset
task_create_train_test = Config.configure_task(id="create_train_and_test_data",
                                               input=preprocessed_dataset,
                                               function=create_train_test_data,
                                               output=[train_dataset, test_dataset])

# Train dataset --> train model --> trained model, feature importance
task_train_model = Config.configure_task(id="train_model",
                                         input=train_dataset,
                                         function=train_model,
                                         output=[trained_model, feature_importance_cfg])

# Train dataset --> train baseline model --> trained baseline model, feature importance
task_train_model_baseline = Config.configure_task(id="train_model_baseline",
                                                  input=train_dataset,
                                                  function=train_model_baseline,
                                                  output=[trained_model_baseline, feature_importance_cfg])

# Test dataset, trained model --> forecast --> forecast dataset
task_forecast = Config.configure_task(id="predict_the_test_data",
                                      input=[test_dataset, trained_model],
                                      function=forecast,
                                      output=forecast_dataset)

# Test dataset, trained baseline model --> forecast --> forecast dataset
task_forecast_baseline = Config.configure_task(id="predict_of_baseline",
                                               input=[test_dataset, trained_model_baseline],
                                               function=forecast_baseline,
                                               output=forecast_dataset)

task_roc = Config.configure_task(id="task_roc",
                                 input=[forecast_dataset, test_dataset],
                                 function=roc_from_scratch,
                                 output=[roc_data, score_auc])

task_roc_baseline = Config.configure_task(id="task_roc_baseline",
                                          input=[forecast_dataset, test_dataset],
                                          function=roc_from_scratch,
                                          output=[roc_data, score_auc])

task_create_metrics = Config.configure_task(id="task_create_metrics",
                                            input=[forecast_dataset, test_dataset],
                                            function=create_metrics,
                                            output=metrics)

task_create_results = Config.configure_task(id="task_create_results",
                                            input=[forecast_dataset, test_dataset],
                                            function=create_results,
                                            output=results)

##############################################################################################################################
# Creation of the pipelines and the scenario
##############################################################################################################################
# Configuration of the pipelines and the scenario
pipeline_preprocessing = Config.configure_pipeline(id="pipeline_preprocessing",
                                                   task_configs=[task_preprocess_dataset, task_create_train_test])

pipeline_train_baseline = Config.configure_pipeline(id="pipeline_train_baseline",
                                                    task_configs=[task_train_model_baseline])

pipeline_train_model = Config.configure_pipeline(id="pipeline_train_model",
                                                 task_configs=[task_train_model])

pipeline_model = Config.configure_pipeline(id="pipeline_model",
                                           task_configs=[task_forecast, task_roc,
                                                         task_create_metrics, task_create_results])

pipeline_baseline = Config.configure_pipeline(id="pipeline_baseline",
                                              task_configs=[task_forecast_baseline, task_roc_baseline,
                                                            task_create_metrics, task_create_results])

# The scenario runs the pipelines
scenario_cfg = Config.configure_scenario(id="churn_classification",
                                         pipeline_configs=[pipeline_preprocessing,
                                                           pipeline_train_baseline, pipeline_train_model,
                                                           pipeline_model, pipeline_baseline],
                                         frequency=Frequency.WEEKLY)
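
# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original config module): with the
# configuration above, the scenario can be instantiated and submitted the same
# way the wine demo later in this repository does it. This is an illustration
# under that assumption, not the application's actual entry point.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    import taipy as tp

    tp.Core().run()
    scenario = tp.create_scenario(scenario_cfg)
    scenario.submit()
    # Each pipeline's results node can then be read back, e.g.:
    print(scenario.pipelines['pipeline_baseline'].results.read().head())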
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score

import datetime as dt
import pandas as pd
import numpy as np

##############################################################################################################################
# Functions used by the tasks
##############################################################################################################################
def preprocess_dataset(initial_dataset: pd.DataFrame, date: dt.datetime = "None"):
    """Preprocesses the dataset so it can be used by the models.

    Args:
        initial_dataset (pd.DataFrame): the raw data as it is first read
        date (dt.datetime, optional): if set, only rows up to this date are kept; defaults to the string "None"

    Returns:
        pd.DataFrame: the dataset preprocessed for classification
    """
    print("\n     Preprocessing the dataset...")
    # Filter the dataframe on the date
    if date != "None":
        initial_dataset['Date'] = pd.to_datetime(initial_dataset['Date'])
        processed_dataset = initial_dataset[initial_dataset['Date'] <= date]
        print(len(processed_dataset))
    else:
        processed_dataset = initial_dataset

    processed_dataset = processed_dataset[['CreditScore', 'Geography', 'Gender', 'Age', 'Tenure', 'Balance',
                                           'NumOfProducts', 'HasCrCard', 'IsActiveMember', 'EstimatedSalary', 'Exited']]

    processed_dataset = pd.get_dummies(processed_dataset)
    if 'Gender_Female' in processed_dataset.columns:
        processed_dataset.drop('Gender_Female', axis=1, inplace=True)
    processed_dataset = processed_dataset.apply(pd.to_numeric)

    columns_to_select = ['CreditScore', 'Age', 'Tenure', 'Balance',
                         'NumOfProducts', 'HasCrCard', 'IsActiveMember',
                         'EstimatedSalary', 'Geography_France', 'Geography_Germany',
                         'Geography_Spain', 'Gender_Male', 'Exited']

    processed_dataset = processed_dataset[[col for col in columns_to_select if col in processed_dataset.columns]]
    print("     Preprocessing done!\n")
    return processed_dataset


def create_train_test_data(preprocessed_dataset: pd.DataFrame):
    """Creates the training and testing data by splitting the dataset.
    Args:
        preprocessed_dataset (pd.DataFrame): the preprocessed dataset

    Returns:
        pd.DataFrame: the training dataset
        pd.DataFrame: the testing dataset
    """
    print("\n     Creating the training and testing dataset...")
    X_train, X_test, y_train, y_test = train_test_split(preprocessed_dataset.iloc[:, :-1],
                                                        preprocessed_dataset.iloc[:, -1],
                                                        test_size=0.2, random_state=42)
    train_data = pd.concat([X_train, y_train], axis=1)
    test_data = pd.concat([X_test, y_test], axis=1)
    print("     Creation done!")
    return train_data, test_data


def train_model_baseline(train_dataset: pd.DataFrame):
    """Trains a logistic regression model.

    Args:
        train_dataset (pd.DataFrame): the training dataset

    Returns:
        model (LogisticRegression): the fitted model
        importance (pd.DataFrame): the feature importances (model coefficients)
    """
    print("     Training the model...\n")
    X, y = train_dataset.iloc[:, :-1], train_dataset.iloc[:, -1]
    model_fitted = LogisticRegression().fit(X, y)
    print("\n     ", model_fitted, " has been trained!")

    importance_dict = {'Features': X.columns, 'Importance': model_fitted.coef_[0]}
    importance = pd.DataFrame(importance_dict).sort_values(by='Importance', ascending=True)
    return model_fitted, importance


def train_model(train_dataset: pd.DataFrame):
    """Trains a random forest model.

    Args:
        train_dataset (pd.DataFrame): the training dataset

    Returns:
        model (RandomForestClassifier): the fitted model
        importance (pd.DataFrame): the feature importances
    """
    print("     Training the model...\n")
    X, y = train_dataset.iloc[:, :-1], train_dataset.iloc[:, -1]
    model_fitted = RandomForestClassifier().fit(X, y)
    print("\n     ", model_fitted, " has been trained!")

    importance_dict = {'Features': X.columns, 'Importance': model_fitted.feature_importances_}
    importance = pd.DataFrame(importance_dict).sort_values(by='Importance', ascending=True)
    return model_fitted, importance


def forecast(test_dataset: pd.DataFrame, trained_model: RandomForestClassifier):
    """Predicts the test dataset with the random forest model.

    Args:
        test_dataset (pd.DataFrame): the test dataset
        trained_model (RandomForestClassifier): the fitted model

    Returns:
        np.array: the predicted probabilities of the positive class
    """
    print("     Predicting the test dataset...")
    X = test_dataset.iloc[:, :-1]
    # Class probabilities are used instead of hard predictions so that an ROC curve can be computed
    predictions = trained_model.predict_proba(X)[:, 1]
    print("     Prediction done!")
    return predictions


def forecast_baseline(test_dataset: pd.DataFrame, trained_model: LogisticRegression):
    """Predicts the test dataset with the baseline model.

    Args:
        test_dataset (pd.DataFrame): the test dataset
        trained_model (LogisticRegression): the fitted model

    Returns:
        np.array: the predicted probabilities of the positive class
    """
    print("     Predicting the test dataset...")
    X = test_dataset.iloc[:, :-1]
    predictions = trained_model.predict_proba(X)[:, 1]
    print("     Prediction done!")
    return predictions


def roc_from_scratch(probabilities, test_dataset, partitions=100):
    """Computes the ROC curve by sweeping the decision threshold.

    Args:
        probabilities (np.array): the predicted probabilities
        test_dataset (pd.DataFrame): the test dataset (last column is the label)
        partitions (int): the number of thresholds to evaluate

    Returns:
        pd.DataFrame: the false/true positive rates at each threshold
        float: the AUC score
    """
    print("     Computation of the ROC curve...")
    y_test = test_dataset.iloc[:, -1]

    roc = np.array([])
    for i in range(partitions + 1):
        threshold_vector = np.greater_equal(probabilities, i / partitions).astype(int)
        tpr, fpr = true_false_positive(threshold_vector, y_test)
        roc = np.append(roc, [fpr, tpr])

    roc_np = roc.reshape(-1, 2)
    roc_data = pd.DataFrame({"False positive rate": roc_np[:, 0],
                             "True positive rate": roc_np[:, 1]})
    print("     Computation done")

    print("     Scoring...")
    score_auc = roc_auc_score(y_test, probabilities)
    print("     Scoring done\n")
    return roc_data, score_auc


def true_false_positive(threshold_vector: np.array, y_test: np.array):
    """Computes the true positive rate and the false positive rate.

    Args:
        threshold_vector (np.array): the binarized predictions
        y_test (np.array): the ground-truth labels

    Returns:
        tpr (float): the true positive rate
        fpr (float): the false positive rate
    """
    true_positive = np.equal(threshold_vector, 1) & np.equal(y_test, 1)
    true_negative = np.equal(threshold_vector, 0) & np.equal(y_test, 0)
    false_positive = np.equal(threshold_vector, 1) & np.equal(y_test, 0)
    false_negative = np.equal(threshold_vector, 0) & np.equal(y_test, 1)

    tpr = true_positive.sum() / (true_positive.sum() + false_negative.sum())
    fpr = false_positive.sum() / (false_positive.sum() + true_negative.sum())
    return tpr, fpr


def create_metrics(predictions: np.array, test_dataset: np.array):
    """Creates the metrics dictionary (f1-score, accuracy and confusion-matrix
    counts) at a 0.5 decision threshold.

    Args:
        predictions (np.array): the predicted probabilities
        test_dataset (pd.DataFrame): the test dataset (last column is the label)

    Returns:
        dict: the metrics used by the GUI
    """
    print("     Creating the metrics...")
    threshold = 0.5
    threshold_vector = np.greater_equal(predictions, threshold).astype(int)
    y_test = test_dataset.iloc[:, -1]

    true_positive = (np.equal(threshold_vector, 1) & np.equal(y_test, 1)).sum()
    true_negative = (np.equal(threshold_vector, 0) & np.equal(y_test, 0)).sum()
    false_positive = (np.equal(threshold_vector, 1) & np.equal(y_test, 0)).sum()
    false_negative = (np.equal(threshold_vector, 0) & np.equal(y_test, 1)).sum()

    f1_score = np.around(2*true_positive / (2*true_positive + false_positive + false_negative), decimals=2)
    accuracy = np.around((true_positive + true_negative) / (true_positive + true_negative + false_positive + false_negative), decimals=2)

    dict_ftpn = {"tp": true_positive, "tn": true_negative, "fp": false_positive, "fn": false_negative}
    number_of_good_predictions = true_positive + true_negative
    number_of_false_predictions = false_positive + false_negative

    metrics = {"f1_score": f1_score,
               "accuracy": accuracy,
               "dict_ftpn": dict_ftpn,
               'number_of_predictions': len(predictions),
               'number_of_good_predictions': number_of_good_predictions,
               'number_of_false_predictions': number_of_false_predictions}
    print("     Metrics created!")
    return metrics


def create_results(forecast_values, test_dataset):
    """Builds the results dataframe shown in the GUI (id, probability, forecast and historical label)."""
    forecast_series_proba = pd.Series(np.around(forecast_values, decimals=2), index=test_dataset.index, name='Probability')
    forecast_series = pd.Series((forecast_values > 0.5).astype(int), index=test_dataset.index, name='Forecast')
    true_series = pd.Series(test_dataset.iloc[:, -1], name="Historical", index=test_dataset.index)
    index_series = pd.Series(range(len(true_series)), index=test_dataset.index, name="Id")

    results = pd.concat([index_series, forecast_series_proba, forecast_series, true_series], axis=1)
    return results
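
# ----------------------------------------------------------------------------
# Hedged smoke test (illustration only, not in the original file): exercises
# roc_from_scratch and create_metrics on a tiny synthetic dataset so the
# return shapes can be checked without running the full Taipy pipeline.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.default_rng(42)
    toy = pd.DataFrame({'Feature': rng.normal(size=200),
                        'Exited': rng.integers(0, 2, size=200)})
    toy_probabilities = rng.random(200)

    roc_df, auc = roc_from_scratch(toy_probabilities, toy, partitions=10)
    print(roc_df.head(), auc)
    print(create_metrics(toy_probabilities, toy))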
from pages.dialogs.dialog_roc_md import * from pages.compare_models_md import * from pages.data_visualization_md import * from pages.databases_md import * from pages.model_manager_md import * dialog_md = """ <|dialog|open={dr_show_roc}|title=ROC Curve|partial={dialog_partial_roc}|on_action=delete_dialog_roc|labels=Close|width=1000px|> """
# ν˜Όλ™ ν–‰λ ¬ λŒ€ν™” μƒμž db_confusion_matrix_md = """ <|part|render={db_table_selected=='Confusion Matrix'}| <center> <|{score_table}|table|height=200px|width=400px|show_all=True|> </center> |> """ # ν•™μŠ΅ 데이터 μ„ΈνŠΈμ— λŒ€ν•œ ν…Œμ΄λΈ” db_train_dataset_md = """ <|part|render={db_table_selected=='Training Dataset'}| <|{train_dataset}|table|width=1400px|height=560px|> |> """ # 예츑 데이터 μ„ΈνŠΈμ— λŒ€ν•œ ν…Œμ΄λΈ” db_forecast_dataset_md = """ <|part|render={db_table_selected=='Forecast Dataset'}| <center> <|{values}|table|height=560px|width=800px|style={get_style}|> </center> |> """ # ν…ŒμŠ€νŠΈ 데이터 μ„ΈνŠΈμ— λŒ€ν•œ ν…Œμ΄λΈ” db_test_dataset_md = """ <|part|render={db_table_selected=='Test Dataset'}| <|{test_dataset}|table|width=1400px|height=560px|> |> """ # ν‘œμ‹œν•  ν…Œμ΄λΈ”μ„ μ„ νƒν•˜λŠ” 선택기 db_table_selector = ['Training Dataset', 'Test Dataset', 'Forecast Dataset', 'Confusion Matrix'] db_table_selected = db_table_selector[0] # 전체 νŽ˜μ΄μ§€λ₯Ό λ§Œλ“€κΈ° μœ„ν•œ λ¬Έμžμ—΄ 집계 db_databases_md = """ # λ°μ΄ν„°λ² μ΄μŠ€ <|layout|columns=2 2 1|columns[mobile]=1| <| **Algorithm**: \n \n <|{mm_algorithm_selected}|selector|lov={mm_algorithm_selector}|dropdown=True|> |> <| **Table**: \n \n <|{db_table_selected}|selector|lov={db_table_selector}|dropdown=True|> |> <br/> <br/> <|{PATH_TO_TABLE}|file_download|name=table.csv|label=Download table|> |> """ + db_test_dataset_md + db_confusion_matrix_md + db_train_dataset_md + db_forecast_dataset_md
import pandas as pd
import numpy as np

dv_graph_selector = ['Histogram', 'Scatter']
dv_graph_selected = dv_graph_selector[0]

# Histogram dialog
dv_width_histo = "100%"
dv_height_histo = 600

dv_dict_overlay = {'barmode': 'overlay', "margin": {"t": 20}}

dv_select_x_ = ['CREDITSCORE', 'AGE', 'TENURE', 'BALANCE', 'NUMOFPRODUCTS', 'HASCRCARD',
                'ISACTIVEMEMBER', 'ESTIMATEDSALARY', 'GEOGRAPHY_FRANCE', 'GEOGRAPHY_GERMANY',
                'GEOGRAPHY_SPAIN', 'GENDER_MALE']


def creation_scatter_dataset(test_dataset: pd.DataFrame):
    """Creates the dataset for the scatter plot. For every column (except EXITED),
    a positive and a negative version is created. The positive column is NaN when
    EXITED is 0 and the negative column is NaN when EXITED is 1.

    Args:
        test_dataset (pd.DataFrame): the test dataset

    Returns:
        pd.DataFrame: the dataframe used to display the scatter plot
    """
    scatter_dataset = test_dataset.copy()

    for column in scatter_dataset.columns:
        if column != 'EXITED':
            column_neg = str(column) + '_neg'
            column_pos = str(column) + '_pos'
            scatter_dataset[column_neg] = scatter_dataset[column]
            scatter_dataset[column_pos] = scatter_dataset[column]
            scatter_dataset.loc[(scatter_dataset['EXITED'] == 1), column_neg] = np.NaN
            scatter_dataset.loc[(scatter_dataset['EXITED'] == 0), column_pos] = np.NaN

    return scatter_dataset


def creation_of_dialog_scatter(column, state=None):
    """Generates the Markdown used for the scatter plot. It is used to change the
    partial (a mini-page that can be reloaded). The selectors are created here and
    the x and y of the graph are decided here. The layout dictionary also changes
    depending on the column used.
    """
    if column in ('AGE', 'CREDITSCORE') and state is not None:
        state.dv_dict_overlay = {'barmode': 'overlay', "margin": {"t": 20}}
    elif state is not None:
        state.dv_dict_overlay = {"margin": {"t": 20}}

    md = """
<|layout|columns= 1 1 1|columns[mobile]=1|
<|
Type of graph \n \n <|{dv_graph_selected}|selector|lov={dv_graph_selector}|dropdown|>
|>

<|
Select **x** \n \n <|{x_selected}|selector|lov={select_x}|dropdown=True|>
|>

<|
Select **y** \n \n <|{y_selected}|selector|lov={select_y}|dropdown=True|>
|>
|>

<|part|render={x_selected=='"""+column+"""'}|
<|{scatter_dataset}|chart|x="""+column+"""|y[1]={y_selected+'_pos'}|y[2]={y_selected+'_neg'}|color[1]=red|color[2]=green|name[1]=Exited|name[2]=Stayed|height={dv_height_histo}|width={dv_width_histo}|mode=markers|type=scatter|layout={dv_dict_overlay}|>
|>
"""
    return md


def creation_histo_full(test_dataset: pd.DataFrame):
    """Creates the dataset for the histogram plot. For every column, a negative
    version is created; the negative column is NaN when EXITED is 1 and the
    original column is NaN when EXITED is 0.

    Args:
        test_dataset (pd.DataFrame): the test dataset

    Returns:
        pd.DataFrame: the dataframe used to display the histogram
    """
    histo_full = test_dataset.copy()
    # Deterministic oversampling so that both classes have a comparable number of points
    histo_1 = histo_full.loc[histo_full['EXITED'] == 1]
    frames = [histo_full, histo_1, histo_1, histo_1]
    histo_full = pd.concat(frames, sort=False)

    for column in histo_full.columns:
        column_neg = str(column) + '_neg'
        histo_full[column_neg] = histo_full[column]
        histo_full.loc[(histo_full['EXITED'] == 1), column_neg] = np.NaN
        histo_full.loc[(histo_full['EXITED'] == 0), column] = np.NaN

    return histo_full


def creation_of_dialog_histogram(column, state=None):
    """Generates the Markdown used for the histogram plot.
    It is used to change the partial (a mini-page that can be reloaded).
    The selector is created here and the x of the graph is decided here.
    The layout dictionary also changes depending on the column used.
    """
    if column in ('AGE', 'CREDITSCORE') and state is not None:
        state.dv_dict_overlay = {'barmode': 'overlay', "margin": {"t": 20}}
    elif state is not None:
        state.dv_dict_overlay = {"margin": {"t": 20}}

    md = """
<|layout|columns= 1 1 1|columns[mobile]=1|
<|
Select type of graph : \n \n <|{dv_graph_selected}|selector|lov={dv_graph_selector}|dropdown|>
|>

<|
Select **x**: \n \n <|{x_selected}|selector|lov={select_x}|dropdown=True|>
|>
|>

<|{histo_full[['"""+column+"""','"""+column+"""_neg','EXITED']]}|chart|type=histogram|x[1]="""+column+"""|x[2]="""+column+"""_neg|y=EXITED|label=EXITED|color[1]=red|color[2]=green|name[1]=Exited|name[2]=Stayed|height={dv_height_histo}|width={dv_width_histo}|layout={dv_dict_overlay}|class_name=histogram|>
"""
    return md


dv_data_visualization_md = """
# Data Visualization

<|part|render={dv_graph_selected == 'Histogram'}|partial={partial_histo}|>
<|part|render={dv_graph_selected == 'Scatter'}|partial={partial_scatter}|>
"""
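
# ----------------------------------------------------------------------------
# Hedged illustration (not part of the original page module): shows the
# pos/neg column duplication performed by creation_scatter_dataset on a
# minimal frame using the upper-cased schema from dv_select_x_.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    demo = pd.DataFrame({'AGE': [25, 40, 33], 'EXITED': [0, 1, 0]})
    print(creation_scatter_dataset(demo))
    # AGE_neg is NaN on the EXITED == 1 row; AGE_pos is NaN on the other rows.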
import pandas as pd
import numpy as np

cm_height_histo = "100%"
cm_dict_barmode = {"barmode": "stack", "margin": {"t": 30}}
cm_options_md = "height={cm_height_histo}|width={cm_height_histo}|layout={cm_dict_barmode}"

cm_compare_models_md = """
# Compare Models

<br/>
<br/>
<br/>

<|layout|columns= 1 1 1|columns[mobile]=1|
<|{accuracy_graph}|chart|type=bar|x=Pipeline|y[1]=Accuracy Model|y[2]=Accuracy Baseline|title=Accuracy|""" + cm_options_md + """|>

<|{f1_score_graph}|chart|type=bar|x=Pipeline|y[1]=F1 Score Model|y[2]=F1 Score Baseline|title=F1 Score|""" + cm_options_md + """|>

<|{score_auc_graph}|chart|type=bar|x=Pipeline|y[1]=AUC Score Model|y[2]=AUC Score Baseline|title=AUC Score|""" + cm_options_md + """|>
|>
"""


def c_update_metrics(scenario, pipeline):
    """Updates the metrics of the scenario for the given pipeline.

    Args:
        scenario (scenario): the selected scenario
        pipeline (str): the name of the selected pipeline

    Returns:
        tuple: the metric values read from the pipeline's data nodes
    """
    metrics = scenario.pipelines[pipeline].metrics.read()

    number_of_predictions = metrics['number_of_predictions']
    number_of_good_predictions = metrics['number_of_good_predictions']
    number_of_false_predictions = metrics['number_of_false_predictions']

    accuracy = np.around(metrics['accuracy'], decimals=2)
    f1_score = np.around(metrics['f1_score'], decimals=2)
    score_auc = np.around(scenario.pipelines[pipeline].score_auc.read(), decimals=2)

    dict_ftpn = metrics['dict_ftpn']
    fp_ = dict_ftpn['fp']
    tp_ = dict_ftpn['tp']
    fn_ = dict_ftpn['fn']
    tn_ = dict_ftpn['tn']

    return (number_of_predictions, accuracy, f1_score, score_auc,
            number_of_good_predictions, number_of_false_predictions,
            fp_, tp_, fn_, tn_)


def compare_charts(accuracies, f1_scores, scores_auc, names):
    """Creates the pandas dataframes (charts) used in the model comparison page.

    Args:
        accuracies (list): list of accuracies
        f1_scores (list): list of f1 scores
        scores_auc (list): list of AUC scores
        names (list): list of pipeline names

    Returns:
        pd.DataFrame: the three resulting dataframes
    """
    accuracy_graph = pd.DataFrame(create_metric_dict(accuracies, "Accuracy", names))
    f1_score_graph = pd.DataFrame(create_metric_dict(f1_scores, "F1 Score", names))
    score_auc_graph = pd.DataFrame(create_metric_dict(scores_auc, "AUC Score", names))
    return accuracy_graph, f1_score_graph, score_auc_graph


def compare_models_baseline(scenario, pipelines):
    """Creates the objects used to compare the pipelines.

    Args:
        scenario (scenario): the selected scenario
        pipelines (list): the names of the pipelines to compare

    Returns:
        pd.DataFrame: the three resulting dataframes
    """
    accuracies = []
    f1_scores = []
    scores_auc = []
    names = []
    for pipeline in pipelines:
        (_, accuracy, f1_score, score_auc, _, _, _, _, _, _) = c_update_metrics(scenario, pipeline)

        accuracies.append(accuracy)
        f1_scores.append(f1_score)
        scores_auc.append(score_auc)
        names.append(pipeline[9:])  # strip the 'pipeline_' prefix

    accuracy_graph, f1_score_graph, score_auc_graph = compare_charts(accuracies, f1_scores, scores_auc, names)
    return accuracy_graph, f1_score_graph, score_auc_graph


def create_metric_dict(metric, metric_name, names):
    """Creates a metric dictionary for multiple pipelines; it is used to build
    the dataframes displayed in the Gui.
    Args:
        metric (list): the metric values
        metric_name (str): the name of the metric
        names (list): the list of pipeline names

    Returns:
        dict: the dictionary used to build the pandas dataframe
    """
    metric_dict = {}
    initial_list = [0] * len(names)

    metric_dict["Pipeline"] = names
    for i in range(len(names)):
        current_list = initial_list.copy()
        current_list[i] = metric[i]
        metric_dict[metric_name + " " + names[i].capitalize()] = current_list
    return metric_dict
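
# ----------------------------------------------------------------------------
# Hedged illustration (not in the original module): create_metric_dict spreads
# each pipeline's score into its own column, which is what lets the bar charts
# above show one colored series per pipeline.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    print(create_metric_dict([0.81, 0.86], 'Accuracy', ['baseline', 'model']))
    # -> {'Pipeline': ['baseline', 'model'],
    #     'Accuracy Baseline': [0.81, 0],
    #     'Accuracy Model': [0, 0.86]}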
import pandas as pd
import numpy as np

mm_select_x_ = ['CREDITSCORE', 'AGE', 'TENURE', 'BALANCE', 'NUMOFPRODUCTS', 'HASCRCARD',
                'ISACTIVEMEMBER', 'ESTIMATEDSALARY', 'GEOGRAPHY_FRANCE', 'GEOGRAPHY_GERMANY',
                'GEOGRAPHY_SPAIN', 'GENDER_MALE']

mm_graph_selector_scenario = ['Metrics', 'Features', 'Histogram', 'Scatter']
mm_graph_selected_scenario = mm_graph_selector_scenario[0]

mm_algorithm_selector = ['Baseline', 'ML']
mm_algorithm_selected = mm_algorithm_selector[0]

mm_pie_color_dict_2 = {"piecolorway": ["#00D08A", "#FE913C"]}
mm_pie_color_dict_4 = {"piecolorway": ["#00D08A", "#81F1A0", "#F3C178", "#FE913C"]}

mm_height_histo = 530
mm_margin_features = {'margin': {'l': 150, 'r': 50, 'b': 50, 't': 20}}


def creation_scatter_dataset_pred(test_dataset: pd.DataFrame, forecast_series: pd.Series):
    """Creates the dataset for the scatter plot of the predictions. For every column
    (except EXITED), a positive and a negative version is created. Here EXITED is a
    binary flag indicating whether the prediction is good (0) or bad (1). The positive
    column is NaN when EXITED is 0 and the negative column is NaN when EXITED is 1.

    Args:
        test_dataset (pd.DataFrame): the test dataset
        forecast_series (pd.Series): the predictions

    Returns:
        pd.DataFrame: the dataframe used to display the scatter plot
    """
    scatter_dataset = test_dataset.copy()
    scatter_dataset['EXITED'] = (scatter_dataset['EXITED'] != forecast_series.to_numpy()).astype(int)

    for column in scatter_dataset.columns:
        if column != 'EXITED':
            column_neg = str(column) + '_neg'
            column_pos = str(column) + '_pos'
            scatter_dataset[column_neg] = scatter_dataset[column]
            scatter_dataset[column_pos] = scatter_dataset[column]
            scatter_dataset.loc[(scatter_dataset['EXITED'] == 1), column_neg] = np.NaN
            scatter_dataset.loc[(scatter_dataset['EXITED'] == 0), column_pos] = np.NaN

    return scatter_dataset


def creation_of_dialog_scatter_pred(column, state=None):
    """Generates the Markdown used for the scatter plot of the predictions. It is
    used to change the partial (a mini-page that can be reloaded). The selectors
    are created here and the x and y of the graph are decided here. The layout
    dictionary also changes depending on the column used.
    """
    if column in ('AGE', 'CREDITSCORE') and state is not None:
        state.dv_dict_overlay = {'barmode': 'overlay', "margin": {"t": 20}}
    elif state is not None:
        state.dv_dict_overlay = {"margin": {"t": 20}}

    md = """
<|layout|columns= 1 1|columns[mobile]=1|
<|
Select **x** \n \n <|{x_selected}|selector|lov={select_x}|dropdown|>
|>

<|
Select **y** \n \n <|{y_selected}|selector|lov={select_y}|dropdown|>
|>
|>

<|{scatter_dataset_pred}|chart|x="""+column+"""|y[1]={y_selected+'_pos'}|y[2]={y_selected+'_neg'}|color[1]=red|color[2]=green|name[1]=Bad prediction|name[2]=Good prediction|height={mm_height_histo}|width={dv_width_histo}|mode=markers|type=scatter|layout={dv_dict_overlay}|>
"""
    return md


def creation_histo_full_pred(test_dataset: pd.DataFrame, forecast_series: pd.Series):
    """Creates the dataset for the histogram plot of the predictions. For every
    column, a negative version is created. Here PREDICTION is a binary flag
    indicating whether the prediction is good (0) or bad (1). The negative column
    is NaN when PREDICTION is 1 and the original column is NaN when PREDICTION is 0.
    Args:
        test_dataset (pd.DataFrame): the test dataset
        forecast_series (pd.Series): the predictions

    Returns:
        pd.DataFrame: the dataframe used to display the histogram
    """
    histo_full = test_dataset.copy()
    histo_full['EXITED'] = (histo_full['EXITED'] != forecast_series.to_numpy()).astype(int)
    histo_full.columns = histo_full.columns.str.replace('EXITED', 'PREDICTION')

    for column in histo_full.columns:
        column_neg = str(column) + '_neg'
        histo_full[column_neg] = histo_full[column]
        histo_full.loc[(histo_full['PREDICTION'] == 1), column_neg] = np.NaN
        histo_full.loc[(histo_full['PREDICTION'] == 0), column] = np.NaN

    return histo_full


metrics_md = """
<br/>

<|layout|columns=1 1 1|columns[mobile]=1|
<|
<|{accuracy}|indicator|value={accuracy}|min=0|max=1|width=200px|>

<center>
**Model accuracy**
</center>

<|{pie_plotly}|chart|x=values|label=labels|title=Accuracy of predictions model|height={height_plotly}|width=100%|type=pie|layout={mm_pie_color_dict_2}|>
|>

<|
<|{score_auc}|indicator|value={score_auc}|min=0|max=1|width=200px|>

<center>
**Model AUC**
</center>

<|{pie_confusion_matrix}|chart|x=values|label=labels|title=Confusion Matrix|height={height_plotly}|width=100%|type=pie|layout={mm_pie_color_dict_4}|>
|>

<|
<|{f1_score}|indicator|value={f1_score}|min=0|max=1|width=200px|>

<center>
**Model F1-score**
</center>

<|{distrib_class}|chart|x=values|label=labels|title=Distribution between Exited and Stayed|height={height_plotly}|width=100%|type=pie|layout={mm_pie_color_dict_2}|>
|>
|>
"""

features_md = """
<|{features_table}|chart|type=bar|y=Features|x=Importance|orientation=h|layout={mm_margin_features}|>
"""


def creation_of_dialog_histogram_pred(column, state=None):
    """Generates the Markdown used for the histogram plot of the predictions.
    It is used to change the partial (a mini-page that can be reloaded). The
    selector is created here and the x of the graph is decided here. The layout
    dictionary also changes depending on the column used.
    """
    if column in ('AGE', 'CREDITSCORE') and state is not None:
        state.dv_dict_overlay = {'barmode': 'overlay', "margin": {"t": 20}}
    elif state is not None:
        state.dv_dict_overlay = {"margin": {"t": 20}}

    md = """
<|
Select **x** \n \n <|{x_selected}|selector|lov={select_x}|dropdown=True|>
|>

<|{histo_full_pred[['"""+column+"""','"""+column+"""_neg','PREDICTION']]}|chart|type=histogram|x[1]="""+column+"""|x[2]="""+column+"""_neg|y=PREDICTION|label=PREDICTION|color[1]=red|color[2]=green|name[1]=Bad prediction|name[2]=Good prediction|height={mm_height_histo}|width={dv_width_histo}|layout={dv_dict_overlay}|class_name=histogram|>
"""
    return md


mm_model_manager_md = """
# Model Manager

<|layout|columns=1 1 1 1|columns[mobile]=1|
Algorithm
<|{mm_algorithm_selected}|selector|lov={mm_algorithm_selector}|dropdown=True|>

Type of graph
<|{mm_graph_selected_scenario}|selector|lov={mm_graph_selector_scenario}|dropdown=True|>

<br/>
<br/>
<center>
<|show roc|button|on_action=show_roc_fct|>
</center>

<br/>
<br/>
<center>
**Number of predictions: ** *<|{number_of_predictions}|>*
</center>
|>

<|part|render={mm_graph_selected_scenario == 'Metrics'}|
""" + metrics_md + """
|>

<|part|render={mm_graph_selected_scenario == 'Features'}|
""" + features_md + """
|>

<|part|render={mm_graph_selected_scenario == 'Scatter'}|partial={partial_scatter_pred}|>

<|part|render={mm_graph_selected_scenario == 'Histogram'}|partial={partial_histo_pred}|>
"""
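
# ----------------------------------------------------------------------------
# Hedged illustration (not part of the original module): in the *_pred
# variants the EXITED column is recoded to 1 where forecast and truth
# disagree, so the red/green split marks bad vs good predictions.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    demo = pd.DataFrame({'AGE': [25, 40], 'EXITED': [0, 1]})
    demo_forecast = pd.Series([0, 0])  # the second prediction is wrong
    print(creation_scatter_dataset_pred(demo, demo_forecast))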
# ROC dialog
dr_show_roc = False

def show_roc_fct(state):
    state.dr_show_roc = True

def delete_dialog_roc(state):
    state.dr_show_roc = False

dialog_roc = """
<center>
<|{roc_dataset}|chart|x=False positive rate|y[1]=True positive rate|label[1]=True positive rate|height=500px|width=900px|type=scatter|>
</center>
"""
from taipy.gui import Gui from page.page import * if __name__ == "__main__": Gui(page).run( use_reloader=True, title="Wine 🍷 production by Region and Year", dark_mode=False, )
from config.config import df_wine_production

page = """
# Wine production by Region and Year

## Data for all the regions:

<|{df_wine_production}|table|height=400px|width=95%|>
"""
import pandas as pd


def add_wine_colors(df_wine):
    """Adds two wine-color flag columns to the production data.

    Args:
        df_wine (DataFrame): data from the csv file (input for the whole app)

    Returns:
        df_wine_with_colors: DataFrame with all the input columns plus two new
            ones, 'red_and_rose' and 'white', minus two columns that are no
            longer needed ('data_type' and 'wine_type')
    """
    print("add wine color columns")
    df_wine_with_colors = df_wine.reset_index(drop=True)
    df_wine_with_colors["red_and_rose"] = 0
    df_wine_with_colors["white"] = 0

    # General case: the color is part of the wine type
    df_wine_with_colors.loc[
        df_wine_with_colors["wine_type"].str.contains("ROUGE"), "red_and_rose"
    ] = 1
    df_wine_with_colors.loc[
        df_wine_with_colors["wine_type"].str.contains("BLANC"), "white"
    ] = 1

    # For some reason, there are some lines where the information about the wine color is in the name:
    df_wine_with_colors.loc[
        df_wine_with_colors["AOC"].str.contains("Rouge")
        & df_wine_with_colors["wine_type"].str.contains("NORD - EST"),
        "red_and_rose",
    ] = 1
    df_wine_with_colors.loc[
        df_wine_with_colors["AOC"].str.contains("Blanc")
        & df_wine_with_colors["wine_type"].str.contains("NORD - EST"),
        "white",
    ] = 1

    # Drop unnecessary columns
    df_wine_with_colors = df_wine_with_colors.drop(columns=["data_type", "wine_type"])

    return df_wine_with_colors
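
# ----------------------------------------------------------------------------
# Hedged illustration (not part of the original file): a minimal frame showing
# how the flag columns are derived from wine_type and, for the NORD - EST
# rows, from the AOC name. The values below are made up for the example.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    demo = pd.DataFrame({
        "AOC": ["Chablis Blanc", "Médoc"],
        "wine_type": ["NORD - EST", "ROUGE"],
        "data_type": ["production", "production"],
    })
    print(add_wine_colors(demo))
    # row 0 -> white == 1 (AOC contains 'Blanc'); row 1 -> red_and_rose == 1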
import taipy as tp
from taipy.core.config import Config

# Load the Taipy configuration from the TOML file
Config.load("config/taipy-config.toml")

# Get the scenario configuration
scenario_cfg = Config.scenarios["SCENARIO_WINE"]

tp.Core().run()

scenario_wine = tp.create_scenario(scenario_cfg)
scenario_wine.submit()

df_wine_production = scenario_wine.WINE_PRODUCTION_WITH_COLORS.read()
from taipy.gui import Gui
from tensorflow.keras import models
from PIL import Image  # used to open the selected image file
import numpy as np

class_names = {
    0: 'airplane',
    1: 'automobile',
    2: 'bird',
    3: 'cat',
    4: 'deer',
    5: 'dog',
    6: 'frog',
    7: 'horse',
    8: 'ship',
    9: 'truck',
}

model = models.load_model("baseline_mariya.keras")


def image_predict(model, path_to_img):
    """Opens the image, resizes it to the 32x32 CIFAR-10 input size and
    returns the top predicted class with its probability."""
    img = Image.open(path_to_img)
    img = img.convert("RGB")
    img = img.resize((32, 32))
    data = np.asarray(img)
    data = data / 255  # normalize pixel values to [0, 1]
    probs = model.predict(np.array([data])[:1])

    top_prob = probs.max()
    top_pred = class_names[np.argmax(probs)]

    return top_prob, top_pred


img_path = "img.png"
content = ""
prob = 0
pred = ""

index = """
<|text-center|
<|{"logo.png"}|image|width=12vw|>

<|{content}|file_selector|extensions=.png,.jpeg|>
Select an image from your file system

<|{pred}|>

<|{img_path}|image|>

<|{prob}|indicator|value={prob}|min=0|max=100|width=25vw|>
>
"""


def on_change(state, var_name, var_value):
    if var_name == "content":
        top_prob, top_pred = image_predict(model, var_value)
        state.prob = round(top_prob * 100)
        state.pred = "This is a " + top_pred
        state.img_path = var_value


app = Gui(page=index)

if __name__ == "__main__":
    # With use_reloader=True there is no need to rerun `python classifier.py`
    # after editing the file; just refresh the page.
    app.run(use_reloader=True)
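
# Hedged usage sketch (illustration only): image_predict can also be called
# directly, without the GUI, once this module is imported; the module name
# below follows the `python classifier.py` comment above and is an assumption.
#
#     from classifier import image_predict, model
#     top_prob, top_pred = image_predict(model, "img.png")
#     print(top_pred, top_prob)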