from flask import request from flask_restful import Resource from taipy.config.config import Config from taipy.core.exceptions.exceptions import NonExistingTask, NonExistingTaskConfig from taipy.core.task._task_manager_factory import _TaskManagerFactory from ...commons.to_from_model import _to_model from ..exceptions.exceptions import ConfigIdMissingException from ..middlewares._middleware import _middleware from ..schemas import TaskSchema def _get_or_raise(task_id: str): manager = _TaskManagerFactory._build_manager() task = manager._get(task_id) if task is None: raise NonExistingTask(task_id) return task REPOSITORY = "task" class TaskResource(Resource): """Single object resource --- get: tags: - api summary: Get a task. description: | Return a single task by *task_id*. If the task does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires `TAIPY_READER` role. Code example: ```shell curl -X GET http://localhost:5000/api/v1/tasks/TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: task_id schema: type: string description: The identifier of the task. responses: 200: content: application/json: schema: type: object properties: task: TaskSchema 404: description: No task has the *task_id* identifier. delete: tags: - api summary: Delete a task. description: | Delete a task. If the task does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires `TAIPY_EDITOR` role. Code example: ```shell curl -X DELETE http://localhost:5000/api/v1/tasks/TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: task_id schema: type: string description: The identifier of the task. responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No task has the *task_id* identifier. """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def get(self, task_id): schema = TaskSchema() task = _get_or_raise(task_id) return {"task": schema.dump(_to_model(REPOSITORY, task))} @_middleware def delete(self, task_id): manager = _TaskManagerFactory._build_manager() _get_or_raise(task_id) manager._delete(task_id) return {"message": f"Task {task_id} was deleted."} class TaskList(Resource): """Creation and get_all --- get: tags: - api summary: Get all tasks. description: | Return an array of all tasks. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires `TAIPY_READER` role. Code example: ```shell curl -X GET http://localhost:5000/api/v1/tasks ``` responses: 200: content: application/json: schema: allOf: - type: object properties: results: type: array items: $ref: '#/components/schemas/TaskSchema' post: tags: - api summary: Create a task. description: | Create a new task from its *config_id*. If the config does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires `TAIPY_EDITOR` role. Code example: ```shell curl -X POST http://localhost:5000/api/v1/tasks?config_id=my_task_config ``` parameters: - in: query name: config_id schema: type: string description: The identifier of the task configuration. 
responses: 201: content: application/json: schema: type: object properties: message: type: string description: Status message. task: TaskSchema """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") def fetch_config(self, config_id): config = Config.tasks.get(config_id) if not config: raise NonExistingTaskConfig(config_id) return config @_middleware def get(self): schema = TaskSchema(many=True) manager = _TaskManagerFactory._build_manager() tasks = [_to_model(REPOSITORY, task) for task in manager._get_all()] return schema.dump(tasks) @_middleware def post(self): args = request.args config_id = args.get("config_id") schema = TaskSchema() manager = _TaskManagerFactory._build_manager() if not config_id: raise ConfigIdMissingException config = self.fetch_config(config_id) task = manager._bulk_get_or_create([config])[0] return { "message": "Task was created.", "task": schema.dump(_to_model(REPOSITORY, task)), }, 201 class TaskExecutor(Resource): """Execute a task --- post: tags: - api summary: Execute a task. description: | Execute a task by *task_id*. If the task does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires `TAIPY_EXECUTOR` role. Code example: ```shell curl -X POST http://localhost:5000/api/v1/tasks/submit/TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: task_id schema: type: string responses: 204: content: application/json: schema: type: object properties: message: type: string description: Status message. task: TaskSchema 404: description: No task has the *task_id* identifier. """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def post(self, task_id): manager = _TaskManagerFactory._build_manager() task = _get_or_raise(task_id) manager._orchestrator().submit_task(task) return {"message": f"Task {task_id} was submitted."}
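The docstrings above only give `curl` examples for the task endpoints. As a complement, here is a minimal Python sketch using the `requests` package (the client already suggested elsewhere in these docstrings); it assumes the REST server runs on localhost port 5000 and that a task configuration named `my_task_config` has been configured, as in the POST example above.

```python
import requests

BASE = "http://localhost:5000/api/v1"

# Create a task from an existing task configuration (config id taken from the docstring example).
created = requests.post(f"{BASE}/tasks", params={"config_id": "my_task_config"}).json()
task_id = created["task"]["id"]

# Retrieve the task, submit it for execution, then delete it.
print(requests.get(f"{BASE}/tasks/{task_id}").json())
print(requests.post(f"{BASE}/tasks/submit/{task_id}").json())
print(requests.delete(f"{BASE}/tasks/{task_id}").json())
```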
|
import uuid from typing import Optional from flask import request from flask_restful import Resource from taipy.config.config import Config from taipy.core import Job, JobId from taipy.core.exceptions.exceptions import NonExistingJob, NonExistingTaskConfig from taipy.core.job._job_manager_factory import _JobManagerFactory from taipy.core.task._task_manager_factory import _TaskManagerFactory from ..exceptions.exceptions import ConfigIdMissingException from ..middlewares._middleware import _middleware from ..schemas import JobSchema def _get_or_raise(job_id: str): manager = _JobManagerFactory._build_manager() job = manager._get(job_id) if job is None: raise NonExistingJob(job_id) return job class JobResource(Resource): """Single object resource --- get: tags: - api summary: Get a job. description: | Return a single job by *job_id*. If the job does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint requires `TAIPY_READER` role. Code example: ```shell curl -X GET http://localhost:5000/api/v1/jobs/JOB_my_task_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: job_id schema: type: string description: The identifier of the job. responses: 200: content: application/json: schema: type: object properties: job: JobSchema 404: description: No job has the *job_id* identifier. delete: tags: - api summary: Delete a job. description: | Delete a job. If the job does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint requires `TAIPY_EDITOR` role. Code example: ```shell curl -X DELETE http://localhost:5000/api/v1/jobs/JOB_my_task_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: job_id schema: type: string description: The identifier of the job. responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No job has the *job_id* identifier. """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def get(self, job_id): schema = JobSchema() job = _get_or_raise(job_id) return {"job": schema.dump(job)} @_middleware def delete(self, job_id): manager = _JobManagerFactory._build_manager() job = _get_or_raise(job_id) manager._delete(job) return {"message": f"Job {job_id} was deleted."} class JobList(Resource): """Creation and get_all --- get: tags: - api summary: Get all jobs. description: | Return an array of all jobs. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint requires `TAIPY_READER` role. Code example: ```shell curl -X GET http://localhost:5000/api/v1/jobs ``` responses: 200: content: application/json: schema: allOf: - type: object properties: results: type: array items: $ref: '#/components/schemas/JobSchema' post: tags: - api summary: Create a job. description: | Create a job from a task *config_id*. If the config does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint requires `TAIPY_EDITOR` role. Code example: ```shell curl -X POST http://localhost:5000/api/v1/jobs?task_id=TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: query name: task_id schema: type: string description: The identifier of the task configuration. 
responses: 201: content: application/json: schema: type: object properties: message: type: string description: Status message. job: JobSchema """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") def fetch_config(self, config_id): config = Config.tasks.get(config_id) if not config: raise NonExistingTaskConfig(config_id) return config @_middleware def get(self): schema = JobSchema(many=True) manager = _JobManagerFactory._build_manager() jobs = manager._get_all() return schema.dump(jobs) @_middleware def post(self): args = request.args task_config_id = args.get("task_id") if not task_config_id: raise ConfigIdMissingException manager = _JobManagerFactory._build_manager() schema = JobSchema() job = self.__create_job_from_schema(task_config_id) manager._set(job) return { "message": "Job was created.", "job": schema.dump(job), }, 201 def __create_job_from_schema(self, task_config_id: str) -> Optional[Job]: task_manager = _TaskManagerFactory._build_manager() task = task_manager._bulk_get_or_create([self.fetch_config(task_config_id)])[0] return Job( id=JobId(f"JOB_{uuid.uuid4()}"), task=task, submit_id=f"SUBMISSION_{uuid.uuid4()}", submit_entity_id=task.id ) class JobExecutor(Resource): """Cancel a job --- post: tags: - api summary: Cancel a job. description: | Cancel a job by *job_id*. If the job does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint requires `TAIPY_EXECUTOR` role. Code example: ```shell curl -X POST http://localhost:5000/api/v1/jobs/cancel/JOB_my_task_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: job_id schema: type: string responses: 204: content: application/json: schema: type: object properties: message: type: string description: Status message. job: JobSchema 404: description: No job has the *job_id* identifier. """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def post(self, job_id): manager = _JobManagerFactory._build_manager() job = _get_or_raise(job_id) manager._cancel(job) return {"message": f"Job {job_id} was cancelled."}
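The job endpoints can be driven the same way. A minimal `requests` sketch, assuming the API runs on localhost:5000; note that, per the handler above, the `task_id` query parameter is resolved as a *task configuration* id (the name `my_task_config` below is illustrative).

```python
import requests

BASE = "http://localhost:5000/api/v1"

# Create a job for an existing task configuration (hypothetical config id).
created = requests.post(f"{BASE}/jobs", params={"task_id": "my_task_config"}).json()
job_id = created["job"]["id"]

# List all jobs, inspect the new one, cancel it, then delete it.
print(requests.get(f"{BASE}/jobs").json())
print(requests.get(f"{BASE}/jobs/{job_id}").json())
print(requests.post(f"{BASE}/jobs/cancel/{job_id}").json())
print(requests.delete(f"{BASE}/jobs/{job_id}").json())
```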
|
from flask import request from flask_restful import Resource from taipy.core.exceptions.exceptions import NonExistingScenario, NonExistingSequence from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from taipy.core.sequence._sequence_manager_factory import _SequenceManagerFactory from ...commons.to_from_model import _to_model from ..exceptions.exceptions import ScenarioIdMissingException, SequenceNameMissingException from ..middlewares._middleware import _middleware from ..schemas import SequenceResponseSchema def _get_or_raise(sequence_id: str): manager = _SequenceManagerFactory._build_manager() sequence = manager._get(sequence_id) if sequence is None: raise NonExistingSequence(sequence_id) return sequence REPOSITORY = "sequence" class SequenceResource(Resource): """Single object resource --- get: tags: - api summary: Get a sequence. description: | Return a single sequence by sequence_id. If the sequence does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires _TAIPY_READER_ role. Code example: ```shell curl -X GET http://localhost:5000/api/v1/sequences/SEQUENCE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: sequence_id schema: type: string description: The identifier of the sequence. responses: 200: content: application/json: schema: type: object properties: sequence: SequenceSchema 404: description: No sequence has the *sequence_id* identifier. delete: tags: - api summary: Delete a sequence. description: | Delete a sequence. If the sequence does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires _TAIPY_EDITOR_ role. Code example: ```shell curl -X DELETE http://localhost:5000/api/v1/sequences/SEQUENCE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: sequence_id schema: type: string description: The identifier of the sequence. responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No sequence has the *sequence_id* identifier. """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def get(self, sequence_id): schema = SequenceResponseSchema() sequence = _get_or_raise(sequence_id) return {"sequence": schema.dump(_to_model(REPOSITORY, sequence))} @_middleware def delete(self, sequence_id): manager = _SequenceManagerFactory._build_manager() _get_or_raise(sequence_id) manager._delete(sequence_id) return {"message": f"Sequence {sequence_id} was deleted."} class SequenceList(Resource): """Creation and get_all --- get: tags: - api summary: Get all sequences. description: | Return an array of all sequences. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires _TAIPY_READER_ role. Code example: ```shell curl -X GET http://localhost:5000/api/v1/sequences ``` responses: 200: content: application/json: schema: allOf: - type: object properties: results: type: array items: $ref: '#/components/schemas/SequenceSchema' post: tags: - api summary: Create a sequence. description: | Create a sequence from scenario_id, sequence_name and task_ids. If the scenario_id does not exist or sequence_name is not provided, a 404 error is returned. !!! 
Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires _TAIPY_EDITOR_ role. Code example: ```shell curl -X POST --data '{"scenario_id": "SCENARIO_scenario_id", "sequence_name": "sequence", "tasks": []}' http://localhost:5000/api/v1/sequences ``` parameters: - in: query name: scenario_id schema: type: string description: The Scenario the Sequence belongs to. name: sequence_name schema: type: string description: The name of the Sequence. name: tasks schema: type: list[string] description: A list of task id of the Sequence. responses: 201: content: application/json: schema: type: object properties: message: type: string description: Status message. sequence: SequenceSchema """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def get(self): schema = SequenceResponseSchema(many=True) manager = _SequenceManagerFactory._build_manager() sequences = [_to_model(REPOSITORY, sequence) for sequence in manager._get_all()] return schema.dump(sequences) @_middleware def post(self): sequence_data = request.json scenario_id = sequence_data.get("scenario_id") sequence_name = sequence_data.get("sequence_name") sequence_task_ids = sequence_data.get("task_ids", []) response_schema = SequenceResponseSchema() if not scenario_id: raise ScenarioIdMissingException if not sequence_name: raise SequenceNameMissingException scenario = _ScenarioManagerFactory._build_manager()._get(scenario_id) if not scenario: raise NonExistingScenario(scenario_id=scenario_id) scenario.add_sequence(sequence_name, sequence_task_ids) sequence = scenario.sequences[sequence_name] return { "message": "Sequence was created.", "sequence": response_schema.dump(_to_model(REPOSITORY, sequence)), }, 201 class SequenceExecutor(Resource): """Execute a sequence --- post: tags: - api summary: Execute a sequence. description: | Execute a sequence from sequence_id. If the sequence does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), This endpoint requires _TAIPY_EXECUTOR_ role. Code example: ```shell curl -X POST http://localhost:5000/api/v1/sequences/submit/SEQUENCE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: sequence_id schema: type: string responses: 204: content: application/json: schema: type: object properties: message: type: string description: Status message. sequence: SequenceSchema 404: description: No sequence has the *sequence_id* identifier. """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def post(self, sequence_id): _get_or_raise(sequence_id) manager = _SequenceManagerFactory._build_manager() manager._submit(sequence_id) return {"message": f"Sequence {sequence_id} was submitted."}
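Unlike the other creation endpoints, sequence creation expects a JSON body rather than query parameters. A minimal `requests` sketch, assuming an existing scenario id; note that the handler above reads the task list from a `task_ids` key, whereas the curl example uses `tasks`.

```python
import requests

BASE = "http://localhost:5000/api/v1"

payload = {
    "scenario_id": "SCENARIO_scenario_id",  # identifier of an existing scenario (illustrative)
    "sequence_name": "my_sequence",
    "task_ids": [],  # ids of tasks already attached to the scenario
}

# Create the sequence, then submit it for execution.
created = requests.post(f"{BASE}/sequences", json=payload).json()
sequence_id = created["sequence"]["id"]
print(requests.post(f"{BASE}/sequences/submit/{sequence_id}").json())
```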
|
from .cycle import CycleList, CycleResource
from .datanode import DataNodeList, DataNodeReader, DataNodeResource, DataNodeWriter
from .job import JobExecutor, JobList, JobResource
from .scenario import ScenarioExecutor, ScenarioList, ScenarioResource
from .sequence import SequenceExecutor, SequenceList, SequenceResource
from .task import TaskExecutor, TaskList, TaskResource

__all__ = [
    "DataNodeResource",
    "DataNodeList",
    "DataNodeReader",
    "DataNodeWriter",
    "TaskList",
    "TaskResource",
    "TaskExecutor",
    "SequenceList",
    "SequenceResource",
    "SequenceExecutor",
    "ScenarioList",
    "ScenarioResource",
    "ScenarioExecutor",
    "CycleResource",
    "CycleList",
    "JobResource",
    "JobList",
    "JobExecutor",
]
|
from typing import List import numpy as np import pandas as pd from flask import request from flask_restful import Resource from taipy.config.config import Config from taipy.core.data._data_manager_factory import _DataManagerFactory from taipy.core.data.operator import Operator from taipy.core.exceptions.exceptions import NonExistingDataNode, NonExistingDataNodeConfig from ...commons.to_from_model import _to_model from ..exceptions.exceptions import ConfigIdMissingException from ..middlewares._middleware import _middleware from ..schemas import ( CSVDataNodeConfigSchema, DataNodeFilterSchema, DataNodeSchema, ExcelDataNodeConfigSchema, GenericDataNodeConfigSchema, InMemoryDataNodeConfigSchema, JSONDataNodeConfigSchema, PickleDataNodeConfigSchema, SQLTableDataNodeConfigSchema, SQLDataNodeConfigSchema, MongoCollectionDataNodeConfigSchema, ) ds_schema_map = { "csv": CSVDataNodeConfigSchema, "pickle": PickleDataNodeConfigSchema, "in_memory": InMemoryDataNodeConfigSchema, "sql_table": SQLTableDataNodeConfigSchema, "sql": SQLDataNodeConfigSchema, "mongo_collection": MongoCollectionDataNodeConfigSchema, "excel": ExcelDataNodeConfigSchema, "generic": GenericDataNodeConfigSchema, "json": JSONDataNodeConfigSchema, } REPOSITORY = "data" def _get_or_raise(data_node_id: str) -> None: manager = _DataManagerFactory._build_manager() data_node = manager._get(data_node_id) if not data_node: raise NonExistingDataNode(data_node_id) return data_node class DataNodeResource(Resource): """Single object resource --- get: tags: - api description: | Returns a `DataNodeSchema^` representing the unique `DataNode^` identified by the *datanode_id* given as parameter. If no data node corresponds to *datanode_id*, a `404` error is returned. !!! Example === "Curl" ```shell curl -X GET http://localhost:5000/api/v1/datanodes/DATANODE_hist_cfg_75750ed8-4e09-4e00-958d -e352ee426cc9 ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `DATANODE_hist_cfg_75750ed8-4e09-4e00-958d-e352ee426cc9` is the value of the *datanode_id* parameter. It represents the identifier of the data node we want to retrieve. In case of success here is an example of the response: ``` JSON {"datanode": { "id": "DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d", "config_id": "historical_data_set", "scope": "<Scope.SCENARIO: 2>", "storage_type": "csv", "name": "Name of my historical data node", "owner_id": "SCENARIO_my_awesome_scenario_97f3fd67-8556-4c62-9b3b-ef189a599a38", "last_edit_date": "2022-08-10T16:03:40.855082", "job_ids": [], "version": "latest", "cacheable": false, "validity_days": null, "validity_seconds": null, "edit_in_progress": false, "data_node_properties": { "path": "daily-min-temperatures.csv", "has_header": true} }} ``` In case of failure here is an example of the response: ``` JSON {"message":"DataNode DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d not found"} ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.get( "http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d") print(response) print(response.json()) ``` `DATANODE_hist_cfg_75750ed8-4e09-4e00-958d-e352ee426cc9` is the value of the *datanode_id* parameter. It represents the identifier of the data node we want to retrieve. 
In case of success here is an output example: ``` <Response [200]> {"datanode": { "id": "DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d", "config_id": "historical_data_set", "scope": "<Scope.SCENARIO: 2>", "storage_type": "csv", "name": "Name of my historical data node", "owner_id": "SCENARIO_my_awesome_scenario_97f3fd67-8556-4c62-9b3b-ef189a599a38", "last_edit_date": "2022-08-10T16:03:40.855082", "job_ids": [], "version": "latest", "cacheable": false, "validity_days": null, "validity_seconds": null, "edit_in_progress": false, "data_node_properties": { "path": "daily-min-temperatures.csv", "has_header": true} }} ``` In case of failure here is an output example: ``` <Response [404]> {"message":"DataNode DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d not found"} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. parameters: - in: path name: datanode_id schema: type: string description: The identifier of the data node to retrieve. responses: 200: content: application/json: schema: type: object properties: datanode: DataNodeSchema 404: description: No data node has the *datanode_id* identifier. delete: tags: - api summary: Delete a data node. description: | Deletes the `DataNode^` identified by the *datanode_id* given as parameter. If the data node does not exist, a 404 error is returned. !!! Example === "Curl" ```shell curl -X DELETE \ http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d` is the value of the *datanode_id* parameter. It represents the identifier of the data node we want to delete. In case of success here is an example of the response: ``` JSON {"msg": "datanode DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d deleted"} ``` In case of failure here is an example of the response: ``` JSON {"message": "Data node DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d not found."} ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.delete( "http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d") print(response) print(response.json()) ``` `DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d` is the value of the *datanode_id* parameter. It represents the identifier of the Cycle we want to delete. In case of success here is an output example: ``` <Response [200]> {"msg": "Data node DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d deleted."} ``` In case of failure here is an output example: ``` <Response [404]> {'message': 'Data node DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d not found.'} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. parameters: - in: path name: datanode_id schema: type: string description: The identifier of the data node to delete. responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No data node has the *datanode_id* identifier. 
""" def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def get(self, datanode_id): schema = DataNodeSchema() datanode = _get_or_raise(datanode_id) return {"datanode": schema.dump(_to_model(REPOSITORY, datanode))} @_middleware def delete(self, datanode_id): _get_or_raise(datanode_id) manager = _DataManagerFactory._build_manager() manager._delete(datanode_id) return {"message": f"Data node {datanode_id} was deleted."} class DataNodeList(Resource): """Creation and get_all --- get: tags: - api description: | Returns a `DataNodeSchema^` list representing all existing data nodes. !!! Example === "Curl" ```shell curl -X GET http://localhost:5000/api/v1/datanodes ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. Here is an example of the response: ``` JSON [ {"datanode": { "id": "DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d", "config_id": "historical_data_set", "scope": "<Scope.SCENARIO: 2>", "storage_type": "csv", "name": "Name of my historical data node", "owner_id": "SCENARIO_my_awesome_scenario_97f3fd67-8556-4c62-9b3b-ef189a599a38", "last_edit_date": "2022-08-10T16:03:40.855082", "job_ids": [], "version": "latest", "cacheable": false, "validity_days": null, "validity_seconds": null, "edit_in_progress": false, "data_node_properties": { "path": "daily-min-temperatures.csv", "has_header": true} }} ] ``` If there is no data node, the response is an empty list as follows: ``` JSON [] ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.get("http://localhost:5000/api/v1/datanodes") print(response) print(response.json()) ``` In case of success here is an output example: ``` <Response [200]> [ {"datanode": { "id": "DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d", "config_id": "historical_data_set", "scope": "<Scope.SCENARIO: 2>", "storage_type": "csv", "name": "Name of my historical data node", "owner_id": "SCENARIO_my_awesome_scenario_97f3fd67-8556-4c62-9b3b-ef189a599a38", "last_edit_date": "2022-08-10T16:03:40.855082", "job_ids": [], "version": "latest", "cacheable": false, "validity_days": null, "validity_seconds": null, "edit_in_progress": false, "data_node_properties": { "path": "daily-min-temperatures.csv", "has_header": true} }} ] ``` If there is no data node, the response is an empty list as follows: ``` <Response [200]> [] ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. responses: 200: content: application/json: schema: allOf: - type: object properties: results: type: array items: $ref: '#/components/schemas/DataNodeSchema' post: tags: - api description: | Creates a new data node from the *config_id* given as parameter. !!! Example === "Curl" ```shell curl -X POST http://localhost:5000/api/v1/datanodes?config_id=historical_data_set ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. In this example the *config_id* value ("historical_data_set") is given as parameter directly in the url. A corresponding `DataNodeConfig^` must exist and must have been configured before. 
Here is the output message example: ``` {"msg": "datanode created", "datanode": { "default_path": null, "path": "daily-min-temperatures.csv", "name": null, "storage_type": "csv", "scope": 2, "has_header": true} } ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.post("http://localhost:5000/api/v1/datanodes?config_id=historical_data_set") print(response) print(response.json()) ``` In this example the *config_id* value ("historical_data_set") is given as parameter directly in the url. A corresponding `DataNodeConfig^` must exist and must have been configured before. Here is the output example: ``` <Response [201]> {'msg': 'datanode created', 'datanode': { 'name': None, 'scope': 2, 'path': 'daily-min-temperatures.csv', 'storage_type': 'csv', 'default_path': None, 'has_header': True}} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. parameters: - in: query name: config_id schema: type: string description: The identifier of the data node configuration. responses: 201: content: application/json: schema: type: object properties: message: type: string description: Status message. datanode: DataNodeSchema """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") def fetch_config(self, config_id): config = Config.data_nodes.get(config_id) if not config: raise NonExistingDataNodeConfig(config_id) return config @_middleware def get(self): schema = DataNodeSchema(many=True) manager = _DataManagerFactory._build_manager() datanodes = [_to_model(REPOSITORY, datanode) for datanode in manager._get_all()] return schema.dump(datanodes) @_middleware def post(self): args = request.args config_id = args.get("config_id") if not config_id: raise ConfigIdMissingException config = self.fetch_config(config_id) schema = ds_schema_map.get(config.storage_type)() manager = _DataManagerFactory._build_manager() manager._bulk_get_or_create({config}) return { "message": "Data node was created.", "datanode": schema.dump(config), }, 201 class DataNodeReader(Resource): """Single object resource --- get: tags: - api description: | Returns the data read from the data node identified by *datanode_id*. If the data node does not exist, a 404 error is returned. !!! Example === "Curl" ```shell curl -X GET \ http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d/read ``` `DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d` is the *datanode_id* parameter. It represents the identifier of the data node to read. Here is an output example. In this case, the storage type of the data node to read is `csv`, and no exposed type is specified. The data is exposed as a list of dictionaries, each dictionary representing a raw of the csv file. ``` {"data": [ {"Date": "1981-01-01", "Temp": 20.7}, {"Date": "1981-01-02", "Temp": 17.9}, {"Date": "1981-01-03", "Temp": 18.8}, {"Date": "1981-01-04", "Temp": 14.6}, {"Date": "1981-01-05", "Temp": 15.8}, {"Date": "1981-01-06", "Temp": 15.8}, {"Date": "1981-01-07", "Temp": 15.8} ]} ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). 
```python import requests response = requests.get( "http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d/read") print(response) print(response.json()) ``` `DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d` is the *datanode_id* parameter. It represents the identifier of the data node to read. Here is an output example. In this case, the storage type of the data node to read is `csv`, and no exposed type is specified. The data is exposed as a list of dictionaries, each dictionary representing a raw of the csv file. ``` {"data": [ {"Date": "1981-01-01", "Temp": 20.7}, {"Date": "1981-01-02", "Temp": 17.9}, {"Date": "1981-01-03", "Temp": 18.8}, {"Date": "1981-01-04", "Temp": 14.6}, {"Date": "1981-01-05", "Temp": 15.8}, {"Date": "1981-01-06", "Temp": 15.8}, {"Date": "1981-01-07", "Temp": 15.8} ]} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. parameters: - in: path name: datanode_id schema: type: string description: The id of the data node to read. requestBody: content: application/json: schema: DataNodeFilterSchema responses: 200: content: application/json: schema: type: object properties: data: type: Any description: The data read from the data node. 404: description: No data node has the *datanode_id* identifier. """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") def __make_operators(self, schema: DataNodeFilterSchema) -> List: return [ ( x.get("key"), x.get("value"), Operator(getattr(Operator, x.get("operator", "").upper())), ) for x in schema.get("operators") ] @_middleware def get(self, datanode_id): schema = DataNodeFilterSchema() data = request.get_json(silent=True) data_node = _get_or_raise(datanode_id) operators = self.__make_operators(schema.load(data)) if data else [] data = data_node.filter(operators) if isinstance(data, pd.DataFrame): data = data.to_dict(orient="records") elif isinstance(data, np.ndarray): data = list(data) return {"data": data} class DataNodeWriter(Resource): """Single object resource --- put: tags: - api summary: Write into a data node. description: | Write data from request body into a data node by *datanode_id*. If the data node does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires `TAIPY_EDITOR` role. Code example: ```shell curl -X PUT -d '[{"path": "/abc", "type": 1}, {"path": "/def", "type": 2}]' -H 'Content-Type: application/json' http://localhost:5000/api/v1/datanodes/DATANODE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9/write ``` parameters: - in: path name: datanode_id schema: type: string requestBody: content: application/json: schema: Any responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No data node has the *datanode_id* identifier. """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def put(self, datanode_id): data = request.json data_node = _get_or_raise(datanode_id) data_node.write(data) return {"message": f"Data node {datanode_id} was successfully written."}
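Beyond plain reads, the `/read` endpoint accepts an optional JSON body matching `DataNodeFilterSchema`, which `DataNodeReader` turns into filter operators, and `/write` overwrites the data node content. A minimal `requests` sketch, assuming a CSV data node with a `Temp` column (the identifier and column name are taken from the docstring examples):

```python
import requests

BASE = "http://localhost:5000/api/v1"
datanode_id = "DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d"  # illustrative id

# Read the full content of the data node.
print(requests.get(f"{BASE}/datanodes/{datanode_id}/read").json())

# Read only the rows where Temp equals 15.8; operator names map to taipy's Operator enum (e.g. "equal").
filter_body = {"operators": [{"key": "Temp", "value": 15.8, "operator": "equal"}]}
print(requests.get(f"{BASE}/datanodes/{datanode_id}/read", json=filter_body).json())

# Overwrite the data node content through the write endpoint.
rows = [{"Date": "1981-01-01", "Temp": 20.7}, {"Date": "1981-01-02", "Temp": 17.9}]
print(requests.put(f"{BASE}/datanodes/{datanode_id}/write", json=rows).json())
```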
|
from flask import request from flask_restful import Resource from taipy.config.config import Config from taipy.core.exceptions.exceptions import NonExistingScenario, NonExistingScenarioConfig from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from ...commons.to_from_model import _to_model from ..exceptions.exceptions import ConfigIdMissingException from ..middlewares._middleware import _middleware from ..schemas import ScenarioResponseSchema def _get_or_raise(scenario_id: str): manager = _ScenarioManagerFactory._build_manager() scenario = manager._get(scenario_id) if scenario is None: raise NonExistingScenario(scenario_id) return scenario REPOSITORY = "scenario" class ScenarioResource(Resource): """Single object resource --- get: tags: - api description: | Returns a `ScenarioSchema^` representing the unique scenario identified by *scenario_id*. If no scenario corresponds to *scenario_id*, a `404` error is returned. !!! Example === "Curl" ```shell curl -X GET http://localhost:5000/api/v1/scenarios/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Scenario we want to retrieve. In case of success here is an example of the response: ``` JSON {"scenario": { "cycle": "CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d", "id": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c", "properties": {}, "tags": [], "version": "latest", "sequences": [ "SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99", "SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4"], "subscribers": [], "creation_date": "2022-08-15T19:21:01.871587", "primary_scenario": true}} ``` In case of failure here is an example of the response: ``` JSON {"message": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c not found."} ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.get( "http://localhost:5000/api/v1/scenarios/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c") print(response) print(response.json()) ``` `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Cycle we want to retrieve. In case of success here is an output example: ``` <Response [200]> {"scenario": { "cycle": "CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d", "id": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c", "properties": {}, "tags": [], "version": "latest", "sequences": [ "SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99", "SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4"], "subscribers": [], "creation_date": "2022-08-15T19:21:01.871587", "primary_scenario": true}} ``` In case of failure here is an output example: ``` <Response [404]> {'message': 'Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c not found.'} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. parameters: - in: path name: scenario_id schema: type: string description: The identifier of the scenario to retrieve. responses: 200: content: application/json: schema: type: object properties: scenario: ScenarioSchema 404: description: No scenario has the *scenario_id* identifier. 
delete: tags: - api description: | Delete the `Scenario^` scenario identified by the *scenario_id* given as parameter. If the scenario does not exist, a 404 error is returned. !!! Example === "Curl" ```shell curl -X DELETE http://localhost:5000/api/v1/scenarios/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the scenario we want to delete. In case of success here is an example of the response: ``` JSON {"msg": "Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c deleted."} ``` In case of failure here is an example of the response: ``` JSON {"message": "Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c not found."} ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.delete( "http://localhost:5000/api/v1/scenarios/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c") print(response) print(response.json()) ``` `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Scenario we want to delete. In case of success here is an output example: ``` <Response [200]> {"msg": "Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c deleted."} ``` In case of failure here is an output example: ``` <Response [404]> {'message': 'Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c not found.'} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. parameters: - in: path name: scenario_id schema: type: string description: The identifier of the scenario to delete. responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No scenario has the *scenario_id* identifier. """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def get(self, scenario_id): schema = ScenarioResponseSchema() scenario = _get_or_raise(scenario_id) return {"scenario": schema.dump(_to_model(REPOSITORY, scenario))} @_middleware def delete(self, scenario_id): manager = _ScenarioManagerFactory._build_manager() _get_or_raise(scenario_id) manager._delete(scenario_id) return {"message": f"Scenario {scenario_id} was deleted."} class ScenarioList(Resource): """Creation and get_all --- get: tags: - api summary: Get all scenarios. description: | Returns a `ScenarioSchema^` list representing all existing Scenarios. !!! Example === "Curl" ```shell curl -X GET http://localhost:5000/api/v1/scenarios ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. Here is an example of the response: ``` JSON [{ "cycle": "CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d", "id": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c", "properties": {}, "tags": [], "version": "latest", "sequences": [ "SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99", "SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4"], "subscribers": [], "creation_date": "2022-08-15T19:21:01.871587", "primary_scenario": true } ] ``` If there is no scenario, the response is an empty list as follows: ``` JSON [] ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). 
```python import requests response = requests.get("http://localhost:5000/api/v1/scenarios") print(response) print(response.json()) ``` In case of success here is an output example: ``` <Response [200]> [{ "cycle": "CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d", "id": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c", "properties": {}, "tags": [], "version": "latest", "sequences": [ "SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99", "SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4"], "subscribers": [], "creation_date": "2022-08-15T19:21:01.871587", "primary_scenario": true } ] ``` If there is no scenario, the response is an empty list as follows: ``` <Response [200]> [] ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. responses: 200: content: application/json: schema: allOf: - type: object properties: results: type: array items: $ref: '#/components/schemas/ScenarioSchema' post: tags: - api description: | Creates a new scenario from the *config_id*. If the config does not exist, a 404 error is returned. !!! Example === "Curl" ```shell curl -X POST http://localhost:5000/api/v1/scenarios?config_id=my_scenario_config ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. In this example the *config_id* value ("my_scenario_config") is given as parameter directly in the url. A corresponding `ScenarioConfig^` must exist and must have been configured before. Here is the output message example: ``` {"msg": "scenario created.", "scenario": { "cycle": "CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d", "id": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c", "properties": {}, "tags": [], "version": "latest", "sequences": [ "SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99", "SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4"], "subscribers": [], "creation_date": "2022-08-15T19:21:01.871587", "primary_scenario": true} } ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.post("http://localhost:5000/api/v1/scenarios?config_id=my_scenario_config") print(response) print(response.json()) ``` In this example the *config_id* value ("my_scenario_config") is given as parameter directly in the url. A corresponding `ScenarioConfig^` must exist and must have been configured before. Here is the output example: ``` <Response [201]> {"msg": "scenario created.", "scenario": { "cycle": "CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d", "id": "SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c", "properties": {}, "tags": [], "version": "latest", "sequences": [ "SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99", "SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4"], "subscribers": [], "creation_date": "2022-08-15T19:21:01.871587", "primary_scenario": true} } ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. parameters: - in: query name: config_id schema: type: string description: The identifier of the scenario configuration. responses: 201: content: application/json: schema: type: object properties: message: type: string description: Status message. 
scenario: ScenarioSchema """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") def fetch_config(self, config_id): config = Config.scenarios.get(config_id) if not config: raise NonExistingScenarioConfig(config_id) return config @_middleware def get(self): schema = ScenarioResponseSchema(many=True) manager = _ScenarioManagerFactory._build_manager() scenarios = [_to_model(REPOSITORY, scenario) for scenario in manager._get_all()] return schema.dump(scenarios) @_middleware def post(self): args = request.args config_id = args.get("config_id") response_schema = ScenarioResponseSchema() manager = _ScenarioManagerFactory._build_manager() if not config_id: raise ConfigIdMissingException config = self.fetch_config(config_id) scenario = manager._create(config) return { "message": "Scenario was created.", "scenario": response_schema.dump(_to_model(REPOSITORY, scenario)), }, 201 class ScenarioExecutor(Resource): """Execute a scenario --- post: tags: - api description: | Executes a scenario by *scenario_id*. If the scenario does not exist, a 404 error is returned. !!! Example === "Curl" ```shell curl -X POST http://localhost:5000/api/v1/scenarios/submit/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Scenario we want to submit. Here is the output message example: ``` {"message": "Executed scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c."} ``` === "Python" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.post( "http://localhost:5000/api/v1/scenarios/submit/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c") print(response) print(response.json()) ``` `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Scenario we want to submit. Here is the output example: ``` <Response [202]> {"message": "Executed scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c."} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EXECUTOR` role. parameters: - in: path name: scenario_id schema: type: string description: The identifier of the scenario to submit. responses: 202: content: application/json: schema: type: object properties: message: type: string description: Status message. scenario: ScenarioSchema 404: description: No scenario has the *scenario_id* identifier. """ def __init__(self, **kwargs): self.logger = kwargs.get("logger") @_middleware def post(self, scenario_id): _get_or_raise(scenario_id) manager = _ScenarioManagerFactory._build_manager() manager._submit(scenario_id) return {"message": f"Scenario {scenario_id} was submitted."}
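Putting the scenario endpoints together, here is a minimal `requests` sketch; it assumes a scenario configuration named `my_scenario_config` (the name used in the docstring examples) exists in the Core application backing the REST server.

```python
import requests

BASE = "http://localhost:5000/api/v1"

# Create a scenario from an existing scenario configuration.
created = requests.post(f"{BASE}/scenarios", params={"config_id": "my_scenario_config"}).json()
scenario_id = created["scenario"]["id"]

# Submit the scenario for execution, then list all scenarios.
print(requests.post(f"{BASE}/scenarios/submit/{scenario_id}").json())
print(requests.get(f"{BASE}/scenarios").json())
```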
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from functools import wraps
from importlib import util

from taipy.core.common._utils import _load_fct


def _middleware(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        if _using_enterprise():
            return _enterprise_middleware()(f)(*args, **kwargs)
        else:
            return f(*args, **kwargs)

    return wrapper


def _using_enterprise():
    return util.find_spec("taipy.enterprise") is not None


def _enterprise_middleware():
    return _load_fct("taipy.enterprise.rest.api.middlewares._middleware", "_middleware")
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
class ConfigIdMissingException(Exception):
    def __init__(self):
        self.message = "Config id is missing."


class ScenarioIdMissingException(Exception):
    def __init__(self):
        self.message = "Scenario id is missing."


class SequenceNameMissingException(Exception):
    def __init__(self):
        self.message = "Sequence name is missing."
|
from marshmallow import Schema, fields


class CycleSchema(Schema):
    name = fields.String()
    frequency = fields.String()
    properties = fields.Dict()
    creation_date = fields.String()
    start_date = fields.String()
    end_date = fields.String()


class CycleResponseSchema(CycleSchema):
    id = fields.String()
|
from marshmallow import Schema, fields


class TaskSchema(Schema):
    config_id = fields.String()
    id = fields.String()
    owner_id = fields.String()
    parent_ids = fields.List(fields.String)
    input_ids = fields.List(fields.String)
    function_name = fields.String()
    function_module = fields.String()
    output_ids = fields.List(fields.String)
    version = fields.String()
|
from marshmallow import Schema, fields


class CallableSchema(Schema):
    fct_name = fields.String()
    fct_module = fields.String()


class JobSchema(Schema):
    id = fields.String()
    task_id = fields.String()
    status = fields.String()
    force = fields.Boolean()
    creation_date = fields.String()
    subscribers = fields.Nested(CallableSchema)
    stacktrace = fields.List(fields.String)
|
from marshmallow import Schema, fields


class SequenceSchema(Schema):
    owner_id = fields.String()
    parent_ids = fields.List(fields.String)
    tasks = fields.List(fields.String)
    version = fields.String()
    properties = fields.Dict()


class SequenceResponseSchema(SequenceSchema):
    id = fields.String()
    subscribers = fields.List(fields.Dict)
|
from .cycle import CycleResponseSchema, CycleSchema
from .datanode import (
    CSVDataNodeConfigSchema,
    DataNodeConfigSchema,
    DataNodeFilterSchema,
    DataNodeSchema,
    ExcelDataNodeConfigSchema,
    GenericDataNodeConfigSchema,
    InMemoryDataNodeConfigSchema,
    JSONDataNodeConfigSchema,
    MongoCollectionDataNodeConfigSchema,
    PickleDataNodeConfigSchema,
    SQLDataNodeConfigSchema,
    SQLTableDataNodeConfigSchema,
)
from .job import JobSchema
from .scenario import ScenarioResponseSchema, ScenarioSchema
from .sequence import SequenceResponseSchema, SequenceSchema
from .task import TaskSchema

__all__ = [
    "DataNodeSchema",
    "DataNodeFilterSchema",
    "TaskSchema",
    "SequenceSchema",
    "SequenceResponseSchema",
    "ScenarioSchema",
    "ScenarioResponseSchema",
    "CycleSchema",
    "CycleResponseSchema",
    "JobSchema",
]
|
from marshmallow import Schema, fields, pre_dump


class DataNodeSchema(Schema):
    config_id = fields.String()
    scope = fields.String()
    id = fields.String()
    storage_type = fields.String()
    name = fields.String()
    owner_id = fields.String()
    parent_ids = fields.List(fields.String)
    last_edit_date = fields.String()
    job_ids = fields.List(fields.String)
    version = fields.String()
    cacheable = fields.Boolean()
    validity_days = fields.Float()
    validity_seconds = fields.Float()
    edit_in_progress = fields.Boolean()
    properties = fields.Dict()


class DataNodeConfigSchema(Schema):
    name = fields.String()
    storage_type = fields.String()
    scope = fields.Integer()
    cacheable = fields.Boolean()

    @pre_dump
    def serialize_scope(self, obj, **kwargs):
        obj.scope = obj.scope.value
        return obj


class CSVDataNodeConfigSchema(DataNodeConfigSchema):
    path = fields.String()
    default_path = fields.String()
    has_header = fields.Boolean()


class InMemoryDataNodeConfigSchema(DataNodeConfigSchema):
    default_data = fields.Inferred()


class PickleDataNodeConfigSchema(DataNodeConfigSchema):
    path = fields.String()
    default_path = fields.String()
    default_data = fields.Inferred()


class SQLTableDataNodeConfigSchema(DataNodeConfigSchema):
    db_name = fields.String()
    table_name = fields.String()


class SQLDataNodeConfigSchema(DataNodeConfigSchema):
    db_name = fields.String()
    read_query = fields.String()
    write_query = fields.List(fields.String())


class MongoCollectionDataNodeConfigSchema(DataNodeConfigSchema):
    db_name = fields.String()
    collection_name = fields.String()


class ExcelDataNodeConfigSchema(DataNodeConfigSchema):
    path = fields.String()
    default_path = fields.String()
    has_header = fields.Boolean()
    sheet_name = fields.String()


class GenericDataNodeConfigSchema(DataNodeConfigSchema):
    pass


class JSONDataNodeConfigSchema(DataNodeConfigSchema):
    path = fields.String()
    default_path = fields.String()


class OperatorSchema(Schema):
    key = fields.String()
    value = fields.Inferred()
    operator = fields.String()


class DataNodeFilterSchema(DataNodeConfigSchema):
    operators = fields.List(fields.Nested(OperatorSchema))
    join_operator = fields.String(default="AND")
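As an illustration of how `DataNodeFilterSchema` is consumed by the `/read` endpoint, the sketch below validates a filter payload with marshmallow the same way `DataNodeReader` does before filtering; the payload values are made up and the import path is assumed from this package layout.

```python
from taipy.rest.api.schemas import DataNodeFilterSchema  # import path assumed

payload = {
    "operators": [{"key": "Temp", "value": 15.8, "operator": "equal"}],
    "join_operator": "AND",
}

# load() validates and deserializes the request body before it is turned into Operator tuples.
filters = DataNodeFilterSchema().load(payload)
print(filters["operators"])
```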
|
from marshmallow import Schema, fields


class ScenarioSchema(Schema):
    sequences = fields.Dict()
    properties = fields.Dict()
    primary_scenario = fields.Boolean(default=False)
    tags = fields.List(fields.String)
    version = fields.String()


class ScenarioResponseSchema(ScenarioSchema):
    id = fields.String()
    subscribers = fields.List(fields.Dict)
    cycle = fields.String()
    creation_date = fields.String()
|
import inspect
import os
from importlib import util

if util.find_spec("taipy") and util.find_spec("taipy.gui"):
    from taipy.gui import Gui

    taipy_path = f"{os.path.dirname(os.path.dirname(inspect.getfile(Gui)))}"
    potential_file_paths = [
        f"{taipy_path}{os.sep}gui{os.sep}viselements.json",
        f"{taipy_path}{os.sep}gui_core{os.sep}viselements.json",
    ]
    if potential_file_paths := [path for path in potential_file_paths if os.path.exists(path)]:
        print(f"Path: {';;;'.join(potential_file_paths)}")
    else:
        print("Visual element descriptor files not found in taipy-gui package")
else:
    print("taipy-gui package is not installed within the selected python environment")
|
import pandas as pd

import taipy as tp
from taipy import Config
from taipy.gui import Gui, Markdown, notify

Config.configure_global_app(clean_entities_enabled=True)
tp.clean_all_entities()

# Configure a single task that counts the characters of the input text.
input_text_cfg = Config.configure_data_node(id="input_text")
text_length_cfg = Config.configure_data_node(id="text_length")
count_characters_cfg = Config.configure_task(
    id="count_characters", function=len, input=input_text_cfg, output=text_length_cfg
)
scenario_cfg = Config.configure_scenario_from_tasks(id="count_characters", task_configs=[count_characters_cfg])

scenario_list = tp.get_scenarios()
input_text = ""

main_md = Markdown(
    """
# Taipy Character Counter

Enter Text: <|{input_text}|input|> <|Submit|button|on_action=submit|>

----------

Past Results:

<|{create_results_table(scenario_list)}|table|width=fit-content|>
"""
)


def submit(state):
    # Create a scenario, write the user input into it, and run it synchronously.
    scenario = tp.create_scenario(scenario_cfg)
    scenario.input_text.write(state.input_text)
    state.input_text = ""
    tp.submit(scenario, wait=True)
    notify(state, "S", "Submitted!")
    state.scenario_list = tp.get_scenarios()


def create_results_table(scenario_list):
    # Build a DataFrame with one row per scenario: id, input text, and computed length.
    table = [(s.id, s.input_text.read(), s.text_length.read()) for s in scenario_list]
    df = pd.DataFrame(table, columns=["id", "input_text", "text_length"])
    print(df)
    return df


tp.Core().run()
gui = Gui(main_md)
gui.run(run_browser=False)
|
import json


def add_line(source, line, step):
    # Rewrite a Markdown line from the getting-started pages so it works inside a notebook.
    line = line.replace('Getting Started with Taipy', 'Getting Started with Taipy on Notebooks')
    line = line.replace('(../src/', '(https://docs.taipy.io/en/latest/getting_started/src/')
    line = line.replace('(dataset.csv)', '(https://docs.taipy.io/en/latest/getting_started/step_01/dataset.csv)')
    if line.startswith('!['):
        # Image lines: extract the source and width, then embed the image in a centered HTML block.
        img_src = line.split('](')[1].split(')')[0]
        width = line.split('](')[1].split(')')[1].split(' ')[1]
        source.append('<div align="center">\n')
        source.append(f' <img src={img_src} {width}>\n')
        source.append('</div>\n')
    elif step == 'step_00' and line.startswith('Gui(page='):
        source.append('\n')
        source.append('Gui("# Getting Started with Taipy").run(dark_mode=False)\n')
    elif line.startswith('Gui(page=') and step != 'step_00':
        search_for_md = line.split(')')
        name_of_md = search_for_md[0][9:]
        source.append(f'gui = Gui({name_of_md})\n')
        source.append('gui.run()\n')
    elif step == 'step_00' and line.startswith('from taipy'):
        source.append("from taipy.gui import Gui, Markdown\n")
    elif 'Notebook' in line and 'step' in step:
        pass
    else:
        source.append(line + '\n')
    return source


def detect_new_cell(notebook, source, cell, line, execution_count, force_creation=False):
    # Close the current cell when a code fence is reached and start a cell of the other type.
    if line.startswith('```python') or line.startswith('```') and cell == 'code' or force_creation:
        source = source[:-1]
        if cell == 'code':
            notebook['cells'].append({
                "cell_type": "code",
                "metadata": {},
                "outputs": [],
                "execution_count": execution_count,
                "source": source
            })
            cell = 'markdown'
            execution_count += 1
        else:
            notebook['cells'].append({
                "cell_type": "markdown",
                "metadata": {},
                "source": source
            })
            cell = 'code'
        source = []
    return cell, source, notebook, execution_count


def create_introduction(notebook, execution_count):
    with open('index.md', 'r') as f:
        text = f.read()
    split_text = text.split('\n')
    source = []
    for line in split_text:
        if not line.startswith('``` console'):
            add_line(source, line, 'index')
        else:
            break
    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": source
    })
    notebook['cells'].append({
        "cell_type": "code",
        "metadata": {},
        "outputs": [],
        "execution_count": execution_count,
        "source": ['# !pip install taipy\n', '# !pip install scikit-learn\n', '# !pip install statsmodels']
    })
    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": ['## Using Notebooks\n']
    })
    execution_count += 1
    return notebook, execution_count


def create_steps(notebook, execution_count):
    steps = ['step_0' + str(i) for i in range(0, 10)] + ['step_10', 'step_11', 'step_12']
    source = []
    for step in steps:
        if source != []:
            cell, source, notebook, execution_count = detect_new_cell(
                notebook, source, cell, line, execution_count, force_creation=True
            )
        with open(step + '/ReadMe.md', 'r') as f:
            text = f.read()
        split_text = text.split('\n')
        cell = "markdown"
        for line in split_text:
            add_line(source, line, step)
            cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line, execution_count)
    return notebook, execution_count


if __name__ == '__main__':
    notebook = {
        "cells": [],
        "metadata": {
            "language_info": {
                "codemirror_mode": {"name": "ipython", "version": 3},
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython3"
            },
            "orig_nbformat": 4
        },
        "nbformat": 4,
        "nbformat_minor": 2
    }
    execution_count = 0
    notebook, execution_count = create_introduction(notebook, execution_count)
    notebook, execution_count = create_steps(notebook, execution_count)
    with open('getting_started.ipynb', 'w', encoding='utf-8') as f:
        json.dump(notebook, f, indent=2)
|
from step_08 import * # Get all the scenarios already created all_scenarios = tp.get_scenarios() # Delete the scenarios that don't have a name attribute # All the scenarios of the previous steps do not have an associated name so they will be deleted, # this will not be the case for those created by this step [tp.delete(scenario.id) for scenario in all_scenarios if scenario.name is None] # Initial variable for the scenario selector # The list of possible values (lov) for the scenario selector is a list of tuples (scenario_id, scenario_name), # but the selected_scenario is just used to retrieve the scenario id and what gets displayed is the name of the scenario. scenario_selector = [(scenario.id, scenario.name) for scenario in tp.get_scenarios()] selected_scenario = None scenario_manager_page = page + """ # Create your scenario **Prediction date**\n\n <|{day}|date|not with_time|> **Max capacity**\n\n <|{max_capacity}|number|> **Number of predictions**\n\n<|{n_predictions}|number|> <|Create new scenario|button|on_action=create_scenario|> ## Scenario <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> ## Display the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def create_name_for_scenario(state) -> str: name = f"Scenario ({state.day.strftime('%A, %d %b')}; {state.max_capacity}; {state.n_predictions})" # Change the name if it is the same as some scenarios if name in [s[1] for s in state.scenario_selector]: name += f" ({len(state.scenario_selector)})" return name # Change the create_scenario function in order to change the default parameters # and allow the creation of multiple scenarios def create_scenario(state): print("Execution of scenario...") # Extra information for the scenario creation_date = state.day name = create_name_for_scenario(state) # Create a scenario scenario = tp.create_scenario(scenario_cfg, creation_date=creation_date, name=name) state.selected_scenario = (scenario.id, name) # Submit the scenario that is currently selected submit_scenario(state) def submit_scenario(state): print("Submitting scenario...") # Get the currently selected scenario scenario = tp.get(state.selected_scenario[0]) # Conversion to the right format (change?) 
day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the Data Nodes scenario.day.write(day) scenario.n_predictions.write(int(state.n_predictions)) scenario.max_capacity.write(int(state.max_capacity)) scenario.creation_date = state.day # Execute the scenario tp.submit(scenario) # Update the scenario selector and the scenario that is currently selected update_scenario_selector(state, scenario) # change list to scenario # Update the chart directly update_chart(state) def update_scenario_selector(state, scenario): print("Updating scenario selector...") # Update the scenario selector state.scenario_selector += [(scenario.id, scenario.name)] def update_chart(state): # Now, the selected_scenario comes from the state, it is interactive scenario = tp.get(state.selected_scenario[0]) pipeline = scenario.pipelines[state.selected_pipeline] update_predictions_dataset(state, pipeline) def on_change(state, var_name: str, var_value): if var_name == "n_week": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == var_value] elif var_name == "selected_pipeline" or var_name == "selected_scenario": # Update the chart when the scenario or the pipeline is changed # Check if we can read the data node to update the chart if tp.get(state.selected_scenario[0]).predictions.read() is not None: update_chart(state) if __name__ == "__main__": tp.Core().run() Gui(page=scenario_manager_page).run(dark_mode=False)
|
from step_07 import * # Initial variables ## Initial variables for the scenario day = dt.datetime(2021, 7, 26) n_predictions = 40 max_capacity = 200 page_scenario_manager = page + """ # Change your scenario **Prediction date**\n\n <|{day}|date|not with_time|> **Max capacity**\n\n <|{max_capacity}|number|> **Number of predictions**\n\n<|{n_predictions}|number|> <|Save changes|button|on_action={submit_scenario}|> Select the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|Update chart|button|on_action={update_chart}|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def create_scenario(): global selected_scenario print("Creating scenario...") scenario = tp.create_scenario(scenario_cfg) selected_scenario = scenario.id tp.submit(scenario) def submit_scenario(state): print("Submitting scenario...") # Get the selected scenario: in this current step a single scenario is created then modified here. scenario = tp.get(selected_scenario) # Conversion to the right format state_day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the datanodes scenario.day.write(state_day) scenario.n_predictions.write(int(state.n_predictions)) scenario.max_capacity.write(int(state.max_capacity)) # Execute the pipelines/code tp.submit(scenario) # Update the chart when we change the scenario update_chart(state) def update_chart(state): # Select the right scenario and pipeline scenario = tp.get(selected_scenario) pipeline = scenario.pipelines[state.selected_pipeline] # Update the chart based on this pipeline update_predictions_dataset(state, pipeline) if __name__ == "__main__": global selected_scenario tp.Core().run() # Creation of a single scenario create_scenario() Gui(page=page_scenario_manager).run(dark_mode=False)
|
import datetime as dt import pandas as pd from taipy import Config, Scope from step_01 import path_to_csv # Datanodes (3.1) ## Input Data Nodes initial_dataset_cfg = Config.configure_data_node(id="initial_dataset", storage_type="csv", path=path_to_csv, scope=Scope.GLOBAL) # We assume the current day is the 26th of July 2021. # This day can be changed to simulate multiple executions of scenarios on different days day_cfg = Config.configure_data_node(id="day", default_data=dt.datetime(2021, 7, 26)) n_predictions_cfg = Config.configure_data_node(id="n_predictions", default_data=40) max_capacity_cfg = Config.configure_data_node(id="max_capacity", default_data=200) ## Remaining Data Nodes cleaned_dataset_cfg = Config.configure_data_node(id="cleaned_dataset", validity_period=dt.timedelta(days=1), scope=Scope.GLOBAL) predictions_cfg = Config.configure_data_node(id="predictions", scope=Scope.PIPELINE) # Functions (3.2) def clean_data(initial_dataset: pd.DataFrame): print(" Cleaning data") # Convert the date column to datetime initial_dataset['Date'] = pd.to_datetime(initial_dataset['Date']) cleaned_dataset = initial_dataset.copy() return cleaned_dataset def predict_baseline(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int): print(" Predicting baseline") # Select the train data train_dataset = cleaned_dataset[cleaned_dataset['Date'] < day] predictions = train_dataset['Value'][-n_predictions:].reset_index(drop=True) predictions = predictions.apply(lambda x: min(x, max_capacity)) return predictions # Tasks (3.3) clean_data_task_cfg = Config.configure_task(id="clean_data", function=clean_data, input=initial_dataset_cfg, output=cleaned_dataset_cfg, skippable=True) predict_baseline_task_cfg = Config.configure_task(id="predict_baseline", function=predict_baseline, input=[cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg], output=predictions_cfg)
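A quick way to sanity-check the two functions above before wiring them into Taipy tasks is to call them on a tiny synthetic frame. The snippet below is only a sketch and not part of the Getting Started code; it assumes you run it from the Getting Started folder so that step_03 (and therefore step_01 with its dataset.csv) is importable.
import datetime as dt
import pandas as pd
from step_03 import clean_data, predict_baseline

# Ten hourly points on one day, values 0..9
sample = pd.DataFrame({
    "Date": pd.date_range("2021-07-20", periods=10, freq="H").astype(str),
    "Value": range(10),
})
cleaned = clean_data(sample)
# Predict 3 points for 08:00 that day, capped at a capacity of 5
predictions = predict_baseline(cleaned, n_predictions=3,
                               day=dt.datetime(2021, 7, 20, 8), max_capacity=5)
print(predictions)  # last three values before `day` (5, 6, 7), each capped at 5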
|
from step_05 import * from step_06 import scenario_cfg from taipy import Config # Set the list of pipelines names # It will be used in a selector of pipelines pipeline_selector = ["baseline", "ml"] selected_pipeline = pipeline_selector[0] scenario_page = page + """ Select the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|Update chart|button|on_action=update_chart|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def create_scenario(): print("Creating scenario...") scenario = tp.create_scenario(scenario_cfg) scenario = submit_scenario(scenario) return scenario def submit_scenario(scenario): print("Submitting scenario...") tp.submit(scenario) return scenario def update_chart(state): print("'Update chart' button clicked") # Select the right pipeline pipeline = scenario.pipelines[state.selected_pipeline] # Update the chart based on this pipeline # It is the same function as created before in step_5 update_predictions_dataset(state, pipeline) if __name__ == "__main__": # Delete all entities Config.configure_global_app(clean_entities_enabled=True) tp.clean_all_entities() tp.Core().run() # Creation of our first scenario scenario = create_scenario() Gui(page=scenario_page).run(dark_mode=False)
|
# For the sake of clarity, we have used an AutoRegressive model rather than a pure ML model such as:
# Random Forest, Linear Regression, LSTM, etc.
from statsmodels.tsa.ar_model import AutoReg
from taipy import Config

from step_04 import *
from step_03 import cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg, predictions_cfg, pd, dt


# This is the function that will be used by the task
def predict_ml(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int):
    print(" Predicting with ML")
    # Select the train data
    train_dataset = cleaned_dataset[cleaned_dataset["Date"] < day]

    # Fit the AutoRegressive model
    model = AutoReg(train_dataset["Value"], lags=7).fit()

    # Get the n_predictions forecasts
    predictions = model.forecast(n_predictions).reset_index(drop=True)
    predictions = predictions.apply(lambda x: min(x, max_capacity))
    return predictions


# Create the task configuration of the predict_ml function.
## We use the same input and output as the previous predict_baseline task, but we change the function
predict_ml_task_cfg = Config.configure_task(id="predict_ml",
                                            function=predict_ml,
                                            input=[cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg],
                                            output=predictions_cfg)

# Create the new pipeline that will clean and predict with the ml model
ml_pipeline_cfg = Config.configure_pipeline(id="ml", task_configs=[clean_data_task_cfg, predict_ml_task_cfg])

# Configure our scenario which is our business problem.
scenario_cfg = Config.configure_scenario(id="scenario",
                                         pipeline_configs=[baseline_pipeline_cfg, ml_pipeline_cfg])

# The configuration is now complete
if __name__ == "__main__":
    tp.Core().run()

    # Create the scenario
    scenario = tp.create_scenario(scenario_cfg)

    # Execute it
    tp.submit(scenario)

    # Get the resulting scenario
    ## Print the predictions of the two pipelines (baseline and ml)
    print("\nBaseline predictions\n", scenario.baseline.predictions.read())
    print("\nMachine Learning predictions\n", scenario.ml.predictions.read())
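For reference, the AutoReg call used in predict_ml can be tried in isolation on synthetic data. This is only an illustrative sketch, not part of the step; it just assumes statsmodels is installed.
import numpy as np
import pandas as pd
from statsmodels.tsa.ar_model import AutoReg

series = pd.Series(np.sin(np.arange(200) / 10))        # fake "Value" history
model = AutoReg(series, lags=7).fit()                   # same lags as predict_ml
forecast = model.forecast(40).reset_index(drop=True)    # 40 steps ahead, like n_predictions
print(forecast.head())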
|
from step_01 import dataset, n_week, Gui # Select the week based on the slider value dataset_week = dataset[dataset["Date"].dt.isocalendar().week == n_week] page = """ # Getting started with Taipy Select week: *<|{n_week}|>* <|{n_week}|slider|min=1|max=52|> <|{dataset_week}|chart|type=bar|x=Date|y=Value|height=100%|width=100%|> """ # on_change is the function that is called when any variable is changed def on_change(state, var_name: str, var_value): if var_name == "n_week": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == var_value] if __name__ == "__main__": Gui(page=page).run(dark_mode=False)
|
from step_11 import * from sklearn.metrics import mean_absolute_error, mean_squared_error # Initial dataset for comparison comparison_scenario = pd.DataFrame({"Scenario Name": [], "RMSE baseline": [], "MAE baseline": [], "RMSE ML": [], "MAE ML": []}) # Indicates if the comparison is done comparison_scenario_done = False # Selector for metrics metric_selector = ["RMSE", "MAE"] selected_metric = metric_selector[0] def compute_metrics(historical_data, predicted_data): rmse = mean_squared_error(historical_data, predicted_data) mae = mean_absolute_error(historical_data, predicted_data) return rmse, mae def compare(state): print("Comparing...") # Initial lists for comparison scenario_names = [] rmses_baseline = [] maes_baseline = [] rmses_ml = [] maes_ml = [] # Go through all the primary scenarios all_scenarios = tp.get_primary_scenarios() all_scenarios_ordered = sorted(all_scenarios, key=lambda x: x.creation_date.timestamp()) for scenario in all_scenarios_ordered: print(f"Scenario {scenario.name}") # Go through all the pipelines for pipeline in scenario.pipelines.values(): print(f" Pipeline {pipeline.config_id}") # Get the predictions dataset with the historical data only_prediction_dataset = create_predictions_dataset(pipeline)[-pipeline.n_predictions.read():] # Series to compute the metrics (true values and predicted values) historical_values = only_prediction_dataset["Historical values"] predicted_values = only_prediction_dataset["Predicted values"] # Compute the metrics for this pipeline and primary scenario rmse, mae = compute_metrics(historical_values, predicted_values) # Add values to the appropriate lists if "baseline" in pipeline.config_id: rmses_baseline.append(rmse) maes_baseline.append(mae) elif "ml" in pipeline.config_id: rmses_ml.append(rmse) maes_ml.append(mae) scenario_names.append(scenario.creation_date.strftime("%A %d %b")) # Update comparison_scenario state.comparison_scenario = pd.DataFrame({"Scenario Name": scenario_names, "RMSE baseline": rmses_baseline, "MAE baseline": maes_baseline, "RMSE ML": rmses_ml, "MAE ML": maes_ml}) # When comparison_scenario_done will be set to True, # the part with the graphs will be finally rendered state.comparison_scenario_done = True # Performance page page_performance = """ <br/> <|part|render={comparison_scenario_done}| <|Table|expanded=False|expandable| <|{comparison_scenario}|table|width=100%|> |> <|{selected_metric}|selector|lov={metric_selector}|dropdown|> <|part|render={selected_metric=="RMSE"}| <|{comparison_scenario}|chart|type=bar|x=Scenario Name|y[1]=RMSE baseline|y[2]=RMSE ML|height=100%|width=100%|> |> <|part|render={selected_metric=="MAE"}| <|{comparison_scenario}|chart|type=bar|x=Scenario Name|y[1]=MAE baseline|y[2]=MAE ML|height=100%|width=100%|> |> |> <center> <|Compare primarys|button|on_action=compare|> </center> """ lov_menu = [("Data-Visualization", "Data Visualization"), ("Scenario-Manager", "Scenario Manager"), ("Performance", "Performance")] # Create a menu with our pages root_md = "<|menu|label=Menu|lov={lov_menu}|on_action=menu_fct|>" pages = {"/":root_md, "Data-Visualization":page_data_visualization, "Scenario-Manager":page_scenario_manager, "Performance":page_performance} def menu_fct(state, var_name: str, fct: str, var_value: list): # Change the value of the state.page variable in order to render the correct page navigate(state, var_value["args"][0]) if __name__ == "__main__": tp.Core().run() Gui(pages=pages).run(dark_mode=False)
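One caveat about compute_metrics above: scikit-learn's mean_squared_error returns the mean squared error, so the value stored in the "RMSE" columns is actually the MSE. A minimal corrected sketch follows; the name compute_metrics_rmse is ours, not the repository's.
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error

def compute_metrics_rmse(historical_data, predicted_data):
    # Take the square root to report a true RMSE instead of the MSE
    rmse = np.sqrt(mean_squared_error(historical_data, predicted_data))
    mae = mean_absolute_error(historical_data, predicted_data)
    return rmse, mae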
|
import numpy as np import pandas as pd from step_04 import tp, baseline_pipeline_cfg, dt from step_02 import * # Initialize the "predictions" dataset predictions_dataset = pd.DataFrame( {"Date": [dt.datetime(2021, 6, 1)], "Historical values": [np.NaN], "Predicted values": [np.NaN]}) # Add a button and a chart for our predictions pipeline_page = page + """ Press <|predict|button|on_action=predict|> to predict with default parameters (30 predictions) and June 1st as day. <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def predict(state): print("'Predict' button clicked") pipeline = create_and_submit_pipeline() update_predictions_dataset(state, pipeline) def create_and_submit_pipeline(): print("Execution of pipeline...") # Create the pipeline from the pipeline config pipeline = tp.create_pipeline(baseline_pipeline_cfg) # Submit the pipeline (Execution) tp.submit(pipeline) return pipeline def create_predictions_dataset(pipeline): print("Creating predictions dataset...") # Read data from the pipeline predictions = pipeline.predictions.read() day = pipeline.day.read() n_predictions = pipeline.n_predictions.read() cleaned_data = pipeline.cleaned_dataset.read() # Set arbitrarily the time window for the chart as 5 times the number of predictions window = 5 * n_predictions # Create the historical dataset that will be displayed new_length = len(cleaned_data[cleaned_data["Date"] < day]) + n_predictions temp_df = cleaned_data[:new_length] temp_df = temp_df[-window:].reset_index(drop=True) # Create the series that will be used in the concat historical_values = pd.Series(temp_df["Value"], name="Historical values") predicted_values = pd.Series([np.NaN] * len(temp_df), name="Predicted values") predicted_values[-len(predictions):] = predictions # Create the predictions dataset # Columns : [Date, Historical values, Predicted values] return pd.concat([temp_df["Date"], historical_values, predicted_values], axis=1) def update_predictions_dataset(state, pipeline): print("Updating predictions dataset...") state.predictions_dataset = create_predictions_dataset(pipeline) if __name__ == "__main__": tp.Core().run() Gui(page=pipeline_page).run(dark_mode=False)
|
from taipy import Gui import pandas as pd def get_data(path_to_csv: str): # pandas.read_csv() returns a pd.DataFrame dataset = pd.read_csv(path_to_csv) dataset["Date"] = pd.to_datetime(dataset["Date"]) return dataset # Read the dataframe path_to_csv = "dataset.csv" dataset = get_data(path_to_csv) # Initial value n_week = 10 # Definition of the page page = """ # Getting started with Taipy Week number: *<|{n_week}|>* Interact with this slider to change the week number: <|{n_week}|slider|min=1|max=52|> ## Dataset: Display the last three months of data: <|{dataset[9000:]}|chart|type=bar|x=Date|y=Value|height=100%|> <|{dataset}|table|height=400px|width=95%|> """ if __name__ == "__main__": # Create a Gui object with our page content Gui(page=page).run(dark_mode=False)
|
from step_10 import * from step_06 import ml_pipeline_cfg from taipy import Config, Frequency from taipy.gui import notify # Create scenarios each week and compare them scenario_daily_cfg = Config.configure_scenario(id="scenario", pipeline_configs=[baseline_pipeline_cfg, ml_pipeline_cfg], frequency=Frequency.DAILY) if __name__ == "__main__": # Delete all entities Config.configure_global_app(clean_entities_enabled=True) tp.clean_all_entities() # Change the inital scenario selector to see which scenarios are primary scenario_selector = [(scenario.id, ("*" if scenario.is_primary else "") + scenario.name) for scenario in tp.get_scenarios()] # Redefine update_scenario_selector to add "*" in the display name when the scnario is primary def update_scenario_selector(state, scenario): print("Updating scenario selector...") # Create the scenario name for the scenario selector # This name changes dependind whether the scenario is primary or not scenario_name = ("*" if scenario.is_primary else "") + scenario.name print(scenario_name) # Update the scenario selector state.scenario_selector += [(scenario.id, scenario_name)] selected_scenario_is_primary = None # Change the create_scenario function to create a scenario with the selected frequency def create_scenario(state): print("Execution of scenario...") # Extra information for scenario creation_date = state.day name = create_name_for_scenario(state) # Create a scenario with the week cycle scenario = tp.create_scenario(scenario_daily_cfg, creation_date=creation_date, name=name) state.selected_scenario = (scenario.id, name) # Change the scenario that is currently selected submit_scenario(state) # This is the same code as in step_9_dynamic_scenario_creation.py def submit_scenario(state): print("Submitting scenario...") # Get the currently selected scenario scenario = tp.get(state.selected_scenario[0]) # Conversion to the right format state_day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the Data Nodes # if state.day != scenario.day.read(): scenario.day.write(state_day) # if int(state.n_predictions) != scenario.n_predictions.read(): scenario.n_predictions.write(int(state.n_predictions)) # if state.max_capacity != scenario.max_capacity.read(): scenario.max_capacity.write(int(state.max_capacity)) # if state.day != scenario.creation_date: scenario.creation_date = state.day # Execute the pipelines/code tp.submit(scenario) # Update the scenario selector and the scenario that is currently selected update_scenario_selector(state, scenario) # change list to scenario # Update the chart directly update_chart(state) def make_primary(state): print("Making the current scenario primary...") scenario = tp.get(state.selected_scenario[0]) # Take the current scenario primary tp.set_primary(scenario) # Update the scenario selector accordingly state.scenario_selector = [(scenario.id, ("*" if scenario.is_primary else "") + scenario.name) for scenario in tp.get_scenarios()] state.selected_scenario_is_primary = True def remove_scenario_from_selector(state, scenario: list): # Take all the scenarios in the selector that doesn't have the scenario.id state.scenario_selector = [(s[0], s[1]) for s in state.scenario_selector if s[0] != scenario.id] state.selected_scenario = state.scenario_selector[-1] def delete_scenario(state): scenario = tp.get(state.selected_scenario[0]) if scenario.is_primary: # Notify the user that primary scenarios can not be deleted notify(state, "info", "Cannot delete the primary scenario") else: # 
Delete the scenario and the related objects (datanodes, tasks, jobs,...) tp.delete(scenario.id) # Update the scenario selector accordingly remove_scenario_from_selector(state, scenario) # Add a "Delete scenario" and a "Make primary" buttons page_scenario_manager = """ # Create your scenario: <|layout|columns=1 1 1 1| <| **Prediction date**\n\n <|{day}|date|not with_time|> |> <| **Max capacity**\n\n <|{max_capacity}|number|> |> <| **Number of predictions**\n\n<|{n_predictions}|number|> |> <| <br/> <br/> <|Create new scenario|button|on_action=create_scenario|> |> |> <|part|render={len(scenario_selector) > 0}| <|layout|columns=1 1| <|layout|columns=1 1| <| ## Scenario \n <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> |> <br/> <br/> <br/> <br/> <|Delete scenario|button|on_action=delete_scenario|active={len(scenario_selector)>0}|> <|Make primary|button|on_action=make_primary|active={not(selected_scenario_is_primary) and len(scenario_selector)>0}|> |> <| ## Display the pipeline \n <|{selected_pipeline}|selector|lov={pipeline_selector}|dropdown|> |> |> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> |> """ lov_menu = [("Data-Visualization", "Data Visualization"), ("Scenario-Manager", "Scenario Manager")] # Create a menu with our pages root_md = "<|menu|label=Menu|lov={lov_menu}|on_action=menu_fct|>" pages = {"/":root_md, "Data-Visualization":page_data_visualization, "Scenario-Manager":page_scenario_manager} def menu_fct(state, var_name: str, fct: str, var_value: list): # Change the value of the state.page variable in order to render the correct page navigate(state, var_value["args"][0]) def on_change(state, var_name: str, var_value): if var_name == "n_week": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == var_value] elif var_name == "selected_pipeline" or var_name == "selected_scenario": # Update selected_scenario_is_primary indicating if the current scenario is primary or not state.selected_scenario_is_primary = tp.get(state.selected_scenario[0]).is_primary # Check if we can read the data node to update the chart if tp.get(state.selected_scenario[0]).predictions.read() is not None: update_chart(state) if __name__ == "__main__": tp.Core().run() Gui(pages=pages).run(dark_mode=False)
|
from taipy import Gui

# A dark mode is available in Taipy
# However, we will use the light mode for the Getting Started
Gui(page="# Getting started with *Taipy*").run(dark_mode=False)
|
from step_09 import * from taipy.gui import navigate # Our first page is the original page # (with the slider and the chart that displays a week of the historical data) page_data_visualization = page # Second page: create scenarios and display results page_scenario_manager = """ # Create your scenario <|layout|columns=1 1 1 1| <| **Prediction date**\n\n <|{day}|date|not with_time|> |> <| **Max capacity**\n\n <|{max_capacity}|number|> |> <| **Number of predictions**\n\n<|{n_predictions}|number|> |> <| <br/> <br/>\n <|Create new scenario|button|on_action=create_scenario|> |> |> <|part|render={len(scenario_selector) > 0}| <|layout|columns=1 1| <| ## Scenario \n <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> |> <| ## Display the pipeline \n <|{selected_pipeline}|selector|lov={pipeline_selector}|dropdown|> |> |> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> |> """ lov_menu = [("Data-Visualization", "Data Visualization"), ("Scenario-Manager", "Scenario Manager")] # Create a menu with our pages root_md = "<|menu|label=Menu|lov={lov_menu}|on_action=menu_fct|>" pages = {"/":root_md, "Data-Visualization":page_data_visualization, "Scenario-Manager":page_scenario_manager} def menu_fct(state, var_name: str, fct: str, var_value: list): # Change the value of the state.page variable in order to render the correct page navigate(state, var_value["args"][0]) if __name__ == "__main__": tp.Core().run() Gui(pages=pages).run(dark_mode=False)
|
import taipy as tp
from step_03 import Config, clean_data_task_cfg, predict_baseline_task_cfg, dt

# Create the first pipeline configuration
baseline_pipeline_cfg = Config.configure_pipeline(id="baseline",
                                                  task_configs=[clean_data_task_cfg, predict_baseline_task_cfg])

## Execute the "baseline" pipeline
if __name__ == "__main__":
    tp.Core().run()

    # Create the pipeline
    baseline_pipeline = tp.create_pipeline(baseline_pipeline_cfg)

    # Submit the pipeline (Execution)
    tp.submit(baseline_pipeline)

    # Read output data from the pipeline
    baseline_predictions = baseline_pipeline.predictions.read()
    print("Predictions of baseline algorithm\n", baseline_predictions)
|
AWS_ACCESS_KEY = ''
AWS_SECRET_KEY = ''
AWS_REGION = ''
S3_BUCKET_NAME = ''
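The empty literals above are placeholders to fill in. A common alternative, shown below as an assumption rather than part of the original config.py, is to read the same settings from environment variables so credentials never land in source control.
# Sketch: load the settings from the environment instead of hard-coding them.
import os

AWS_ACCESS_KEY = os.getenv("AWS_ACCESS_KEY", "")
AWS_SECRET_KEY = os.getenv("AWS_SECRET_KEY", "")
AWS_REGION = os.getenv("AWS_REGION", "")
S3_BUCKET_NAME = os.getenv("S3_BUCKET_NAME", "")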
|
from flask import Flask, render_template, redirect, url_for, request, flash from werkzeug.utils import secure_filename from uploads.file_handler import is_file_type_allowed, upload_file_to_s3, get_presigned_file_url from localStoragePy import localStoragePy from transformers import AutoTokenizer, pipeline # from tensorflow.keras.preprocessing.sequence import pad_sequences # from tensorflow.keras.models import load_model from taipy.gui import Gui import webbrowser import tensorflow as tf import pandas as pd import numpy as np import pytorch_pretrained_bert as ppb assert 'bert-large-cased' in ppb.modeling.PRETRAINED_MODEL_ARCHIVE_MAP app = Flask(__name__) app.secret_key = '3d6f45a5fc12445dbac2f59c3b6c7cb1' localStorage = localStoragePy('app', 'json') target_arr = ["df['col1'].nunique()", "df.sort_values(by=['col1'],inplace =True)", "df.sort_values(by=['col1', 'col2'],inplace =True)", "df.sort_values(by=['col1', 'col2', 'col3'],inplace =True)", "df.drop(columns = 'col1',inplace = True)", "new_df=df.loc[:, ['col1','col2']]", "df['col1'].value_counts()", "<|{dataset}|chart|type=bar|x=col1|y=col2|height=100%|>", "<|{dataset}|chart|type=pie|values=col2|labels=col1|height=100%|>", "<|{dataset}|chart|mode=lines|x=col1|y=col2|>"] portNo = 8888 @app.route("/", methods=['GET']) def home(): return render_template('home.html') @app.route("/upload-file", methods=['POST']) def upload_file(): if 'file' not in request.files: flash('No file uploaded', 'danger') return redirect(url_for('home')) file_to_upload = request.files['file'] if file_to_upload.filename == '': flash('No file uploaded', 'danger') return redirect(url_for('home')) if file_to_upload and is_file_type_allowed(file_to_upload.filename): provided_file_name = secure_filename(file_to_upload.filename) stored_file_name = upload_file_to_s3(file_to_upload, provided_file_name) localStorage.setItem("stored_file_name", stored_file_name) localStorage.setItem("provided_file_name", provided_file_name) flash(f'{provided_file_name} was successfully uploaded', 'success') return redirect(url_for('home')) @app.route("/query", methods=['POST']) def query(): try: query = request.form['query'] provided_file_name = localStorage.getItem("provided_file_name") stored_file_name = localStorage.getItem("stored_file_name") csv = get_presigned_file_url(stored_file_name, provided_file_name) df = pd.read_csv(csv) print("query: " + query) prediction_int, cols_requested = getPredictionInt(df, query) if prediction_int < 7: panda_query = target_arr[prediction_int] print(panda_query) for i in range(len(cols_requested)): panda_query = panda_query.replace("col" + str(i+1), cols_requested[i]) exec(panda_query) html_string = ''' <html lang="en"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet"> </head> <body> <div class="justify-content-center mt-5"> <div class="text-center"> <h4 class="">Download CSV <a href='{new_presigned_url}'> <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" style="width:20px; height:20px;"> <path stroke-linecap="round" stroke-linejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5M16.5 12L12 16.5m0 0L7.5 12m4.5 4.5V3" /> </svg> </a> </h4> <p>Click <a href='/'>here</a> to return to home page</p> {table} </div> </div> </body> </html>. 
''' filename = "new.csv" df.to_csv(filename, index=False) file_to_upload = open("new.csv", 'rb') new_provided_file_name = secure_filename(filename) new_stored_file_name = upload_file_to_s3(file_to_upload, new_provided_file_name) new_presigned_url = get_presigned_file_url(new_stored_file_name, new_provided_file_name) print("Presigned url: " + new_presigned_url) df = df.reset_index(drop=True) html = df.to_html(classes='table table-striped table-bordered w-75 mx-auto') html = html.replace("text-align: right;", "text-align: left;") toDisplay = html_string.format(table = html, new_presigned_url = new_presigned_url) return toDisplay else: print("taipy") taipy_query = target_arr[prediction_int] dataset = df for i in range(len(cols_requested)): taipy_query = taipy_query.replace("col" + str(i+1), cols_requested[i]) page = """{0}""" page = page.format(taipy_query) gui = Gui(page) global portNo portNum = portNo portNo += 1 webbrowser.open_new_tab('http://localhost:' + str(portNum)) gui.run(port=portNum) print("hello world") return redirect(url_for('home')) except: print("Invalid query") flash('Invalid query', 'danger') return redirect(url_for('home')) def getPredictionInt(df, query): cols = df.columns sentence = query words = sentence.split() cols_requested = [] for item in cols: for word in words: if(item.upper() == word.upper()): cols_requested.append(item) general_sentence = sentence for i in range(len(cols_requested)): general_sentence = general_sentence.replace(cols_requested[i], "col" + str(i+1)) model_id = "tanishabhagwanani/distilbert-base-uncased-finetuned-emotion" classifier = pipeline("text-classification", model=model_id) custom_question = query preds = classifier(custom_question, return_all_scores=True) preds_df = pd.DataFrame(preds[0]) prediction_int = np.argmax(preds_df.score) return prediction_int, cols_requested if __name__=='__main__': app.run(host="localhost", port=8000, debug=True)
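The /query route above relies on a fine-tuned DistilBERT classifier to turn a natural-language query into an index of target_arr. The fragment below isolates that single step; it is a sketch that reuses the model id from the code and assumes the weights can be downloaded from the Hugging Face Hub.
import numpy as np
import pandas as pd
from transformers import pipeline

classifier = pipeline("text-classification",
                      model="tanishabhagwanani/distilbert-base-uncased-finetuned-emotion")
preds = classifier("sort values by col1", return_all_scores=True)
prediction_int = int(np.argmax(pd.DataFrame(preds[0]).score))
print(prediction_int)  # index into target_arr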
|
import uuid import boto3 from config import AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET_NAME s3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_SECRET_KEY, region_name=AWS_REGION ) ALLOWED_FILE_TYPES = {'csv'} S3_BUCKET_NAME = S3_BUCKET_NAME S3_EXPIRES_IN_SECONDS = 100 def get_file_type(filename): return '.' in filename and filename.rsplit('.', 1)[1].lower() def is_file_type_allowed(filename): return get_file_type(filename) in ALLOWED_FILE_TYPES def upload_file_to_s3(file, provided_file_name): stored_file_name = f'{str(uuid.uuid4())}.{get_file_type(provided_file_name)}' s3.upload_fileobj(file, S3_BUCKET_NAME, stored_file_name) return stored_file_name def get_presigned_file_url(stored_file_name, provided_file_name): if not stored_file_name or not provided_file_name: return return s3.generate_presigned_url( 'get_object', Params = { 'Bucket': S3_BUCKET_NAME, 'Key': stored_file_name, 'ResponseContentDisposition': f"attachment; filename = {provided_file_name}" }, ExpiresIn = S3_EXPIRES_IN_SECONDS )
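A short usage sketch of the three helpers defined above; the file name is an arbitrary example and the snippet assumes valid AWS settings in config.py.
from uploads.file_handler import is_file_type_allowed, upload_file_to_s3, get_presigned_file_url

filename = "dataset.csv"
if is_file_type_allowed(filename):
    with open(filename, "rb") as f:
        stored_name = upload_file_to_s3(f, filename)   # stored under a UUID-based key
    url = get_presigned_file_url(stored_name, filename)
    print(url)  # download link, valid for S3_EXPIRES_IN_SECONDS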
|
import json


def add_line(source, line, step):
    # Steps whose Notebook version needs the on_change callback to be re-bound
    on_change_needed = ['step_02', 'step_09', 'step_11']

    line = line.replace('Getting Started with Taipy', 'Getting Started with Taipy on Notebooks')
    line = line.replace('(../src/', '(https://docs.taipy.io/getting_started/src/')
    line = line.replace('(dataset.csv)', '(https://docs.taipy.io/getting_started/step_01/dataset.csv)')

    if line.startswith('!['):
        # Markdown image: rewrite it as a centered HTML <img> element
        img_src = line.split('](')[1].split(')')[0]
        width = line.split('](')[1].split(')')[1].split(' ')[1]
        source.append('<div align="center">\n')
        source.append(f' <img src={img_src} {width}>\n')
        source.append('</div>\n')
    elif step == 'step_00' and line.startswith('Gui(page='):
        source.append('\n')
        source.append('# We can use Gui("# Getting Started with Taipy").run() directly\n')
        source.append('# However, we need a Markdown and Gui object to modify the content of the page\n')
        source.append('# in the Notebook\n')
        source.append('\n')
        source.append('main_page = Markdown("# Getting Started with Taipy")\n')
        source.append('gui = Gui(main_page)\n')
        source.append('gui.run(dark_mode=False)\n')
    elif line.startswith('Gui(page=') and step != 'step_00':
        search_for_md = line.split(')')
        name_of_md = search_for_md[0][9:]
        source.append('gui.stop()\n')
        if step in on_change_needed:
            source.append('gui.on_change = on_change\n')
        source.append(f'main_page.set_content({name_of_md})\n')
        source.append('gui.run()\n')
    elif step == 'step_00' and line.startswith('from taipy'):
        source.append("from taipy.gui import Gui, Markdown\n")
    elif 'Notebook' in line and 'step' in step:
        pass
    else:
        source.append(line + '\n')
    return source


def detect_new_cell(notebook, source, cell, line, execution_count, force_creation=False):
    if line.startswith('```python') or line.startswith('```') and cell == 'code' or force_creation:
        source = source[:-1]
        if cell == 'code':
            notebook['cells'].append({
                "cell_type": "code",
                "metadata": {},
                "outputs": [],
                "execution_count": execution_count,
                "source": source
            })
            cell = 'markdown'
            execution_count += 1
        else:
            notebook['cells'].append({
                "cell_type": "markdown",
                "metadata": {},
                "source": source
            })
            cell = 'code'
        source = []
    return cell, source, notebook, execution_count


def create_introduction(notebook, execution_count):
    with open('index.md', 'r') as f:
        text = f.read()
    split_text = text.split('\n')

    source = []
    for line in split_text:
        if not line.startswith('``` console'):
            add_line(source, line, 'index')
        else:
            break

    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": source
    })
    notebook['cells'].append({
        "cell_type": "code",
        "metadata": {},
        "outputs": [],
        "execution_count": execution_count,
        "source": ['# !pip install taipy\n', '# !pip install scikit-learn\n', '# !pip install statsmodels']
    })
    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": ['## Using Notebooks\n',
                   'Some functions that are primarily used for Notebooks will be used in this Getting Started (`gui.stop()`, `gui.run()`, `gui.on_change`, `set_content()`)\n',
                   'For more explanation on these functions, you can find the related documentation [here](https://docs.taipy.io/manuals/gui/notebooks/)\n',
                   '**Warning**: Do not forget to stop your server when you are finished. You can do so by restarting your kernel.\n']
    })
    execution_count += 1
    return notebook, execution_count


def create_steps(notebook, execution_count):
    steps = ['step_0' + str(i) for i in range(0, 10)] + ['step_10', 'step_11', 'step_12']
    source = []
    for step in steps:
        if source != []:
            cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line,
                                                                      execution_count, force_creation=True)
        with open(step + '/ReadMe.md', 'r') as f:
            text = f.read()
        split_text = text.split('\n')

        cell = "markdown"
        for line in split_text:
            add_line(source, line, step)
            cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line, execution_count)
    return notebook, execution_count


if __name__ == '__main__':
    notebook = {
        "cells": [],
        "metadata": {
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": 3
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython3"
            },
            "orig_nbformat": 4
        },
        "nbformat": 4,
        "nbformat_minor": 2
    }

    execution_count = 0
    notebook, execution_count = create_introduction(notebook, execution_count)
    notebook, execution_count = create_steps(notebook, execution_count)

    with open('getting_started.ipynb', 'w', encoding='utf-8') as f:
        json.dump(notebook, f, indent=2)
|
from step_08 import * # Get all the scenarios already created all_scenarios = tp.get_scenarios() # Delete the scenarios that don't have a name attribute # All the scenarios of the previous steps do not have an associated name so they will be deleted, # this will not be the case for those created by this step [tp.delete(scenario.id) for scenario in all_scenarios if scenario.name is None] # Initial variable for the scenario selector # The list of possible values (lov) for the scenario selector is a list of tuples (scenario_id, scenario_name), # but the selected_scenario is just used to retrieve the scenario id and what gets displayed is the name of the scenario. scenario_selector = [(scenario.id, scenario.name) for scenario in tp.get_scenarios()] selected_scenario = None scenario_manager_page = page + """ # Create your scenario **Prediction date**\n\n <|{day}|date|not with_time|> **Max capacity**\n\n <|{max_capacity}|number|> **Number of predictions**\n\n<|{n_predictions}|number|> <|Create new scenario|button|on_action=create_scenario|> ## Scenario <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> ## Display the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def create_name_for_scenario(state) -> str: name = f"Scenario ({state.day.strftime('%A, %d %b')}; {state.max_capacity}; {state.n_predictions})" # Change the name if it is the same as some scenarios if name in [s[1] for s in state.scenario_selector]: name += f" ({len(state.scenario_selector)})" return name # Change the create_scenario function in order to change the default parameters # and allow the creation of multiple scenarios def create_scenario(state): print("Execution of scenario...") # Extra information for the scenario creation_date = state.day name = create_name_for_scenario(state) # Create a scenario scenario = tp.create_scenario(scenario_cfg, creation_date=creation_date, name=name) state.selected_scenario = (scenario.id, name) # Submit the scenario that is currently selected submit_scenario(state) def submit_scenario(state): print("Submitting scenario...") # Get the currently selected scenario scenario = tp.get(state.selected_scenario[0]) # Conversion to the right format (change?) 
day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the Data Nodes scenario.day.write(day) scenario.n_predictions.write(int(state.n_predictions)) scenario.max_capacity.write(int(state.max_capacity)) scenario.creation_date = state.day # Execute the scenario tp.submit(scenario) # Update the scenario selector and the scenario that is currently selected update_scenario_selector(state, scenario) # change list to scenario # Update the chart directly update_chart(state) def update_scenario_selector(state, scenario): print("Updating scenario selector...") # Update the scenario selector state.scenario_selector += [(scenario.id, scenario.name)] def update_chart(state): # Now, the selected_scenario comes from the state, it is interactive scenario = tp.get(state.selected_scenario[0]) pipeline = scenario.pipelines[state.selected_pipeline] update_predictions_dataset(state, pipeline) def on_change(state, var_name: str, var_value): if var_name == "n_week": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == var_value] elif var_name == "selected_pipeline" or var_name == "selected_scenario": # Update the chart when the scenario or the pipeline is changed # Check if we can read the data node to update the chart if tp.get(state.selected_scenario[0]).predictions.read() is not None: update_chart(state) if __name__ == "__main__": Gui(page=scenario_manager_page).run(dark_mode=False)
|
from step_07 import * # Initial variables ## Initial variables for the scenario day = dt.datetime(2021, 7, 26) n_predictions = 40 max_capacity = 200 page_scenario_manager = page + """ # Change your scenario **Prediction date**\n\n <|{day}|date|not with_time|> **Max capacity**\n\n <|{max_capacity}|number|> **Number of predictions**\n\n<|{n_predictions}|number|> <|Save changes|button|on_action={submit_scenario}|> Select the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|Update chart|button|on_action={update_chart}|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def create_scenario(): global selected_scenario print("Creating scenario...") scenario = tp.create_scenario(scenario_cfg) selected_scenario = scenario.id tp.submit(scenario) def submit_scenario(state): print("Submitting scenario...") # Get the selected scenario: in this current step a single scenario is created then modified here. scenario = tp.get(selected_scenario) # Conversion to the right format state_day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the datanodes scenario.day.write(state_day) scenario.n_predictions.write(int(state.n_predictions)) scenario.max_capacity.write(int(state.max_capacity)) # Execute the pipelines/code tp.submit(scenario) # Update the chart when we change the scenario update_chart(state) def update_chart(state): # Select the right scenario and pipeline scenario = tp.get(selected_scenario) pipeline = scenario.pipelines[state.selected_pipeline] # Update the chart based on this pipeline update_predictions_dataset(state, pipeline) if __name__ == "__main__": global selected_scenario # Creation of a single scenario create_scenario() Gui(page=page_scenario_manager).run(dark_mode=False)
|
import datetime as dt import pandas as pd from taipy import Config, Scope from step_01 import path_to_csv # Datanodes (3.1) ## Input Data Nodes initial_dataset_cfg = Config.configure_data_node(id="initial_dataset", storage_type="csv", path=path_to_csv, scope=Scope.GLOBAL) # We assume the current day is the 26th of July 2021. # This day can be changed to simulate multiple executions of scenarios on different days day_cfg = Config.configure_data_node(id="day", default_data=dt.datetime(2021, 7, 26)) n_predictions_cfg = Config.configure_data_node(id="n_predictions", default_data=40) max_capacity_cfg = Config.configure_data_node(id="max_capacity", default_data=200) ## Remaining Data Nodes cleaned_dataset_cfg = Config.configure_data_node(id="cleaned_dataset", cacheable=True, validity_period=dt.timedelta(days=1), scope=Scope.GLOBAL) predictions_cfg = Config.configure_data_node(id="predictions", scope=Scope.PIPELINE) # Functions (3.2) def clean_data(initial_dataset: pd.DataFrame): print(" Cleaning data") # Convert the date column to datetime initial_dataset['Date'] = pd.to_datetime(initial_dataset['Date']) cleaned_dataset = initial_dataset.copy() return cleaned_dataset def predict_baseline(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int): print(" Predicting baseline") # Select the train data train_dataset = cleaned_dataset[cleaned_dataset['Date'] < day] predictions = train_dataset['Value'][-n_predictions:].reset_index(drop=True) predictions = predictions.apply(lambda x: min(x, max_capacity)) return predictions # Tasks (3.3) clean_data_task_cfg = Config.configure_task(id="clean_data", function=clean_data, input=initial_dataset_cfg, output=cleaned_dataset_cfg) predict_baseline_task_cfg = Config.configure_task(id="predict_baseline", function=predict_baseline, input=[cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg], output=predictions_cfg)
|
from step_05 import * from step_06 import scenario_cfg from taipy import Config # Set the list of pipelines names # It will be used in a selector of pipelines pipeline_selector = ["baseline", "ml"] selected_pipeline = pipeline_selector[0] scenario_page = page + """ Select the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|Update chart|button|on_action=update_chart|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def create_scenario(): print("Creating scenario...") scenario = tp.create_scenario(scenario_cfg) scenario = submit_scenario(scenario) return scenario def submit_scenario(scenario): print("Submitting scenario...") tp.submit(scenario) return scenario def update_chart(state): print("'Update chart' button clicked") # Select the right pipeline pipeline = scenario.pipelines[state.selected_pipeline] # Update the chart based on this pipeline # It is the same function as created before in step_5 update_predictions_dataset(state, pipeline) if __name__ == "__main__": # Delete all entities Config.configure_global_app(clean_entities_enabled=True) tp.clean_all_entities() # Creation of our first scenario scenario = create_scenario() Gui(page=scenario_page).run(dark_mode=False)
|
# For the sake of clarity, we have used an AutoRegressive model rather than a pure ML model such as:
# Random Forest, Linear Regression, LSTM, etc.
from statsmodels.tsa.ar_model import AutoReg
from taipy import Config

from step_04 import *
from step_03 import cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg, predictions_cfg, pd, dt


# This is the function that will be used by the task
def predict_ml(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int):
    print(" Predicting with ML")
    # Select the train data
    train_dataset = cleaned_dataset[cleaned_dataset["Date"] < day]

    # Fit the AutoRegressive model
    model = AutoReg(train_dataset["Value"], lags=7).fit()

    # Get the n_predictions forecasts
    predictions = model.forecast(n_predictions).reset_index(drop=True)
    predictions = predictions.apply(lambda x: min(x, max_capacity))
    return predictions


# Create the task configuration of the predict_ml function.
## We use the same input and output as the previous predict_baseline task, but we change the function
predict_ml_task_cfg = Config.configure_task(id="predict_ml",
                                            function=predict_ml,
                                            input=[cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg],
                                            output=predictions_cfg)

# Create the new pipeline that will clean and predict with the ml model
ml_pipeline_cfg = Config.configure_pipeline(id="ml", task_configs=[clean_data_task_cfg, predict_ml_task_cfg])

# Configure our scenario which is our business problem.
scenario_cfg = Config.configure_scenario(id="scenario",
                                         pipeline_configs=[baseline_pipeline_cfg, ml_pipeline_cfg])

# The configuration is now complete
if __name__ == "__main__":
    # Create the scenario
    scenario = tp.create_scenario(scenario_cfg)

    # Execute it
    tp.submit(scenario)

    # Get the resulting scenario
    ## Print the predictions of the two pipelines (baseline and ml)
    print("\nBaseline predictions\n", scenario.baseline.predictions.read())
    print("\nMachine Learning predictions\n", scenario.ml.predictions.read())
|
from step_01 import dataset, n_week, Gui # Select the week based on the slider value dataset_week = dataset[dataset["Date"].dt.isocalendar().week == n_week] page = """ # Getting started with Taipy Select week: *<|{n_week}|>* <|{n_week}|slider|min=1|max=52|> <|{dataset_week}|chart|type=bar|x=Date|y=Value|height=100%|width=100%|> """ # on_change is the function that is called when any variable is changed def on_change(state, var_name: str, var_value): if var_name == "n_week": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == var_value] if __name__ == "__main__": Gui(page=page).run(dark_mode=False)
|
from step_11 import * from sklearn.metrics import mean_absolute_error, mean_squared_error # Initial dataset for comparison comparison_scenario = pd.DataFrame({"Scenario Name": [], "RMSE baseline": [], "MAE baseline": [], "RMSE ML": [], "MAE ML": []}) # Indicates if the comparison is done comparison_scenario_done = False # Selector for metrics metric_selector = ["RMSE", "MAE"] selected_metric = metric_selector[0] def compute_metrics(historical_data, predicted_data): rmse = mean_squared_error(historical_data, predicted_data) mae = mean_absolute_error(historical_data, predicted_data) return rmse, mae def compare(state): print("Comparing...") # Initial lists for comparison scenario_names = [] rmses_baseline = [] maes_baseline = [] rmses_ml = [] maes_ml = [] # Go through all the primary scenarios all_scenarios = tp.get_primary_scenarios() all_scenarios_ordered = sorted(all_scenarios, key=lambda x: x.creation_date.timestamp()) for scenario in all_scenarios_ordered: print(f"Scenario {scenario.name}") # Go through all the pipelines for pipeline in scenario.pipelines.values(): print(f" Pipeline {pipeline.config_id}") # Get the predictions dataset with the historical data only_prediction_dataset = create_predictions_dataset(pipeline)[-pipeline.n_predictions.read():] # Series to compute the metrics (true values and predicted values) historical_values = only_prediction_dataset["Historical values"] predicted_values = only_prediction_dataset["Predicted values"] # Compute the metrics for this pipeline and primary scenario rmse, mae = compute_metrics(historical_values, predicted_values) # Add values to the appropriate lists if "baseline" in pipeline.config_id: rmses_baseline.append(rmse) maes_baseline.append(mae) elif "ml" in pipeline.config_id: rmses_ml.append(rmse) maes_ml.append(mae) scenario_names.append(scenario.creation_date.strftime("%A %d %b")) # Update comparison_scenario state.comparison_scenario = pd.DataFrame({"Scenario Name": scenario_names, "RMSE baseline": rmses_baseline, "MAE baseline": maes_baseline, "RMSE ML": rmses_ml, "MAE ML": maes_ml}) # When comparison_scenario_done will be set to True, # the part with the graphs will be finally rendered state.comparison_scenario_done = True # Performance page page_performance = """ <br/> <|part|render={comparison_scenario_done}| <|Table|expanded=False|expandable| <|{comparison_scenario}|table|width=100%|> |> <|{selected_metric}|selector|lov={metric_selector}|dropdown|> <|part|render={selected_metric=="RMSE"}| <|{comparison_scenario}|chart|type=bar|x=Scenario Name|y[1]=RMSE baseline|y[2]=RMSE ML|height=100%|width=100%|> |> <|part|render={selected_metric=="MAE"}| <|{comparison_scenario}|chart|type=bar|x=Scenario Name|y[1]=MAE baseline|y[2]=MAE ML|height=100%|width=100%|> |> |> <center> <|Compare primarys|button|on_action=compare|> </center> """ # Add the page_performance section to the menu multi_pages = """ <|menu|label=Menu|lov={["Data Visualization", "Scenario Manager", "Performance"]}|on_action=menu_fct|> <|part|render={page=="Data Visualization"}|""" + page_data_visualization + """|> <|part|render={page=="Scenario Manager"}|""" + page_scenario_manager + """|> <|part|render={page=="Performance"}|""" + page_performance + """|> """ if __name__ == "__main__": Gui(page=multi_pages).run(dark_mode=False)
|
import numpy as np import pandas as pd from step_04 import tp, baseline_pipeline_cfg, dt from step_02 import * # Initialize the "predictions" dataset predictions_dataset = pd.DataFrame( {"Date": [dt.datetime(2021, 6, 1)], "Historical values": [np.NaN], "Predicted values": [np.NaN]}) # Add a button and a chart for our predictions pipeline_page = page + """ Press <|predict|button|on_action=predict|> to predict with default parameters (30 predictions) and June 1st as day. <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def predict(state): print("'Predict' button clicked") pipeline = create_and_submit_pipeline() update_predictions_dataset(state, pipeline) def create_and_submit_pipeline(): print("Execution of pipeline...") # Create the pipeline from the pipeline config pipeline = tp.create_pipeline(baseline_pipeline_cfg) # Submit the pipeline (Execution) tp.submit(pipeline) return pipeline def create_predictions_dataset(pipeline): print("Creating predictions dataset...") # Read data from the pipeline predictions = pipeline.predictions.read() day = pipeline.day.read() n_predictions = pipeline.n_predictions.read() cleaned_data = pipeline.cleaned_dataset.read() # Set arbitrarily the time window for the chart as 5 times the number of predictions window = 5 * n_predictions # Create the historical dataset that will be displayed new_length = len(cleaned_data[cleaned_data["Date"] < day]) + n_predictions temp_df = cleaned_data[:new_length] temp_df = temp_df[-window:].reset_index(drop=True) # Create the series that will be used in the concat historical_values = pd.Series(temp_df["Value"], name="Historical values") predicted_values = pd.Series([np.NaN] * len(temp_df), name="Predicted values") predicted_values[-len(predictions):] = predictions # Create the predictions dataset # Columns : [Date, Historical values, Predicted values] return pd.concat([temp_df["Date"], historical_values, predicted_values], axis=1) def update_predictions_dataset(state, pipeline): print("Updating predictions dataset...") state.predictions_dataset = create_predictions_dataset(pipeline) if __name__ == "__main__": Gui(page=pipeline_page).run(dark_mode=False)
|
from taipy import Gui import pandas as pd def get_data(path_to_csv: str): # pandas.read_csv() returns a pd.DataFrame dataset = pd.read_csv(path_to_csv) dataset["Date"] = pd.to_datetime(dataset["Date"]) return dataset # Read the dataframe path_to_csv = "dataset.csv" dataset = get_data(path_to_csv) # Initial value n_week = 10 # Definition of the page page = """ # Getting started with Taipy Week number: *<|{n_week}|>* Interact with this slider to change the week number: <|{n_week}|slider|min=1|max=52|> ## Dataset: Display the last three months of data: <|{dataset[9000:]}|chart|type=bar|x=Date|y=Value|height=100%|> <|{dataset}|table|height=400px|width=95%|> """ if __name__ == "__main__": # Create a Gui object with our page content Gui(page=page).run(dark_mode=False)
|
from step_10 import *
from step_06 import ml_pipeline_cfg
from taipy import Config, Frequency
from taipy.gui import notify

# Create scenarios attached to a daily cycle and compare them
scenario_daily_cfg = Config.configure_scenario(id="scenario",
                                               pipeline_configs=[baseline_pipeline_cfg, ml_pipeline_cfg],
                                               frequency=Frequency.DAILY)

if __name__ == "__main__":
    # Delete all entities
    Config.configure_global_app(clean_entities_enabled=True)
    tp.clean_all_entities()

# Change the initial scenario selector to see which scenarios are primary
scenario_selector = [(scenario.id, ("*" if scenario.is_primary else "") + scenario.name)
                     for scenario in tp.get_scenarios()]


# Redefine update_scenario_selector to add "*" to the display name when the scenario is primary
def update_scenario_selector(state, scenario):
    print("Updating scenario selector...")
    # Create the scenario name for the scenario selector;
    # this name changes depending on whether the scenario is primary or not
    scenario_name = ("*" if scenario.is_primary else "") + scenario.name
    print(scenario_name)
    # Update the scenario selector
    state.scenario_selector += [(scenario.id, scenario_name)]


selected_scenario_is_primary = None


# Change the create_scenario function to create a scenario with the selected frequency
def create_scenario(state):
    print("Execution of scenario...")
    # Extra information for the scenario
    creation_date = state.day
    name = create_name_for_scenario(state)
    # Create a scenario attached to the daily cycle
    scenario = tp.create_scenario(scenario_daily_cfg, creation_date=creation_date, name=name)
    state.selected_scenario = (scenario.id, name)
    # Change the scenario that is currently selected
    submit_scenario(state)


# This is the same code as in step_9_dynamic_scenario_creation.py
def submit_scenario(state):
    print("Submitting scenario...")
    # Get the currently selected scenario
    scenario = tp.get(state.selected_scenario[0])
    # Conversion to the right format
    state_day = dt.datetime(state.day.year, state.day.month, state.day.day)
    # Change the default parameters by writing in the Data Nodes
    # if state.day != scenario.day.read():
    scenario.day.write(state_day)
    # if int(state.n_predictions) != scenario.n_predictions.read():
    scenario.n_predictions.write(int(state.n_predictions))
    # if state.max_capacity != scenario.max_capacity.read():
    scenario.max_capacity.write(int(state.max_capacity))
    # if state.day != scenario.creation_date:
    scenario.creation_date = state.day
    # Execute the pipelines/code
    tp.submit(scenario)
    # Update the scenario selector and the scenario that is currently selected
    update_scenario_selector(state, scenario)
    # Update the chart directly
    update_chart(state)


def make_primary(state):
    print("Making the current scenario primary...")
    scenario = tp.get(state.selected_scenario[0])
    # Set the current scenario as primary
    tp.set_primary(scenario)
    # Update the scenario selector accordingly
    state.scenario_selector = [(scenario.id, ("*" if scenario.is_primary else "") + scenario.name)
                               for scenario in tp.get_scenarios()]
    state.selected_scenario_is_primary = True


def remove_scenario_from_selector(state, scenario):
    # Keep all the scenarios in the selector that do not have the scenario.id
    state.scenario_selector = [(s[0], s[1]) for s in state.scenario_selector if s[0] != scenario.id]
    state.selected_scenario = state.scenario_selector[-1]


def delete_scenario(state):
    scenario = tp.get(state.selected_scenario[0])
    if scenario.is_primary:
        # Notify the user that primary scenarios cannot be deleted
        notify(state, "info", "Cannot delete the primary scenario")
    else:
        # Delete the scenario and the related objects (data nodes, tasks, jobs, ...)
        tp.delete(scenario.id)
        # Update the scenario selector accordingly
        remove_scenario_from_selector(state, scenario)


# Add a "Delete scenario" and a "Make primary" button
page_scenario_manager = """
# Create your scenario:

<|layout|columns=1 1 1 1|
<|
**Prediction date**\n\n <|{day}|date|not with_time|>
|>

<|
**Max capacity**\n\n <|{max_capacity}|number|>
|>

<|
**Number of predictions**\n\n<|{n_predictions}|number|>
|>

<|
<br/>
<br/>
<|Create new scenario|button|on_action=create_scenario|>
|>
|>

<|part|render={len(scenario_selector) > 0}|
<|layout|columns=1 1|

<|layout|columns=1 1|
<|
## Scenario \n <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|>
|>

<br/>
<br/>
<br/>
<br/>
<|Delete scenario|button|on_action=delete_scenario|active={len(scenario_selector)>0}|>
<|Make primary|button|on_action=make_primary|active={not(selected_scenario_is_primary) and len(scenario_selector)>0}|>
|>

<|
## Display the pipeline \n <|{selected_pipeline}|selector|lov={pipeline_selector}|dropdown|>
|>
|>

<|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|>
|>
"""

# Redefine the multi_pages
multi_pages = """
<|menu|label=Menu|lov={["Data Visualization", "Scenario Manager"]}|on_action=menu_fct|>
<|part|render={page=="Data Visualization"}|""" + page_data_visualization + """|>
<|part|render={page=="Scenario Manager"}|""" + page_scenario_manager + """|>
"""


def on_change(state, var_name: str, var_value):
    if var_name == "n_week":
        # Update the dataset when the slider is moved
        state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == var_value]
    elif var_name == "selected_pipeline" or var_name == "selected_scenario":
        # Update selected_scenario_is_primary, indicating whether the current scenario is primary or not
        state.selected_scenario_is_primary = tp.get(state.selected_scenario[0]).is_primary
        # Check if we can read the data node to update the chart
        if tp.get(state.selected_scenario[0]).predictions.read() is not None:
            update_chart(state)


if __name__ == "__main__":
    Gui(page=multi_pages).run(dark_mode=False)
|
from taipy import Gui

# A dark mode is available in Taipy
# However, we will use the light mode for the Getting Started
Gui(page="# Getting started with *Taipy*").run(dark_mode=False)
|
from step_09 import *

# Our first page is the original page
# (with the slider and the chart that displays a week of the historical data)
page_data_visualization = page

# Second page: create scenarios and display results
page_scenario_manager = """
# Create your scenario

<|layout|columns=1 1 1 1|
<|
**Prediction date**\n\n <|{day}|date|not with_time|>
|>

<|
**Max capacity**\n\n <|{max_capacity}|number|>
|>

<|
**Number of predictions**\n\n<|{n_predictions}|number|>
|>

<|
<br/>
<br/>\n
<|Create new scenario|button|on_action=create_scenario|>
|>
|>

<|part|render={len(scenario_selector) > 0}|
<|layout|columns=1 1|
<|
## Scenario \n <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|>
|>

<|
## Display the pipeline \n <|{selected_pipeline}|selector|lov={pipeline_selector}|dropdown|>
|>
|>

<|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|>
|>
"""

# Create a menu with our pages
multi_pages = """
<|menu|label=Menu|lov={["Data Visualization", "Scenario Manager"]}|on_action=menu_fct|>
<|part|render={page=="Data Visualization"}|""" + page_data_visualization + """|>
<|part|render={page=="Scenario Manager"}|""" + page_scenario_manager + """|>
"""

# The initial page is the "Data Visualization" page
page = "Data Visualization"


def menu_fct(state, var_name: str, fct: str, var_value: list):
    # Change the value of the state.page variable in order to render the correct page
    state.page = var_value["args"][0]


if __name__ == "__main__":
    Gui(page=multi_pages).run(dark_mode=False)
|
import taipy as tp
from step_03 import Config, clean_data_task_cfg, predict_baseline_task_cfg, dt

# Create the first pipeline configuration
baseline_pipeline_cfg = Config.configure_pipeline(id="baseline",
                                                  task_configs=[clean_data_task_cfg, predict_baseline_task_cfg])

## Execute the "baseline" pipeline
if __name__ == "__main__":
    # Create the pipeline
    baseline_pipeline = tp.create_pipeline(baseline_pipeline_cfg)
    # Submit the pipeline (Execution)
    tp.submit(baseline_pipeline)
    # Read output data from the pipeline
    baseline_predictions = baseline_pipeline.predictions.read()
    print("Predictions of baseline algorithm\n", baseline_predictions)
|
from taipy.gui import Gui
from keras.models import load_model
from PIL import Image
import numpy as np

# CIFAR-10 class labels indexed by the model's output neuron
class_names = {
    0: 'airplane',
    1: 'automobile',
    2: 'bird',
    3: 'cat',
    4: 'deer',
    5: 'dog',
    6: 'frog',
    7: 'horse',
    8: 'ship',
    9: 'truck',
}

model = load_model("Neural Network Notebook/Cifar10Model.keras")


def predict_image(model, path_to_img):
    # Load the image, force RGB, and resize to the 32x32 input expected by the CIFAR-10 model
    img = Image.open(path_to_img)
    img = img.convert("RGB")
    img = img.resize((32, 32))
    data = np.asarray(img)
    data = data / 255
    probs = model.predict(np.array([data])[:1])
    # Return the highest probability and the corresponding class name
    top_prob = probs.max()
    top_pred = class_names[np.argmax(probs)]
    return top_prob, top_pred


content = ""
img_path = "placeholder_image.png"
prob = 0
pred = ""

index = """
<|text-center|
<|{"logo.png"}|image|width=16vw|>

<|{content}|file_selector|extensions=.png|>
select an image from your file system

<|{pred}|>

<|{img_path}|image|>

<|{prob}|indicator|value={prob}|min=0|max=100|width=25vw|>
>
"""


def on_change(state, var_name, var_val):
    # Run a prediction every time a new file is selected
    if var_name == "content":
        top_prob, top_pred = predict_image(model, var_val)
        state.prob = round(top_prob * 100)
        state.pred = "this is a " + top_pred
        state.img_path = var_val
        # print(var_name, var_val)


app = Gui(page=index)

if __name__ == "__main__":
    app.run(use_reloader=True)
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error
import numpy as np  # Import numpy for RMSE calculation
from prophet import Prophet


def build_message(name: str):
    return f"Hello {name}!"


def clean_data(initial_dataset: pd.DataFrame):
    return initial_dataset


def retrained_model(cleaned_dataset: pd.DataFrame):
    # Split the dataset into features (X) and target (y)
    X = cleaned_dataset.drop('Claim_Amount', axis=1)
    y = cleaned_dataset['Claim_Amount']

    # Split the data into training and testing sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # Define the categorical columns for one-hot encoding
    categorical_cols = ['Procedure_Code', 'Diagnosis_Code', 'Provider_Specialty', 'Insurance_Plan']

    # Create a column transformer
    preprocessor = ColumnTransformer(
        transformers=[
            ('cat', OneHotEncoder(drop='first'), categorical_cols)
        ],
        remainder='passthrough'
    )

    # Create a pipeline with preprocessing and the Random Forest Regressor
    model = Pipeline([
        ('preprocessor', preprocessor),
        ('regressor', RandomForestRegressor(n_estimators=100, random_state=42))
    ])

    # Fit the model on the training data
    model.fit(X_train, y_train)

    # Make predictions on the test set
    predictions = model.predict(X_test)

    # Calculate Mean Squared Error (MSE) and Root Mean Squared Error (RMSE)
    mse = mean_squared_error(y_test, predictions)
    rmse = np.sqrt(mse)
    print(f"Mean Squared Error: {mse}")
    print(f"Root Mean Squared Error (RMSE): {rmse}")

    return model


def predict(model):
    # Example: Make a prediction for a new patient
    new_patient_data = pd.DataFrame({
        'Procedure_Code': ['CPT456'],
        'Diagnosis_Code': ['ICD-10-B'],
        'Provider_Specialty': ['Orthopedics'],
        'Patient_Age': [35],
        'Insurance_Plan': ['PPO'],
        'Deductible': [200],
        'Copayment': [30],
        'Coinsurance': [20],
    }, index=[0])

    # Predict the claim amount for the new patient
    new_patient_claim = model.predict(new_patient_data)
    print(f"Predicted Claim Amount for New Patient: ${new_patient_claim[0]:.2f}")
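A minimal, hedged sketch of how these helpers chain together outside of Taipy. It assumes the module above is saved as `functions.py` and that a local `df.csv` actually contains the claim columns used by `retrained_model` (the file name appears in the data-node configuration later in this dump, but whether it holds claim data is an assumption):

```python
# Hypothetical end-to-end call of the helpers above; df.csv and its columns are assumed.
import pandas as pd
from functions import clean_data, retrained_model, predict

raw = pd.read_csv("df.csv")       # assumed input file with a Claim_Amount column
cleaned = clean_data(raw)         # pass-through cleaning step
model = retrained_model(cleaned)  # trains the RandomForest pipeline and prints MSE/RMSE
predict(model)                    # prints a claim-amount prediction for one example patient
```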
|
import taipy as tp
from taipy.core.config import Config

Config.load('my_config.toml')
scenario_cfg = Config.scenarios['scenario']

if __name__ == '__main__':
    tp.Core().run()
    scenario_1 = tp.create_scenario(scenario_cfg)
    print("Submitting the scenario...")
    scenario_1.submit()
    print("Submission finished.")
|
from taipy.gui import Html

html_page = Html("""
<head>
  <script src="https://maps.googleapis.com/maps/api/js?key=AIzaSyBIeklfsRu1yz97lY2gJzWHJcmrd7lx2zU&libraries=places"></script>
  <script type="text/javascript">
    function initialize() {
      geocoder = new google.maps.Geocoder();
      var mapOptions = {};
      var locations = ["12836 University Club Dr", "2204 Fitness Club Way"];
      var markers = [];
      var iterator = 0;
      var bounds = new google.maps.LatLngBounds();

      for (var i = 0; i < locations.length; i++) {
        setTimeout(function() {
          geocoder.geocode({'address': locations[iterator]}, function(results, status) {
            if (status == google.maps.GeocoderStatus.OK) {
              var marker = new google.maps.Marker({
                map: map,
                position: results[0].geometry.location,
                animation: google.maps.Animation.DROP
              });
              bounds.extend(marker.getPosition());
              map.fitBounds(bounds);
            } else {
              console.log('Geocode was not successful for the following reason: ' + status);
            }
          });
          iterator++;
        }, i * 250);
      }

      var map = new google.maps.Map(document.getElementById("map-canvas"), mapOptions);
    }
    google.maps.event.addDomListener(window, 'load', initialize);
  </script>
</head>
<body>
  <div id="map-canvas" style="width: 100%; height: 400px;"></div>
</body>
""")
|
from geopy.geocoders import Nominatim
import folium

user_agent = "geoapiExercises/1.0 AIzaSyBIeklfsRu1yz97lY2gJzWHJcmrd7lx2zU"

# Initialize the geocoder with the user agent
geolocator = Nominatim(user_agent=user_agent, timeout=10)

# List of locations to geocode
locations = ["Denver, CO, United States", "New York, NY, United States", "Los Angeles, CA, United States"]

# Create an empty map
map_location = folium.Map(location=[0, 0], zoom_start=5)

# Iterate through the list of locations
for location in locations:
    # Perform geocoding
    location_info = geolocator.geocode(location)

    if location_info:
        # Extract latitude and longitude
        latitude = location_info.latitude
        longitude = location_info.longitude

        # Add a marker for the geocoded location
        folium.Marker([latitude, longitude], popup=location).add_to(map_location)
    else:
        print(f"Geocoding was not successful for the location: {location}")

# Save or display the map (as an HTML file)
map_location.save("geocoded_locations_map.html")
print("Map created and saved as 'geocoded_locations_map.html'")
|
from taipy.gui import Gui, notify
import pandas as pd
import yfinance as yf
from taipy.config import Config
import taipy as tp
import datetime as dt
from taipy import Core
from show_hospitals_map import html_page
from flask import Flask, request, session, jsonify, redirect, render_template
from flask_restful import Api, Resource
import requests

Config.load("config_model_train.toml")
scenario_cfg = Config.scenarios['stock']

tickers = yf.Tickers("msft aapl goog")

root_md = "<|navbar|>"

property_chart = {
    "type": "lines",
    "x": "Date",
    "y[1]": "Open",
    "y[2]": "Close",
    "y[3]": "High",
    "y[4]": "Low",
    "color[1]": "green",
    "color[2]": "grey",
    "color[3]": "red",
    "color[4]": "yellow",
}

df = pd.DataFrame([], columns=["Date", "High", "Low", "Open", "Close"])
df_pred = pd.DataFrame([], columns=['Date', 'Close_Prediction'])
stock = ""
stock_text = "No Stock to Show"
chart_text = "No Chart to Show"
pred_text = "No Prediction to Show"
stocks = []

page = """
# Stock Portfolio

### Choose the stock to show
<|toggle|theme|>

<|layout|columns=1 1|
<|
<|{stock_text}|>

<|{stock}|selector|lov=MSFT;AAPL;GOOG;Reset|dropdown|>

<|Press for Stock|button|on_action=on_button_action|>

<|Get the future predictions|button|on_action=get_predictions|>
|>

<|{stock}
<|{chart_text}|>

<|{df}|chart|properties={property_chart}|>
|>
|>
"""

pages = {
    "/": root_md,
    "home": page,
    "claim": "empty page"
}


def on_button_action(state):
    if state.stock == "Reset":
        state.stock_text = "No Stock to Show"
        state.chart_text = "No Chart to Show"
        state.df = pd.DataFrame([], columns=["Date", "High", "Low", "Open", "Close"])
        state.df_pred = pd.DataFrame([], columns=['Date', 'Close_Prediction'])
        state.pred_text = "No Prediction to Show"
    else:
        state.stock_text = f"The stock is {state.stock}"
        state.chart_text = f"Monthly history of stock {state.stock}"
        state.df = tickers.tickers[state.stock].history().reset_index()
        state.df.to_csv(f"{state.stock}.csv", index=False)


def get_predictions(state):
    scenario_stock = tp.create_scenario(scenario_cfg)
    # Point the input data node at the CSV written for the selected stock
    scenario_stock.initial_dataset.path = f"{state.stock}.csv"
    notify(state, 'success', 'Running the prediction scenario...')
    scenario_stock.initial_dataset.write(state.df)
    tp.submit(scenario_stock)
    state.df_pred = scenario_stock.predictions.read()
    state.df_pred.to_csv("pred.csv", index=False)


tp.Core().run()
# Gui(pages=pages).run(use_reloader=True)

app = Flask(__name__)
app.secret_key = "your_secret_key"  # Set a secret key for session management
api = Api(app)


class SignupResource(Resource):
    def get(self):
        return redirect("/signup.html")

    def post(self):
        SIGNUP_API_URL = "https://health-insurance-rest-apis.onrender.com/api/signup"
        signup_data = {
            'username': request.form['username'],
            'password': request.form['password'],
            'email': request.form['email']
        }
        headers = {'Content-Type': 'application/json'}
        print(signup_data)
        response = requests.post(SIGNUP_API_URL, headers=headers, json=signup_data)
        print("response", response)
        if response.status_code == 200:
            return redirect("/login.html")
        else:
            return 'Signup Failed'


# Login Resource
class LoginResource(Resource):
    def get(self):
        """Return a simple login page."""
        return redirect("/login.html")

    def post(self):
        email = request.form['email']
        password = request.form['password']
        auth_data = {
            'username': email,
            'password': password
        }
        AUTH_API_URL = "https://health-insurance-rest-apis.onrender.com/api/login"
        response = requests.post(AUTH_API_URL, json=auth_data)
        if response.status_code == 200:
            auth_data = response.json()
            access_token = auth_data.get('access_token')
            refresh_token = auth_data.get('refresh_token')
            # Store tokens in the session
            session['access_token'] = access_token
            session['refresh_token'] = refresh_token
            return redirect("/home")
        else:
            return 'Login failed', 401


# Protected Resource
class ProtectedResource(Resource):
    def get(self):
        # Check if the JWT token is present in the session
        if 'jwt_token' in session:
            jwt_token = session['jwt_token']
            # You can add logic here to verify the JWT token if needed
            # For simplicity, we assume the token is valid
            return {'message': 'Access granted for protected route', 'jwt_token': jwt_token}, 200
        else:
            return {'message': 'Access denied'}, 401


print("registered the apis")

# Add resources to the API
api.add_resource(LoginResource, '/login')
api.add_resource(ProtectedResource, '/protected')
api.add_resource(SignupResource, '/signup')


@app.before_request
def check_access_token():
    # Redirect to the login page if not on the login route and no access_token is in the session
    if request.endpoint != 'login' and 'access_token' not in session:
        return redirect("/login")


gui = Gui(pages=pages, flask=app).run(debug=False)
|
from taipy import Config, Scope
import pandas as pd
from prophet import Prophet
from functions import *

# Input Data Nodes
initial_dataset_cfg = Config.configure_data_node(id="initial_dataset",
                                                 storage_type="csv",
                                                 default_path='df.csv')

cleaned_dataset_cfg = Config.configure_data_node(id="cleaned_dataset")

clean_data_task_cfg = Config.configure_task(id="clean_data_task",
                                            function=clean_data,
                                            input=initial_dataset_cfg,
                                            output=cleaned_dataset_cfg,
                                            skippable=True)

model_training_cfg = Config.configure_data_node(id="model_output")
predictions_cfg = Config.configure_data_node(id="predictions")

model_training_task_cfg = Config.configure_task(id="model_retraining_task",
                                                function=retrained_model,
                                                input=cleaned_dataset_cfg,
                                                output=model_training_cfg,
                                                skippable=True)

predict_task_cfg = Config.configure_task(id="predict_task",
                                         function=predict,
                                         input=model_training_cfg,
                                         output=predictions_cfg,
                                         skippable=True)

# Create the first pipeline configuration
# retraining_model_pipeline_cfg = Config.configure_pipeline(
#     id="model_retraining_pipeline",
#     task_configs=[clean_data_task_cfg, model_training_task_cfg],
# )

# Run the Taipy Core service
# import taipy as tp
# # Run of the Taipy Core service
# tp.Core().run()
# # Create the pipeline
# retrain_pipeline = tp.create_pipeline(retraining_model_pipeline_cfg)
# # Submit the pipeline
# tp.submit(retrain_pipeline)
# tp.Core().stop()

scenario_cfg = Config.configure_scenario_from_tasks(id="stock",
                                                    task_configs=[clean_data_task_cfg,
                                                                  model_training_task_cfg,
                                                                  predict_task_cfg])

# tp.Core().run()
# tp.submit(scenario_cfg)

Config.export("config_model_train.toml")
|
from taipy import Config
from functions import build_message

name_data_node_cfg = Config.configure_data_node(id="name")
message_data_node_cfg = Config.configure_data_node(id="message")
build_msg_task_cfg = Config.configure_task("build_msg", build_message, name_data_node_cfg, message_data_node_cfg)
scenario_cfg = Config.configure_scenario_from_tasks("scenario", task_configs=[build_msg_task_cfg])

Config.export('my_config.toml')
|
from functools import wraps

import jwt
from flask import request, abort
from flask import current_app


def token_required(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        token = None
        # Expect a header of the form "Authorization: Bearer <token>"
        if "Authorization" in request.headers:
            token = request.headers["Authorization"].split(" ")[1]
        if not token:
            return {
                "message": "Authentication Token is missing!",
                "data": None,
                "error": "Unauthorized"
            }, 401
        try:
            # data = jwt.decode(token, current_app.config["SECRET_KEY"], algorithms=["RS256"])
            print("got the token")
            # current_user = models.User().get_by_id(data["user_id"])
            # Placeholder user while the real lookup above is disabled
            current_user = {"id": 12, "active": True}
            if current_user is None:
                return {
                    "message": "Invalid Authentication token!",
                    "data": None,
                    "error": "Unauthorized"
                }, 401
            if not current_user["active"]:
                abort(403)
        except Exception as e:
            return {
                "message": "Something went wrong",
                "data": None,
                "error": str(e)
            }, 500
        return f(current_user, *args, **kwargs)

    return decorated
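A hedged usage sketch for the decorator above. The module name `auth_decorator`, the `/api/me` route, and the secret value are illustrative assumptions, not part of the original file:

```python
# Hypothetical usage of token_required on a plain Flask view.
from flask import Flask, jsonify
from auth_decorator import token_required  # assumed location of the decorator above

app = Flask(__name__)
app.config["SECRET_KEY"] = "change-me"  # placeholder secret, only needed once jwt.decode is re-enabled

@app.route("/api/me")
@token_required
def me(current_user):
    # The decorator passes the resolved user as the first positional argument
    return jsonify({"user": current_user})

if __name__ == "__main__":
    app.run(debug=True)
    # Try it with: curl -H "Authorization: Bearer <jwt>" http://localhost:5000/api/me
```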
|
from flask import Flask, request, session, jsonify
from flask_restful import Api, Resource

app = Flask(__name__)
app.secret_key = "your_secret_key"  # Set a secret key for session management
api = Api(app)

# Dummy user data for demonstration
users = {
    'maneesh': {'password': 'securepassword'}
}


# Login Resource
class LoginResource(Resource):
    def post(self):
        data = request.get_json()
        username = data.get('username')
        password = data.get('password')

        # Check if user exists and password is correct
        if username in users and users[username]['password'] == password:
            # Simulate receiving a JWT token from a third-party API
            jwt_token = "your_received_jwt_token"
            # Store the JWT token in the session
            session['jwt_token'] = jwt_token
            return {'message': 'Login successful'}, 200
        else:
            return {'message': 'Invalid credentials'}, 401


# Protected Resource
class ProtectedResource(Resource):
    def get(self):
        # Check if the JWT token is present in the session
        if 'jwt_token' in session:
            jwt_token = session['jwt_token']
            # You can add logic here to verify the JWT token if needed
            # For simplicity, we assume the token is valid
            return {'message': 'Access granted for protected route', 'jwt_token': jwt_token}, 200
        else:
            return {'message': 'Access denied'}, 401


# Add resources to the API
api.add_resource(LoginResource, '/login')
api.add_resource(ProtectedResource, '/protected')

if __name__ == '__main__':
    app.run(debug=True)
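A short, hedged client sketch for exercising the two endpoints above, assuming the app is running locally on Flask's default port 5000. The `requests.Session` keeps the Flask session cookie between calls so the simulated JWT persists:

```python
# Hypothetical client for the session-based login/protected routes above.
import requests

s = requests.Session()  # keeps the Flask session cookie between calls
r = s.post("http://localhost:5000/login",
           json={"username": "maneesh", "password": "securepassword"})
print(r.status_code, r.json())   # expected: 200, {'message': 'Login successful'}

r = s.get("http://localhost:5000/protected")
print(r.status_code, r.json())   # expected: 200 with the stored jwt_token
```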
|