# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
|
class ConfigIdMissingException(Exception):
    def __init__(self):
        self.message = "Config id is missing."


class ScenarioIdMissingException(Exception):
    def __init__(self):
        self.message = "Scenario id is missing."


class SequenceNameMissingException(Exception):
    def __init__(self):
        self.message = "Sequence name is missing."
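A minimal sketch of how one of these exceptions might be raised and surfaced by a caller that validates its input; the validate_config_id helper below is hypothetical and not part of the module above.

def validate_config_id(config_id):
    # Raise the dedicated exception when the identifier is absent.
    if not config_id:
        raise ConfigIdMissingException()
    return config_id


try:
    validate_config_id(None)
except ConfigIdMissingException as e:
    print(e.message)  # "Config id is missing."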
|
from marshmallow import Schema, fields


class CycleSchema(Schema):
    name = fields.String()
    frequency = fields.String()
    properties = fields.Dict()
    creation_date = fields.String()
    start_date = fields.String()
    end_date = fields.String()


class CycleResponseSchema(CycleSchema):
    id = fields.String()
|
from marshmallow import Schema, fields


class TaskSchema(Schema):
    config_id = fields.String()
    id = fields.String()
    owner_id = fields.String()
    parent_ids = fields.List(fields.String)
    input_ids = fields.List(fields.String)
    function_name = fields.String()
    function_module = fields.String()
    output_ids = fields.List(fields.String)
    version = fields.String()
|
from marshmallow import Schema, fields


class CallableSchema(Schema):
    fct_name = fields.String()
    fct_module = fields.String()


class JobSchema(Schema):
    id = fields.String()
    task_id = fields.String()
    status = fields.String()
    force = fields.Boolean()
    creation_date = fields.String()
    subscribers = fields.Nested(CallableSchema)
    stacktrace = fields.List(fields.String)
|
from marshmallow import Schema, fields


class SequenceSchema(Schema):
    owner_id = fields.String()
    parent_ids = fields.List(fields.String)
    tasks = fields.List(fields.String)
    version = fields.String()
    properties = fields.Dict()


class SequenceResponseSchema(SequenceSchema):
    id = fields.String()
    subscribers = fields.List(fields.Dict)
|
from .cycle import CycleResponseSchema, CycleSchema
from .datanode import (
    CSVDataNodeConfigSchema,
    DataNodeConfigSchema,
    DataNodeFilterSchema,
    DataNodeSchema,
    ExcelDataNodeConfigSchema,
    GenericDataNodeConfigSchema,
    InMemoryDataNodeConfigSchema,
    JSONDataNodeConfigSchema,
    MongoCollectionDataNodeConfigSchema,
    PickleDataNodeConfigSchema,
    SQLDataNodeConfigSchema,
    SQLTableDataNodeConfigSchema,
)
from .job import JobSchema
from .scenario import ScenarioResponseSchema, ScenarioSchema
from .sequence import SequenceResponseSchema, SequenceSchema
from .task import TaskSchema

__all__ = [
    "DataNodeSchema",
    "DataNodeFilterSchema",
    "TaskSchema",
    "SequenceSchema",
    "SequenceResponseSchema",
    "ScenarioSchema",
    "ScenarioResponseSchema",
    "CycleSchema",
    "CycleResponseSchema",
    "JobSchema",
]
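The classes re-exported above are ordinary marshmallow schemas, so a caller serializes an entity by instantiating one and calling dump(). A minimal sketch, assuming CycleResponseSchema from the module above is in scope; the sample payload is invented for illustration.

# Assumption: CycleResponseSchema is the schema class defined above.
# marshmallow dumps plain dicts as well as objects with attributes.
cycle = {
    "id": "CYCLE_1234",
    "name": "weekly_cycle",
    "frequency": "Frequency.WEEKLY",
    "properties": {},
    "creation_date": "2021-07-26T00:00:00",
    "start_date": "2021-07-26T00:00:00",
    "end_date": "2021-08-01T23:59:59",
}

# dump() applies the declared fields and returns a JSON-serializable dict.
print(CycleResponseSchema().dump(cycle))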
|
from marshmallow import Schema, fields, pre_dump


class DataNodeSchema(Schema):
    config_id = fields.String()
    scope = fields.String()
    id = fields.String()
    storage_type = fields.String()
    name = fields.String()
    owner_id = fields.String()
    parent_ids = fields.List(fields.String)
    last_edit_date = fields.String()
    job_ids = fields.List(fields.String)
    version = fields.String()
    cacheable = fields.Boolean()
    validity_days = fields.Float()
    validity_seconds = fields.Float()
    edit_in_progress = fields.Boolean()
    properties = fields.Dict()


class DataNodeConfigSchema(Schema):
    name = fields.String()
    storage_type = fields.String()
    scope = fields.Integer()
    cacheable = fields.Boolean()

    @pre_dump
    def serialize_scope(self, obj, **kwargs):
        obj.scope = obj.scope.value
        return obj


class CSVDataNodeConfigSchema(DataNodeConfigSchema):
    path = fields.String()
    default_path = fields.String()
    has_header = fields.Boolean()


class InMemoryDataNodeConfigSchema(DataNodeConfigSchema):
    default_data = fields.Inferred()


class PickleDataNodeConfigSchema(DataNodeConfigSchema):
    path = fields.String()
    default_path = fields.String()
    default_data = fields.Inferred()


class SQLTableDataNodeConfigSchema(DataNodeConfigSchema):
    db_name = fields.String()
    table_name = fields.String()


class SQLDataNodeConfigSchema(DataNodeConfigSchema):
    db_name = fields.String()
    read_query = fields.String()
    write_query = fields.List(fields.String())


class MongoCollectionDataNodeConfigSchema(DataNodeConfigSchema):
    db_name = fields.String()
    collection_name = fields.String()


class ExcelDataNodeConfigSchema(DataNodeConfigSchema):
    path = fields.String()
    default_path = fields.String()
    has_header = fields.Boolean()
    sheet_name = fields.String()


class GenericDataNodeConfigSchema(DataNodeConfigSchema):
    pass


class JSONDataNodeConfigSchema(DataNodeConfigSchema):
    path = fields.String()
    default_path = fields.String()


class OperatorSchema(Schema):
    key = fields.String()
    value = fields.Inferred()
    operator = fields.String()


class DataNodeFilterSchema(DataNodeConfigSchema):
    operators = fields.List(fields.Nested(OperatorSchema))
    join_operator = fields.String(default="AND")
|
from marshmallow import Schema, fields


class ScenarioSchema(Schema):
    sequences = fields.Dict()
    properties = fields.Dict()
    primary_scenario = fields.Boolean(default=False)
    tags = fields.List(fields.String)
    version = fields.String()


class ScenarioResponseSchema(ScenarioSchema):
    id = fields.String()
    subscribers = fields.List(fields.Dict)
    cycle = fields.String()
    creation_date = fields.String()
|
from importlib import util
import inspect
import os

if util.find_spec("taipy") and util.find_spec("taipy.gui"):
    from taipy.gui import Gui

    taipy_path = f"{os.path.dirname(os.path.dirname(inspect.getfile(Gui)))}"
    potential_file_paths = [
        f"{taipy_path}{os.sep}gui{os.sep}viselements.json",
        f"{taipy_path}{os.sep}gui_core{os.sep}viselements.json",
    ]
    if potential_file_paths := [path for path in potential_file_paths if os.path.exists(path)]:
        print(f"Path: {';;;'.join(potential_file_paths)}")
    else:
        print("Visual element descriptor files not found in taipy-gui package")
else:
    print("taipy-gui package is not installed within the selected python environment")
|
import taipy as tp import pandas as pd from taipy import Config from taipy.gui import Gui, Markdown, notify Config.configure_global_app(clean_entities_enabled=True) tp.clean_all_entities() input_text_cfg = Config.configure_data_node(id="input_text") text_length_cfg = Config.configure_data_node(id="text_length") count_characters_cfg = Config.configure_task(id="count_characters", function=len, input=input_text_cfg, output=text_length_cfg) scenario_cfg = Config.configure_scenario_from_tasks(id="count_characters", task_configs=[count_characters_cfg]) scenario_list = tp.get_scenarios() input_text = "" main_md = Markdown(""" # Taipy Character Counter Enter Text: <|{input_text}|input|> <|Submit|button|on_action=submit|> ---------- Past Results: <|{create_results_table(scenario_list)}|table|width=fit-content|> """) def submit(state): scenario = tp.create_scenario(scenario_cfg) scenario.input_text.write(state.input_text) state.input_text = "" tp.submit(scenario, wait=True) notify(state, "S", "Submitted!") state.scenario_list = tp.get_scenarios() def create_results_table(scenario_list): table = [(s.id, s.input_text.read(), s.text_length.read()) for s in scenario_list] df = pd.DataFrame(table, columns=["id", "input_text", "text_length"]) print(df) return df tp.Core().run() gui = Gui(main_md) gui.run(run_browser=False)
|
import json


def add_line(source, line, step):
    line = line.replace('Getting Started with Taipy', 'Getting Started with Taipy on Notebooks')
    line = line.replace('(../src/', '(https://docs.taipy.io/en/latest/getting_started/src/')
    line = line.replace('(dataset.csv)', '(https://docs.taipy.io/en/latest/getting_started/step_01/dataset.csv)')
    if line.startswith('!['):
        # Rewrite a Markdown image line as a centered HTML image
        img_src = line.split('](')[1].split(')')[0]
        width = line.split('](')[1].split(')')[1].split(' ')[1]
        source.append('<div align="center">\n')
        source.append(f' <img src={img_src} {width}>\n')
        source.append('</div>\n')
    elif step == 'step_00' and line.startswith('Gui(page='):
        source.append('\n')
        source.append('Gui("# Getting Started with Taipy").run(dark_mode=False)\n')
    elif line.startswith('Gui(page=') and step != 'step_00':
        search_for_md = line.split(')')
        name_of_md = search_for_md[0][9:]
        source.append(f'gui = Gui({name_of_md})\n')
        source.append('gui.run()\n')
    elif step == 'step_00' and line.startswith('from taipy'):
        source.append("from taipy.gui import Gui, Markdown\n")
    elif 'Notebook' in line and 'step' in step:
        pass
    else:
        source.append(line + '\n')
    return source


def detect_new_cell(notebook, source, cell, line, execution_count, force_creation=False):
    if line.startswith('```python') or line.startswith('```') and cell == 'code' or force_creation:
        source = source[:-1]
        if cell == 'code':
            notebook['cells'].append({
                "cell_type": "code",
                "metadata": {},
                "outputs": [],
                "execution_count": execution_count,
                "source": source
            })
            cell = 'markdown'
            execution_count += 1
        else:
            notebook['cells'].append({
                "cell_type": "markdown",
                "metadata": {},
                "source": source
            })
            cell = 'code'
        source = []
    return cell, source, notebook, execution_count


def create_introduction(notebook, execution_count):
    with open('index.md', 'r') as f:
        text = f.read()
    split_text = text.split('\n')
    source = []
    for line in split_text:
        if not line.startswith('``` console'):
            add_line(source, line, 'index')
        else:
            break
    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": source
    })
    notebook['cells'].append({
        "cell_type": "code",
        "metadata": {},
        "outputs": [],
        "execution_count": execution_count,
        "source": ['# !pip install taipy\n', '# !pip install scikit-learn\n', '# !pip install statsmodels']
    })
    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": ['## Using Notebooks\n', ]
    })
    execution_count += 1
    return notebook, execution_count


def create_steps(notebook, execution_count):
    steps = ['step_0' + str(i) for i in range(0, 10)] + ['step_10', 'step_11', 'step_12']
    source = []
    for step in steps:
        if source != []:
            cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line,
                                                                      execution_count, force_creation=True)
        with open(step + '/ReadMe.md', 'r') as f:
            text = f.read()
        split_text = text.split('\n')
        cell = "markdown"
        for line in split_text:
            add_line(source, line, step)
            cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line, execution_count)
    return notebook, execution_count


if __name__ == '__main__':
    notebook = {
        "cells": [],
        "metadata": {
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": 3
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython3"
            },
            "orig_nbformat": 4
        },
        "nbformat": 4,
        "nbformat_minor": 2
    }
    execution_count = 0
    notebook, execution_count = create_introduction(notebook, execution_count)
    notebook, execution_count = create_steps(notebook, execution_count)
    with open('getting_started.ipynb', 'w', encoding='utf-8') as f:
        json.dump(notebook, f, indent=2)
|
from step_08 import * # Get all the scenarios already created all_scenarios = tp.get_scenarios() # Delete the scenarios that don't have a name attribute # All the scenarios of the previous steps do not have an associated name so they will be deleted, # this will not be the case for those created by this step [tp.delete(scenario.id) for scenario in all_scenarios if scenario.name is None] # Initial variable for the scenario selector # The list of possible values (lov) for the scenario selector is a list of tuples (scenario_id, scenario_name), # but the selected_scenario is just used to retrieve the scenario id and what gets displayed is the name of the scenario. scenario_selector = [(scenario.id, scenario.name) for scenario in tp.get_scenarios()] selected_scenario = None scenario_manager_page = page + """ # Create your scenario **Prediction date**\n\n <|{day}|date|not with_time|> **Max capacity**\n\n <|{max_capacity}|number|> **Number of predictions**\n\n<|{n_predictions}|number|> <|Create new scenario|button|on_action=create_scenario|> ## Scenario <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> ## Display the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def create_name_for_scenario(state) -> str: name = f"Scenario ({state.day.strftime('%A, %d %b')}; {state.max_capacity}; {state.n_predictions})" # Change the name if it is the same as some scenarios if name in [s[1] for s in state.scenario_selector]: name += f" ({len(state.scenario_selector)})" return name # Change the create_scenario function in order to change the default parameters # and allow the creation of multiple scenarios def create_scenario(state): print("Execution of scenario...") # Extra information for the scenario creation_date = state.day name = create_name_for_scenario(state) # Create a scenario scenario = tp.create_scenario(scenario_cfg, creation_date=creation_date, name=name) state.selected_scenario = (scenario.id, name) # Submit the scenario that is currently selected submit_scenario(state) def submit_scenario(state): print("Submitting scenario...") # Get the currently selected scenario scenario = tp.get(state.selected_scenario[0]) # Conversion to the right format (change?) 
day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the Data Nodes scenario.day.write(day) scenario.n_predictions.write(int(state.n_predictions)) scenario.max_capacity.write(int(state.max_capacity)) scenario.creation_date = state.day # Execute the scenario tp.submit(scenario) # Update the scenario selector and the scenario that is currently selected update_scenario_selector(state, scenario) # change list to scenario # Update the chart directly update_chart(state) def update_scenario_selector(state, scenario): print("Updating scenario selector...") # Update the scenario selector state.scenario_selector += [(scenario.id, scenario.name)] def update_chart(state): # Now, the selected_scenario comes from the state, it is interactive scenario = tp.get(state.selected_scenario[0]) pipeline = scenario.pipelines[state.selected_pipeline] update_predictions_dataset(state, pipeline) def on_change(state, var_name: str, var_value): if var_name == "n_week": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == var_value] elif var_name == "selected_pipeline" or var_name == "selected_scenario": # Update the chart when the scenario or the pipeline is changed # Check if we can read the data node to update the chart if tp.get(state.selected_scenario[0]).predictions.read() is not None: update_chart(state) if __name__ == "__main__": tp.Core().run() Gui(page=scenario_manager_page).run(dark_mode=False)
|
from step_07 import * # Initial variables ## Initial variables for the scenario day = dt.datetime(2021, 7, 26) n_predictions = 40 max_capacity = 200 page_scenario_manager = page + """ # Change your scenario **Prediction date**\n\n <|{day}|date|not with_time|> **Max capacity**\n\n <|{max_capacity}|number|> **Number of predictions**\n\n<|{n_predictions}|number|> <|Save changes|button|on_action={submit_scenario}|> Select the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|Update chart|button|on_action={update_chart}|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def create_scenario(): global selected_scenario print("Creating scenario...") scenario = tp.create_scenario(scenario_cfg) selected_scenario = scenario.id tp.submit(scenario) def submit_scenario(state): print("Submitting scenario...") # Get the selected scenario: in this current step a single scenario is created then modified here. scenario = tp.get(selected_scenario) # Conversion to the right format state_day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the datanodes scenario.day.write(state_day) scenario.n_predictions.write(int(state.n_predictions)) scenario.max_capacity.write(int(state.max_capacity)) # Execute the pipelines/code tp.submit(scenario) # Update the chart when we change the scenario update_chart(state) def update_chart(state): # Select the right scenario and pipeline scenario = tp.get(selected_scenario) pipeline = scenario.pipelines[state.selected_pipeline] # Update the chart based on this pipeline update_predictions_dataset(state, pipeline) if __name__ == "__main__": global selected_scenario tp.Core().run() # Creation of a single scenario create_scenario() Gui(page=page_scenario_manager).run(dark_mode=False)
|
import datetime as dt import pandas as pd from taipy import Config, Scope from step_01 import path_to_csv # Datanodes (3.1) ## Input Data Nodes initial_dataset_cfg = Config.configure_data_node(id="initial_dataset", storage_type="csv", path=path_to_csv, scope=Scope.GLOBAL) # We assume the current day is the 26th of July 2021. # This day can be changed to simulate multiple executions of scenarios on different days day_cfg = Config.configure_data_node(id="day", default_data=dt.datetime(2021, 7, 26)) n_predictions_cfg = Config.configure_data_node(id="n_predictions", default_data=40) max_capacity_cfg = Config.configure_data_node(id="max_capacity", default_data=200) ## Remaining Data Nodes cleaned_dataset_cfg = Config.configure_data_node(id="cleaned_dataset", validity_period=dt.timedelta(days=1), scope=Scope.GLOBAL) predictions_cfg = Config.configure_data_node(id="predictions", scope=Scope.PIPELINE) # Functions (3.2) def clean_data(initial_dataset: pd.DataFrame): print(" Cleaning data") # Convert the date column to datetime initial_dataset['Date'] = pd.to_datetime(initial_dataset['Date']) cleaned_dataset = initial_dataset.copy() return cleaned_dataset def predict_baseline(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int): print(" Predicting baseline") # Select the train data train_dataset = cleaned_dataset[cleaned_dataset['Date'] < day] predictions = train_dataset['Value'][-n_predictions:].reset_index(drop=True) predictions = predictions.apply(lambda x: min(x, max_capacity)) return predictions # Tasks (3.3) clean_data_task_cfg = Config.configure_task(id="clean_data", function=clean_data, input=initial_dataset_cfg, output=cleaned_dataset_cfg, skippable=True) predict_baseline_task_cfg = Config.configure_task(id="predict_baseline", function=predict_baseline, input=[cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg], output=predictions_cfg)
|
from step_05 import * from step_06 import scenario_cfg from taipy import Config # Set the list of pipelines names # It will be used in a selector of pipelines pipeline_selector = ["baseline", "ml"] selected_pipeline = pipeline_selector[0] scenario_page = page + """ Select the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|Update chart|button|on_action=update_chart|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def create_scenario(): print("Creating scenario...") scenario = tp.create_scenario(scenario_cfg) scenario = submit_scenario(scenario) return scenario def submit_scenario(scenario): print("Submitting scenario...") tp.submit(scenario) return scenario def update_chart(state): print("'Update chart' button clicked") # Select the right pipeline pipeline = scenario.pipelines[state.selected_pipeline] # Update the chart based on this pipeline # It is the same function as created before in step_5 update_predictions_dataset(state, pipeline) if __name__ == "__main__": # Delete all entities Config.configure_global_app(clean_entities_enabled=True) tp.clean_all_entities() tp.Core().run() # Creation of our first scenario scenario = create_scenario() Gui(page=scenario_page).run(dark_mode=False)
|
# For the sake of clarity, we have used an AutoRegressive model rather than a pure ML model such as: # Random Forest, Linear Regression, LSTM, etc from statsmodels.tsa.ar_model import AutoReg from taipy import Config from step_04 import * from step_03 import cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg, predictions_cfg, pd, dt # This is the function that will be used by the task def predict_ml(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int): print(" Predicting with ML") # Select the train data train_dataset = cleaned_dataset[cleaned_dataset["Date"] < day] # Fit the AutoRegressive model model = AutoReg(train_dataset["Value"], lags=7).fit() # Get the n_predictions forecasts predictions = model.forecast(n_predictions).reset_index(drop=True) predictions = predictions.apply(lambda x: min(x, max_capacity)) return predictions # Create the task configuration of the predict_ml function. ## We use the same input and ouput as the previous predict_baseline task but we change the funtion predict_ml_task_cfg = Config.configure_task(id="predict_ml", function=predict_ml, input=[cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg], output=predictions_cfg) # Create the new pipeline that will clean and predict with the ml model ml_pipeline_cfg = Config.configure_pipeline(id="ml", task_configs=[clean_data_task_cfg, predict_ml_task_cfg]) # Configure our scenario which is our business problem. scenario_cfg = Config.configure_scenario(id="scenario", pipeline_configs=[baseline_pipeline_cfg, ml_pipeline_cfg]) # The configuration is now complete if __name__ == "__main__": tp.Core().run() # Create the scenario scenario = tp.create_scenario(scenario_cfg) # Execute it tp.submit(scenario) # Get the resulting scenario ## Print the predictions of the two pipelines (baseline and ml) print("\nBaseline predictions\n", scenario.baseline.predictions.read()) print("\nMachine Learning predictions\n", scenario.ml.predictions.read())
|
from step_01 import dataset, n_week, Gui

# Select the week based on the slider value
dataset_week = dataset[dataset["Date"].dt.isocalendar().week == n_week]

page = """
# Getting started with Taipy

Select week: *<|{n_week}|>*

<|{n_week}|slider|min=1|max=52|>

<|{dataset_week}|chart|type=bar|x=Date|y=Value|height=100%|width=100%|>
"""


# on_change is the function that is called when any variable is changed
def on_change(state, var_name: str, var_value):
    if var_name == "n_week":
        # Update the dataset when the slider is moved
        state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == var_value]


if __name__ == "__main__":
    Gui(page=page).run(dark_mode=False)
|
from step_11 import * from sklearn.metrics import mean_absolute_error, mean_squared_error # Initial dataset for comparison comparison_scenario = pd.DataFrame({"Scenario Name": [], "RMSE baseline": [], "MAE baseline": [], "RMSE ML": [], "MAE ML": []}) # Indicates if the comparison is done comparison_scenario_done = False # Selector for metrics metric_selector = ["RMSE", "MAE"] selected_metric = metric_selector[0] def compute_metrics(historical_data, predicted_data): rmse = mean_squared_error(historical_data, predicted_data) mae = mean_absolute_error(historical_data, predicted_data) return rmse, mae def compare(state): print("Comparing...") # Initial lists for comparison scenario_names = [] rmses_baseline = [] maes_baseline = [] rmses_ml = [] maes_ml = [] # Go through all the primary scenarios all_scenarios = tp.get_primary_scenarios() all_scenarios_ordered = sorted(all_scenarios, key=lambda x: x.creation_date.timestamp()) for scenario in all_scenarios_ordered: print(f"Scenario {scenario.name}") # Go through all the pipelines for pipeline in scenario.pipelines.values(): print(f" Pipeline {pipeline.config_id}") # Get the predictions dataset with the historical data only_prediction_dataset = create_predictions_dataset(pipeline)[-pipeline.n_predictions.read():] # Series to compute the metrics (true values and predicted values) historical_values = only_prediction_dataset["Historical values"] predicted_values = only_prediction_dataset["Predicted values"] # Compute the metrics for this pipeline and primary scenario rmse, mae = compute_metrics(historical_values, predicted_values) # Add values to the appropriate lists if "baseline" in pipeline.config_id: rmses_baseline.append(rmse) maes_baseline.append(mae) elif "ml" in pipeline.config_id: rmses_ml.append(rmse) maes_ml.append(mae) scenario_names.append(scenario.creation_date.strftime("%A %d %b")) # Update comparison_scenario state.comparison_scenario = pd.DataFrame({"Scenario Name": scenario_names, "RMSE baseline": rmses_baseline, "MAE baseline": maes_baseline, "RMSE ML": rmses_ml, "MAE ML": maes_ml}) # When comparison_scenario_done will be set to True, # the part with the graphs will be finally rendered state.comparison_scenario_done = True # Performance page page_performance = """ <br/> <|part|render={comparison_scenario_done}| <|Table|expanded=False|expandable| <|{comparison_scenario}|table|width=100%|> |> <|{selected_metric}|selector|lov={metric_selector}|dropdown|> <|part|render={selected_metric=="RMSE"}| <|{comparison_scenario}|chart|type=bar|x=Scenario Name|y[1]=RMSE baseline|y[2]=RMSE ML|height=100%|width=100%|> |> <|part|render={selected_metric=="MAE"}| <|{comparison_scenario}|chart|type=bar|x=Scenario Name|y[1]=MAE baseline|y[2]=MAE ML|height=100%|width=100%|> |> |> <center> <|Compare primarys|button|on_action=compare|> </center> """ lov_menu = [("Data-Visualization", "Data Visualization"), ("Scenario-Manager", "Scenario Manager"), ("Performance", "Performance")] # Create a menu with our pages root_md = "<|menu|label=Menu|lov={lov_menu}|on_action=menu_fct|>" pages = {"/":root_md, "Data-Visualization":page_data_visualization, "Scenario-Manager":page_scenario_manager, "Performance":page_performance} def menu_fct(state, var_name: str, fct: str, var_value: list): # Change the value of the state.page variable in order to render the correct page navigate(state, var_value["args"][0]) if __name__ == "__main__": tp.Core().run() Gui(pages=pages).run(dark_mode=False)
|
import numpy as np import pandas as pd from step_04 import tp, baseline_pipeline_cfg, dt from step_02 import * # Initialize the "predictions" dataset predictions_dataset = pd.DataFrame( {"Date": [dt.datetime(2021, 6, 1)], "Historical values": [np.NaN], "Predicted values": [np.NaN]}) # Add a button and a chart for our predictions pipeline_page = page + """ Press <|predict|button|on_action=predict|> to predict with default parameters (30 predictions) and June 1st as day. <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def predict(state): print("'Predict' button clicked") pipeline = create_and_submit_pipeline() update_predictions_dataset(state, pipeline) def create_and_submit_pipeline(): print("Execution of pipeline...") # Create the pipeline from the pipeline config pipeline = tp.create_pipeline(baseline_pipeline_cfg) # Submit the pipeline (Execution) tp.submit(pipeline) return pipeline def create_predictions_dataset(pipeline): print("Creating predictions dataset...") # Read data from the pipeline predictions = pipeline.predictions.read() day = pipeline.day.read() n_predictions = pipeline.n_predictions.read() cleaned_data = pipeline.cleaned_dataset.read() # Set arbitrarily the time window for the chart as 5 times the number of predictions window = 5 * n_predictions # Create the historical dataset that will be displayed new_length = len(cleaned_data[cleaned_data["Date"] < day]) + n_predictions temp_df = cleaned_data[:new_length] temp_df = temp_df[-window:].reset_index(drop=True) # Create the series that will be used in the concat historical_values = pd.Series(temp_df["Value"], name="Historical values") predicted_values = pd.Series([np.NaN] * len(temp_df), name="Predicted values") predicted_values[-len(predictions):] = predictions # Create the predictions dataset # Columns : [Date, Historical values, Predicted values] return pd.concat([temp_df["Date"], historical_values, predicted_values], axis=1) def update_predictions_dataset(state, pipeline): print("Updating predictions dataset...") state.predictions_dataset = create_predictions_dataset(pipeline) if __name__ == "__main__": tp.Core().run() Gui(page=pipeline_page).run(dark_mode=False)
|
from taipy import Gui
import pandas as pd


def get_data(path_to_csv: str):
    # pandas.read_csv() returns a pd.DataFrame
    dataset = pd.read_csv(path_to_csv)
    dataset["Date"] = pd.to_datetime(dataset["Date"])
    return dataset


# Read the dataframe
path_to_csv = "dataset.csv"
dataset = get_data(path_to_csv)

# Initial value
n_week = 10

# Definition of the page
page = """
# Getting started with Taipy

Week number: *<|{n_week}|>*

Interact with this slider to change the week number:

<|{n_week}|slider|min=1|max=52|>

## Dataset:

Display the last three months of data:

<|{dataset[9000:]}|chart|type=bar|x=Date|y=Value|height=100%|>

<|{dataset}|table|height=400px|width=95%|>
"""

if __name__ == "__main__":
    # Create a Gui object with our page content
    Gui(page=page).run(dark_mode=False)
|
from step_10 import * from step_06 import ml_pipeline_cfg from taipy import Config, Frequency from taipy.gui import notify # Create scenarios each week and compare them scenario_daily_cfg = Config.configure_scenario(id="scenario", pipeline_configs=[baseline_pipeline_cfg, ml_pipeline_cfg], frequency=Frequency.DAILY) if __name__ == "__main__": # Delete all entities Config.configure_global_app(clean_entities_enabled=True) tp.clean_all_entities() # Change the inital scenario selector to see which scenarios are primary scenario_selector = [(scenario.id, ("*" if scenario.is_primary else "") + scenario.name) for scenario in tp.get_scenarios()] # Redefine update_scenario_selector to add "*" in the display name when the scnario is primary def update_scenario_selector(state, scenario): print("Updating scenario selector...") # Create the scenario name for the scenario selector # This name changes dependind whether the scenario is primary or not scenario_name = ("*" if scenario.is_primary else "") + scenario.name print(scenario_name) # Update the scenario selector state.scenario_selector += [(scenario.id, scenario_name)] selected_scenario_is_primary = None # Change the create_scenario function to create a scenario with the selected frequency def create_scenario(state): print("Execution of scenario...") # Extra information for scenario creation_date = state.day name = create_name_for_scenario(state) # Create a scenario with the week cycle scenario = tp.create_scenario(scenario_daily_cfg, creation_date=creation_date, name=name) state.selected_scenario = (scenario.id, name) # Change the scenario that is currently selected submit_scenario(state) # This is the same code as in step_9_dynamic_scenario_creation.py def submit_scenario(state): print("Submitting scenario...") # Get the currently selected scenario scenario = tp.get(state.selected_scenario[0]) # Conversion to the right format state_day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the Data Nodes # if state.day != scenario.day.read(): scenario.day.write(state_day) # if int(state.n_predictions) != scenario.n_predictions.read(): scenario.n_predictions.write(int(state.n_predictions)) # if state.max_capacity != scenario.max_capacity.read(): scenario.max_capacity.write(int(state.max_capacity)) # if state.day != scenario.creation_date: scenario.creation_date = state.day # Execute the pipelines/code tp.submit(scenario) # Update the scenario selector and the scenario that is currently selected update_scenario_selector(state, scenario) # change list to scenario # Update the chart directly update_chart(state) def make_primary(state): print("Making the current scenario primary...") scenario = tp.get(state.selected_scenario[0]) # Take the current scenario primary tp.set_primary(scenario) # Update the scenario selector accordingly state.scenario_selector = [(scenario.id, ("*" if scenario.is_primary else "") + scenario.name) for scenario in tp.get_scenarios()] state.selected_scenario_is_primary = True def remove_scenario_from_selector(state, scenario: list): # Take all the scenarios in the selector that doesn't have the scenario.id state.scenario_selector = [(s[0], s[1]) for s in state.scenario_selector if s[0] != scenario.id] state.selected_scenario = state.scenario_selector[-1] def delete_scenario(state): scenario = tp.get(state.selected_scenario[0]) if scenario.is_primary: # Notify the user that primary scenarios can not be deleted notify(state, "info", "Cannot delete the primary scenario") else: # 
Delete the scenario and the related objects (datanodes, tasks, jobs,...) tp.delete(scenario.id) # Update the scenario selector accordingly remove_scenario_from_selector(state, scenario) # Add a "Delete scenario" and a "Make primary" buttons page_scenario_manager = """ # Create your scenario: <|layout|columns=1 1 1 1| <| **Prediction date**\n\n <|{day}|date|not with_time|> |> <| **Max capacity**\n\n <|{max_capacity}|number|> |> <| **Number of predictions**\n\n<|{n_predictions}|number|> |> <| <br/> <br/> <|Create new scenario|button|on_action=create_scenario|> |> |> <|part|render={len(scenario_selector) > 0}| <|layout|columns=1 1| <|layout|columns=1 1| <| ## Scenario \n <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> |> <br/> <br/> <br/> <br/> <|Delete scenario|button|on_action=delete_scenario|active={len(scenario_selector)>0}|> <|Make primary|button|on_action=make_primary|active={not(selected_scenario_is_primary) and len(scenario_selector)>0}|> |> <| ## Display the pipeline \n <|{selected_pipeline}|selector|lov={pipeline_selector}|dropdown|> |> |> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> |> """ lov_menu = [("Data-Visualization", "Data Visualization"), ("Scenario-Manager", "Scenario Manager")] # Create a menu with our pages root_md = "<|menu|label=Menu|lov={lov_menu}|on_action=menu_fct|>" pages = {"/":root_md, "Data-Visualization":page_data_visualization, "Scenario-Manager":page_scenario_manager} def menu_fct(state, var_name: str, fct: str, var_value: list): # Change the value of the state.page variable in order to render the correct page navigate(state, var_value["args"][0]) def on_change(state, var_name: str, var_value): if var_name == "n_week": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == var_value] elif var_name == "selected_pipeline" or var_name == "selected_scenario": # Update selected_scenario_is_primary indicating if the current scenario is primary or not state.selected_scenario_is_primary = tp.get(state.selected_scenario[0]).is_primary # Check if we can read the data node to update the chart if tp.get(state.selected_scenario[0]).predictions.read() is not None: update_chart(state) if __name__ == "__main__": tp.Core().run() Gui(pages=pages).run(dark_mode=False)
|
from taipy import Gui

# A dark mode is available in Taipy
# However, we will use the light mode for the Getting Started
Gui(page="# Getting started with *Taipy*").run(dark_mode=False)
|
from step_09 import * from taipy.gui import navigate # Our first page is the original page # (with the slider and the chart that displays a week of the historical data) page_data_visualization = page # Second page: create scenarios and display results page_scenario_manager = """ # Create your scenario <|layout|columns=1 1 1 1| <| **Prediction date**\n\n <|{day}|date|not with_time|> |> <| **Max capacity**\n\n <|{max_capacity}|number|> |> <| **Number of predictions**\n\n<|{n_predictions}|number|> |> <| <br/> <br/>\n <|Create new scenario|button|on_action=create_scenario|> |> |> <|part|render={len(scenario_selector) > 0}| <|layout|columns=1 1| <| ## Scenario \n <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> |> <| ## Display the pipeline \n <|{selected_pipeline}|selector|lov={pipeline_selector}|dropdown|> |> |> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> |> """ lov_menu = [("Data-Visualization", "Data Visualization"), ("Scenario-Manager", "Scenario Manager")] # Create a menu with our pages root_md = "<|menu|label=Menu|lov={lov_menu}|on_action=menu_fct|>" pages = {"/":root_md, "Data-Visualization":page_data_visualization, "Scenario-Manager":page_scenario_manager} def menu_fct(state, var_name: str, fct: str, var_value: list): # Change the value of the state.page variable in order to render the correct page navigate(state, var_value["args"][0]) if __name__ == "__main__": tp.Core().run() Gui(pages=pages).run(dark_mode=False)
|
import taipy as tp
from step_03 import Config, clean_data_task_cfg, predict_baseline_task_cfg, dt

# Create the first pipeline configuration
baseline_pipeline_cfg = Config.configure_pipeline(id="baseline",
                                                  task_configs=[clean_data_task_cfg, predict_baseline_task_cfg])

## Execute the "baseline" pipeline
if __name__ == "__main__":
    tp.Core().run()
    # Create the pipeline
    baseline_pipeline = tp.create_pipeline(baseline_pipeline_cfg)
    # Submit the pipeline (Execution)
    tp.submit(baseline_pipeline)
    # Read output data from the pipeline
    baseline_predictions = baseline_pipeline.predictions.read()
    print("Predictions of baseline algorithm\n", baseline_predictions)
|
AWS_ACCESS_KEY = ''
AWS_SECRET_KEY = ''
AWS_REGION = ''
S3_BUCKET_NAME = ''
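The constants above are left blank for the user to fill in. A minimal sketch of an alternative that reads the same values from environment variables instead of hardcoding them; the variable names below are illustrative, not mandated by the app.

import os

# Read the credentials from the environment, falling back to empty strings.
AWS_ACCESS_KEY = os.environ.get("AWS_ACCESS_KEY", "")
AWS_SECRET_KEY = os.environ.get("AWS_SECRET_KEY", "")
AWS_REGION = os.environ.get("AWS_REGION", "")
S3_BUCKET_NAME = os.environ.get("S3_BUCKET_NAME", "")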
|
from flask import Flask, render_template, redirect, url_for, request, flash from werkzeug.utils import secure_filename from uploads.file_handler import is_file_type_allowed, upload_file_to_s3, get_presigned_file_url from localStoragePy import localStoragePy from transformers import AutoTokenizer, pipeline # from tensorflow.keras.preprocessing.sequence import pad_sequences # from tensorflow.keras.models import load_model from taipy.gui import Gui import webbrowser import tensorflow as tf import pandas as pd import numpy as np import pytorch_pretrained_bert as ppb assert 'bert-large-cased' in ppb.modeling.PRETRAINED_MODEL_ARCHIVE_MAP app = Flask(__name__) app.secret_key = '3d6f45a5fc12445dbac2f59c3b6c7cb1' localStorage = localStoragePy('app', 'json') target_arr = ["df['col1'].nunique()", "df.sort_values(by=['col1'],inplace =True)", "df.sort_values(by=['col1', 'col2'],inplace =True)", "df.sort_values(by=['col1', 'col2', 'col3'],inplace =True)", "df.drop(columns = 'col1',inplace = True)", "new_df=df.loc[:, ['col1','col2']]", "df['col1'].value_counts()", "<|{dataset}|chart|type=bar|x=col1|y=col2|height=100%|>", "<|{dataset}|chart|type=pie|values=col2|labels=col1|height=100%|>", "<|{dataset}|chart|mode=lines|x=col1|y=col2|>"] portNo = 8888 @app.route("/", methods=['GET']) def home(): return render_template('home.html') @app.route("/upload-file", methods=['POST']) def upload_file(): if 'file' not in request.files: flash('No file uploaded', 'danger') return redirect(url_for('home')) file_to_upload = request.files['file'] if file_to_upload.filename == '': flash('No file uploaded', 'danger') return redirect(url_for('home')) if file_to_upload and is_file_type_allowed(file_to_upload.filename): provided_file_name = secure_filename(file_to_upload.filename) stored_file_name = upload_file_to_s3(file_to_upload, provided_file_name) localStorage.setItem("stored_file_name", stored_file_name) localStorage.setItem("provided_file_name", provided_file_name) flash(f'{provided_file_name} was successfully uploaded', 'success') return redirect(url_for('home')) @app.route("/query", methods=['POST']) def query(): try: query = request.form['query'] provided_file_name = localStorage.getItem("provided_file_name") stored_file_name = localStorage.getItem("stored_file_name") csv = get_presigned_file_url(stored_file_name, provided_file_name) df = pd.read_csv(csv) print("query: " + query) prediction_int, cols_requested = getPredictionInt(df, query) if prediction_int < 7: panda_query = target_arr[prediction_int] print(panda_query) for i in range(len(cols_requested)): panda_query = panda_query.replace("col" + str(i+1), cols_requested[i]) exec(panda_query) html_string = ''' <html lang="en"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet"> </head> <body> <div class="justify-content-center mt-5"> <div class="text-center"> <h4 class="">Download CSV <a href='{new_presigned_url}'> <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" style="width:20px; height:20px;"> <path stroke-linecap="round" stroke-linejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5M16.5 12L12 16.5m0 0L7.5 12m4.5 4.5V3" /> </svg> </a> </h4> <p>Click <a href='/'>here</a> to return to home page</p> {table} </div> </div> </body> </html>. 
''' filename = "new.csv" df.to_csv(filename, index=False) file_to_upload = open("new.csv", 'rb') new_provided_file_name = secure_filename(filename) new_stored_file_name = upload_file_to_s3(file_to_upload, new_provided_file_name) new_presigned_url = get_presigned_file_url(new_stored_file_name, new_provided_file_name) print("Presigned url: " + new_presigned_url) df = df.reset_index(drop=True) html = df.to_html(classes='table table-striped table-bordered w-75 mx-auto') html = html.replace("text-align: right;", "text-align: left;") toDisplay = html_string.format(table = html, new_presigned_url = new_presigned_url) return toDisplay else: print("taipy") taipy_query = target_arr[prediction_int] dataset = df for i in range(len(cols_requested)): taipy_query = taipy_query.replace("col" + str(i+1), cols_requested[i]) page = """{0}""" page = page.format(taipy_query) gui = Gui(page) global portNo portNum = portNo portNo += 1 webbrowser.open_new_tab('http://localhost:' + str(portNum)) gui.run(port=portNum) print("hello world") return redirect(url_for('home')) except: print("Invalid query") flash('Invalid query', 'danger') return redirect(url_for('home')) def getPredictionInt(df, query): cols = df.columns sentence = query words = sentence.split() cols_requested = [] for item in cols: for word in words: if(item.upper() == word.upper()): cols_requested.append(item) general_sentence = sentence for i in range(len(cols_requested)): general_sentence = general_sentence.replace(cols_requested[i], "col" + str(i+1)) model_id = "tanishabhagwanani/distilbert-base-uncased-finetuned-emotion" classifier = pipeline("text-classification", model=model_id) custom_question = query preds = classifier(custom_question, return_all_scores=True) preds_df = pd.DataFrame(preds[0]) prediction_int = np.argmax(preds_df.score) return prediction_int, cols_requested if __name__=='__main__': app.run(host="localhost", port=8000, debug=True)
|
import uuid

import boto3

from config import AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET_NAME

s3 = boto3.client('s3',
                  aws_access_key_id=AWS_ACCESS_KEY,
                  aws_secret_access_key=AWS_SECRET_KEY,
                  region_name=AWS_REGION)

ALLOWED_FILE_TYPES = {'csv'}
S3_BUCKET_NAME = S3_BUCKET_NAME
S3_EXPIRES_IN_SECONDS = 100


def get_file_type(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower()


def is_file_type_allowed(filename):
    return get_file_type(filename) in ALLOWED_FILE_TYPES


def upload_file_to_s3(file, provided_file_name):
    stored_file_name = f'{str(uuid.uuid4())}.{get_file_type(provided_file_name)}'
    s3.upload_fileobj(file, S3_BUCKET_NAME, stored_file_name)
    return stored_file_name


def get_presigned_file_url(stored_file_name, provided_file_name):
    if not stored_file_name or not provided_file_name:
        return
    return s3.generate_presigned_url(
        'get_object',
        Params={
            'Bucket': S3_BUCKET_NAME,
            'Key': stored_file_name,
            'ResponseContentDisposition': f"attachment; filename = {provided_file_name}"
        },
        ExpiresIn=S3_EXPIRES_IN_SECONDS
    )
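A minimal sketch of how these helpers might be exercised outside of the Flask app, assuming valid AWS credentials in config.py and a local sample.csv file (both assumptions, not part of the module above).

from uploads.file_handler import is_file_type_allowed, upload_file_to_s3, get_presigned_file_url

filename = "sample.csv"  # hypothetical local file
if is_file_type_allowed(filename):
    with open(filename, "rb") as f:
        # upload_fileobj expects a binary file-like object
        stored_name = upload_file_to_s3(f, filename)
    # The presigned URL expires after S3_EXPIRES_IN_SECONDS (100 seconds here)
    print(get_presigned_file_url(stored_name, filename))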
|
import json


def add_line(source, line, step):
    on_change_needed = ['step_02', 'step_09', 'step_11']
    line = line.replace('Getting Started with Taipy', 'Getting Started with Taipy on Notebooks')
    line = line.replace('(../src/', '(https://docs.taipy.io/getting_started/src/')
    line = line.replace('(dataset.csv)', '(https://docs.taipy.io/getting_started/step_01/dataset.csv)')
    if line.startswith('!['):
        # Rewrite a Markdown image line as a centered HTML image
        img_src = line.split('](')[1].split(')')[0]
        width = line.split('](')[1].split(')')[1].split(' ')[1]
        source.append('<div align="center">\n')
        source.append(f' <img src={img_src} {width}>\n')
        source.append('</div>\n')
    elif step == 'step_00' and line.startswith('Gui(page='):
        source.append('\n')
        source.append('# We can use Gui("# Getting Started with Taipy").run() directly\n')
        source.append('# However, we need a Markdown and Gui object to modify the content of the page\n')
        source.append('# in the Notebook\n')
        source.append('\n')
        source.append('main_page = Markdown("# Getting Started with Taipy")\n')
        source.append('gui = Gui(main_page)\n')
        source.append('gui.run(dark_mode=False)\n')
    elif line.startswith('Gui(page=') and step != 'step_00':
        search_for_md = line.split(')')
        name_of_md = search_for_md[0][9:]
        source.append('gui.stop()\n')
        if step in on_change_needed:
            source.append('gui.on_change = on_change\n')
        source.append(f'main_page.set_content({name_of_md})\n')
        source.append('gui.run()\n')
    elif step == 'step_00' and line.startswith('from taipy'):
        source.append("from taipy.gui import Gui, Markdown\n")
    elif 'Notebook' in line and 'step' in step:
        pass
    else:
        source.append(line + '\n')
    return source


def detect_new_cell(notebook, source, cell, line, execution_count, force_creation=False):
    if line.startswith('```python') or line.startswith('```') and cell == 'code' or force_creation:
        source = source[:-1]
        if cell == 'code':
            notebook['cells'].append({
                "cell_type": "code",
                "metadata": {},
                "outputs": [],
                "execution_count": execution_count,
                "source": source
            })
            cell = 'markdown'
            execution_count += 1
        else:
            notebook['cells'].append({
                "cell_type": "markdown",
                "metadata": {},
                "source": source
            })
            cell = 'code'
        source = []
    return cell, source, notebook, execution_count


def create_introduction(notebook, execution_count):
    with open('index.md', 'r') as f:
        text = f.read()
    split_text = text.split('\n')
    source = []
    for line in split_text:
        if not line.startswith('``` console'):
            add_line(source, line, 'index')
        else:
            break
    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": source
    })
    notebook['cells'].append({
        "cell_type": "code",
        "metadata": {},
        "outputs": [],
        "execution_count": execution_count,
        "source": ['# !pip install taipy\n', '# !pip install scikit-learn\n', '# !pip install statsmodels']
    })
    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": ['## Using Notebooks\n',
                   'Some functions will be used in the Getting Started for Notebooks that are primarily used for Notebooks (`gui.stop()`, `gui.run()`, `gui.on_change`, `set_content()`)\n',
                   'To have more explanation on these different functions, you can find the related documentation [here](https://docs.taipy.io/manuals/gui/notebooks/)\n',
                   '**Warning**: Do not forget to stop your server when you are finished. You can do so by restarting your kernel.\n']
    })
    execution_count += 1
    return notebook, execution_count


def create_steps(notebook, execution_count):
    steps = ['step_0' + str(i) for i in range(0, 10)] + ['step_10', 'step_11', 'step_12']
    source = []
    for step in steps:
        if source != []:
            cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line,
                                                                      execution_count, force_creation=True)
        with open(step + '/ReadMe.md', 'r') as f:
            text = f.read()
        split_text = text.split('\n')
        cell = "markdown"
        for line in split_text:
            add_line(source, line, step)
            cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line, execution_count)
    return notebook, execution_count


if __name__ == '__main__':
    notebook = {
        "cells": [],
        "metadata": {
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": 3
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython3"
            },
            "orig_nbformat": 4
        },
        "nbformat": 4,
        "nbformat_minor": 2
    }
    execution_count = 0
    notebook, execution_count = create_introduction(notebook, execution_count)
    notebook, execution_count = create_steps(notebook, execution_count)
    with open('getting_started.ipynb', 'w', encoding='utf-8') as f:
        json.dump(notebook, f, indent=2)
|
from step_08 import * # Get all the scenarios already created all_scenarios = tp.get_scenarios() # Delete the scenarios that don't have a name attribute # All the scenarios of the previous steps do not have an associated name so they will be deleted, # this will not be the case for those created by this step [tp.delete(scenario.id) for scenario in all_scenarios if scenario.name is None] # Initial variable for the scenario selector # The list of possible values (lov) for the scenario selector is a list of tuples (scenario_id, scenario_name), # but the selected_scenario is just used to retrieve the scenario id and what gets displayed is the name of the scenario. scenario_selector = [(scenario.id, scenario.name) for scenario in tp.get_scenarios()] selected_scenario = None scenario_manager_page = page + """ # Create your scenario **Prediction date**\n\n <|{day}|date|not with_time|> **Max capacity**\n\n <|{max_capacity}|number|> **Number of predictions**\n\n<|{n_predictions}|number|> <|Create new scenario|button|on_action=create_scenario|> ## Scenario <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> ## Display the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def create_name_for_scenario(state) -> str: name = f"Scenario ({state.day.strftime('%A, %d %b')}; {state.max_capacity}; {state.n_predictions})" # Change the name if it is the same as some scenarios if name in [s[1] for s in state.scenario_selector]: name += f" ({len(state.scenario_selector)})" return name # Change the create_scenario function in order to change the default parameters # and allow the creation of multiple scenarios def create_scenario(state): print("Execution of scenario...") # Extra information for the scenario creation_date = state.day name = create_name_for_scenario(state) # Create a scenario scenario = tp.create_scenario(scenario_cfg, creation_date=creation_date, name=name) state.selected_scenario = (scenario.id, name) # Submit the scenario that is currently selected submit_scenario(state) def submit_scenario(state): print("Submitting scenario...") # Get the currently selected scenario scenario = tp.get(state.selected_scenario[0]) # Conversion to the right format (change?) 
day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the Data Nodes scenario.day.write(day) scenario.n_predictions.write(int(state.n_predictions)) scenario.max_capacity.write(int(state.max_capacity)) scenario.creation_date = state.day # Execute the scenario tp.submit(scenario) # Update the scenario selector and the scenario that is currently selected update_scenario_selector(state, scenario) # change list to scenario # Update the chart directly update_chart(state) def update_scenario_selector(state, scenario): print("Updating scenario selector...") # Update the scenario selector state.scenario_selector += [(scenario.id, scenario.name)] def update_chart(state): # Now, the selected_scenario comes from the state, it is interactive scenario = tp.get(state.selected_scenario[0]) pipeline = scenario.pipelines[state.selected_pipeline] update_predictions_dataset(state, pipeline) def on_change(state, var_name: str, var_value): if var_name == "n_week": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == var_value] elif var_name == "selected_pipeline" or var_name == "selected_scenario": # Update the chart when the scenario or the pipeline is changed # Check if we can read the data node to update the chart if tp.get(state.selected_scenario[0]).predictions.read() is not None: update_chart(state) if __name__ == "__main__": Gui(page=scenario_manager_page).run(dark_mode=False)
|
from step_07 import * # Initial variables ## Initial variables for the scenario day = dt.datetime(2021, 7, 26) n_predictions = 40 max_capacity = 200 page_scenario_manager = page + """ # Change your scenario **Prediction date**\n\n <|{day}|date|not with_time|> **Max capacity**\n\n <|{max_capacity}|number|> **Number of predictions**\n\n<|{n_predictions}|number|> <|Save changes|button|on_action={submit_scenario}|> Select the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|Update chart|button|on_action={update_chart}|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def create_scenario(): global selected_scenario print("Creating scenario...") scenario = tp.create_scenario(scenario_cfg) selected_scenario = scenario.id tp.submit(scenario) def submit_scenario(state): print("Submitting scenario...") # Get the selected scenario: in this current step a single scenario is created then modified here. scenario = tp.get(selected_scenario) # Conversion to the right format state_day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the datanodes scenario.day.write(state_day) scenario.n_predictions.write(int(state.n_predictions)) scenario.max_capacity.write(int(state.max_capacity)) # Execute the pipelines/code tp.submit(scenario) # Update the chart when we change the scenario update_chart(state) def update_chart(state): # Select the right scenario and pipeline scenario = tp.get(selected_scenario) pipeline = scenario.pipelines[state.selected_pipeline] # Update the chart based on this pipeline update_predictions_dataset(state, pipeline) if __name__ == "__main__": global selected_scenario # Creation of a single scenario create_scenario() Gui(page=page_scenario_manager).run(dark_mode=False)
|
import datetime as dt import pandas as pd from taipy import Config, Scope from step_01 import path_to_csv # Datanodes (3.1) ## Input Data Nodes initial_dataset_cfg = Config.configure_data_node(id="initial_dataset", storage_type="csv", path=path_to_csv, scope=Scope.GLOBAL) # We assume the current day is the 26th of July 2021. # This day can be changed to simulate multiple executions of scenarios on different days day_cfg = Config.configure_data_node(id="day", default_data=dt.datetime(2021, 7, 26)) n_predictions_cfg = Config.configure_data_node(id="n_predictions", default_data=40) max_capacity_cfg = Config.configure_data_node(id="max_capacity", default_data=200) ## Remaining Data Nodes cleaned_dataset_cfg = Config.configure_data_node(id="cleaned_dataset", cacheable=True, validity_period=dt.timedelta(days=1), scope=Scope.GLOBAL) predictions_cfg = Config.configure_data_node(id="predictions", scope=Scope.PIPELINE) # Functions (3.2) def clean_data(initial_dataset: pd.DataFrame): print(" Cleaning data") # Convert the date column to datetime initial_dataset['Date'] = pd.to_datetime(initial_dataset['Date']) cleaned_dataset = initial_dataset.copy() return cleaned_dataset def predict_baseline(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int): print(" Predicting baseline") # Select the train data train_dataset = cleaned_dataset[cleaned_dataset['Date'] < day] predictions = train_dataset['Value'][-n_predictions:].reset_index(drop=True) predictions = predictions.apply(lambda x: min(x, max_capacity)) return predictions # Tasks (3.3) clean_data_task_cfg = Config.configure_task(id="clean_data", function=clean_data, input=initial_dataset_cfg, output=cleaned_dataset_cfg) predict_baseline_task_cfg = Config.configure_task(id="predict_baseline", function=predict_baseline, input=[cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg], output=predictions_cfg)
|
from step_05 import * from step_06 import scenario_cfg from taipy import Config # Set the list of pipelines names # It will be used in a selector of pipelines pipeline_selector = ["baseline", "ml"] selected_pipeline = pipeline_selector[0] scenario_page = page + """ Select the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|Update chart|button|on_action=update_chart|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def create_scenario(): print("Creating scenario...") scenario = tp.create_scenario(scenario_cfg) scenario = submit_scenario(scenario) return scenario def submit_scenario(scenario): print("Submitting scenario...") tp.submit(scenario) return scenario def update_chart(state): print("'Update chart' button clicked") # Select the right pipeline pipeline = scenario.pipelines[state.selected_pipeline] # Update the chart based on this pipeline # It is the same function as created before in step_5 update_predictions_dataset(state, pipeline) if __name__ == "__main__": # Delete all entities Config.configure_global_app(clean_entities_enabled=True) tp.clean_all_entities() # Creation of our first scenario scenario = create_scenario() Gui(page=scenario_page).run(dark_mode=False)
|
# For the sake of clarity, we have used an AutoRegressive model rather than a pure ML model such as: # Random Forest, Linear Regression, LSTM, etc. from statsmodels.tsa.ar_model import AutoReg from taipy import Config from step_04 import * from step_03 import cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg, predictions_cfg, pd, dt # This is the function that will be used by the task def predict_ml(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int): print(" Predicting with ML") # Select the train data train_dataset = cleaned_dataset[cleaned_dataset["Date"] < day] # Fit the AutoRegressive model model = AutoReg(train_dataset["Value"], lags=7).fit() # Get the n_predictions forecasts predictions = model.forecast(n_predictions).reset_index(drop=True) predictions = predictions.apply(lambda x: min(x, max_capacity)) return predictions # Create the task configuration of the predict_ml function. ## We use the same input and output as the previous predict_baseline task but we change the function predict_ml_task_cfg = Config.configure_task(id="predict_ml", function=predict_ml, input=[cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg], output=predictions_cfg) # Create the new pipeline that will clean and predict with the ml model ml_pipeline_cfg = Config.configure_pipeline(id="ml", task_configs=[clean_data_task_cfg, predict_ml_task_cfg]) # Configure our scenario which is our business problem. scenario_cfg = Config.configure_scenario(id="scenario", pipeline_configs=[baseline_pipeline_cfg, ml_pipeline_cfg]) # The configuration is now complete if __name__ == "__main__": # Create the scenario scenario = tp.create_scenario(scenario_cfg) # Execute it tp.submit(scenario) # Get the resulting scenario ## Print the predictions of the two pipelines (baseline and ml) print("\nBaseline predictions\n", scenario.baseline.predictions.read()) print("\nMachine Learning predictions\n", scenario.ml.predictions.read())
|
from step_01 import dataset, n_week, Gui # Select the week based on the slider value dataset_week = dataset[dataset["Date"].dt.isocalendar().week == n_week] page = """ # Getting started with Taipy Select week: *<|{n_week}|>* <|{n_week}|slider|min=1|max=52|> <|{dataset_week}|chart|type=bar|x=Date|y=Value|height=100%|width=100%|> """ # on_change is the function that is called when any variable is changed def on_change(state, var_name: str, var_value): if var_name == "n_week": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == var_value] if __name__ == "__main__": Gui(page=page).run(dark_mode=False)
|
from step_11 import * from sklearn.metrics import mean_absolute_error, mean_squared_error # Initial dataset for comparison comparison_scenario = pd.DataFrame({"Scenario Name": [], "RMSE baseline": [], "MAE baseline": [], "RMSE ML": [], "MAE ML": []}) # Indicates if the comparison is done comparison_scenario_done = False # Selector for metrics metric_selector = ["RMSE", "MAE"] selected_metric = metric_selector[0] def compute_metrics(historical_data, predicted_data): # Take the square root of the MSE so the first metric really is an RMSE rmse = mean_squared_error(historical_data, predicted_data) ** 0.5 mae = mean_absolute_error(historical_data, predicted_data) return rmse, mae def compare(state): print("Comparing...") # Initial lists for comparison scenario_names = [] rmses_baseline = [] maes_baseline = [] rmses_ml = [] maes_ml = [] # Go through all the primary scenarios all_scenarios = tp.get_primary_scenarios() all_scenarios_ordered = sorted(all_scenarios, key=lambda x: x.creation_date.timestamp()) for scenario in all_scenarios_ordered: print(f"Scenario {scenario.name}") # Go through all the pipelines for pipeline in scenario.pipelines.values(): print(f" Pipeline {pipeline.config_id}") # Get the predictions dataset with the historical data only_prediction_dataset = create_predictions_dataset(pipeline)[-pipeline.n_predictions.read():] # Series to compute the metrics (true values and predicted values) historical_values = only_prediction_dataset["Historical values"] predicted_values = only_prediction_dataset["Predicted values"] # Compute the metrics for this pipeline and primary scenario rmse, mae = compute_metrics(historical_values, predicted_values) # Add values to the appropriate lists if "baseline" in pipeline.config_id: rmses_baseline.append(rmse) maes_baseline.append(mae) elif "ml" in pipeline.config_id: rmses_ml.append(rmse) maes_ml.append(mae) scenario_names.append(scenario.creation_date.strftime("%A %d %b")) # Update comparison_scenario state.comparison_scenario = pd.DataFrame({"Scenario Name": scenario_names, "RMSE baseline": rmses_baseline, "MAE baseline": maes_baseline, "RMSE ML": rmses_ml, "MAE ML": maes_ml}) # When comparison_scenario_done is set to True, # the part with the graphs is rendered state.comparison_scenario_done = True # Performance page page_performance = """ <br/> <|part|render={comparison_scenario_done}| <|Table|expanded=False|expandable| <|{comparison_scenario}|table|width=100%|> |> <|{selected_metric}|selector|lov={metric_selector}|dropdown|> <|part|render={selected_metric=="RMSE"}| <|{comparison_scenario}|chart|type=bar|x=Scenario Name|y[1]=RMSE baseline|y[2]=RMSE ML|height=100%|width=100%|> |> <|part|render={selected_metric=="MAE"}| <|{comparison_scenario}|chart|type=bar|x=Scenario Name|y[1]=MAE baseline|y[2]=MAE ML|height=100%|width=100%|> |> |> <center> <|Compare primary scenarios|button|on_action=compare|> </center> """ # Add the page_performance section to the menu multi_pages = """ <|menu|label=Menu|lov={["Data Visualization", "Scenario Manager", "Performance"]}|on_action=menu_fct|> <|part|render={page=="Data Visualization"}|""" + page_data_visualization + """|> <|part|render={page=="Scenario Manager"}|""" + page_scenario_manager + """|> <|part|render={page=="Performance"}|""" + page_performance + """|> """ if __name__ == "__main__": Gui(page=multi_pages).run(dark_mode=False)
|
import numpy as np import pandas as pd from step_04 import tp, baseline_pipeline_cfg, dt from step_02 import * # Initialize the "predictions" dataset predictions_dataset = pd.DataFrame( {"Date": [dt.datetime(2021, 6, 1)], "Historical values": [np.NaN], "Predicted values": [np.NaN]}) # Add a button and a chart for our predictions pipeline_page = page + """ Press <|predict|button|on_action=predict|> to predict with default parameters (30 predictions) and June 1st as day. <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> """ def predict(state): print("'Predict' button clicked") pipeline = create_and_submit_pipeline() update_predictions_dataset(state, pipeline) def create_and_submit_pipeline(): print("Execution of pipeline...") # Create the pipeline from the pipeline config pipeline = tp.create_pipeline(baseline_pipeline_cfg) # Submit the pipeline (Execution) tp.submit(pipeline) return pipeline def create_predictions_dataset(pipeline): print("Creating predictions dataset...") # Read data from the pipeline predictions = pipeline.predictions.read() day = pipeline.day.read() n_predictions = pipeline.n_predictions.read() cleaned_data = pipeline.cleaned_dataset.read() # Set arbitrarily the time window for the chart as 5 times the number of predictions window = 5 * n_predictions # Create the historical dataset that will be displayed new_length = len(cleaned_data[cleaned_data["Date"] < day]) + n_predictions temp_df = cleaned_data[:new_length] temp_df = temp_df[-window:].reset_index(drop=True) # Create the series that will be used in the concat historical_values = pd.Series(temp_df["Value"], name="Historical values") predicted_values = pd.Series([np.NaN] * len(temp_df), name="Predicted values") predicted_values[-len(predictions):] = predictions # Create the predictions dataset # Columns : [Date, Historical values, Predicted values] return pd.concat([temp_df["Date"], historical_values, predicted_values], axis=1) def update_predictions_dataset(state, pipeline): print("Updating predictions dataset...") state.predictions_dataset = create_predictions_dataset(pipeline) if __name__ == "__main__": Gui(page=pipeline_page).run(dark_mode=False)
|
from taipy import Gui import pandas as pd def get_data(path_to_csv: str): # pandas.read_csv() returns a pd.DataFrame dataset = pd.read_csv(path_to_csv) dataset["Date"] = pd.to_datetime(dataset["Date"]) return dataset # Read the dataframe path_to_csv = "dataset.csv" dataset = get_data(path_to_csv) # Initial value n_week = 10 # Definition of the page page = """ # Getting started with Taipy Week number: *<|{n_week}|>* Interact with this slider to change the week number: <|{n_week}|slider|min=1|max=52|> ## Dataset: Display the last three months of data: <|{dataset[9000:]}|chart|type=bar|x=Date|y=Value|height=100%|> <|{dataset}|table|height=400px|width=95%|> """ if __name__ == "__main__": # Create a Gui object with our page content Gui(page=page).run(dark_mode=False)
|
from step_10 import * from step_06 import ml_pipeline_cfg from taipy import Config, Frequency from taipy.gui import notify # Create scenarios each day and compare them scenario_daily_cfg = Config.configure_scenario(id="scenario", pipeline_configs=[baseline_pipeline_cfg, ml_pipeline_cfg], frequency=Frequency.DAILY) if __name__ == "__main__": # Delete all entities Config.configure_global_app(clean_entities_enabled=True) tp.clean_all_entities() # Change the initial scenario selector to see which scenarios are primary scenario_selector = [(scenario.id, ("*" if scenario.is_primary else "") + scenario.name) for scenario in tp.get_scenarios()] # Redefine update_scenario_selector to add "*" in the display name when the scenario is primary def update_scenario_selector(state, scenario): print("Updating scenario selector...") # Create the scenario name for the scenario selector # This name changes depending on whether the scenario is primary or not scenario_name = ("*" if scenario.is_primary else "") + scenario.name print(scenario_name) # Update the scenario selector state.scenario_selector += [(scenario.id, scenario_name)] selected_scenario_is_primary = None # Change the create_scenario function to create a scenario with the selected frequency def create_scenario(state): print("Execution of scenario...") # Extra information for scenario creation_date = state.day name = create_name_for_scenario(state) # Create a scenario with the daily cycle scenario = tp.create_scenario(scenario_daily_cfg, creation_date=creation_date, name=name) state.selected_scenario = (scenario.id, name) # Change the scenario that is currently selected submit_scenario(state) # This is the same code as in step_9_dynamic_scenario_creation.py def submit_scenario(state): print("Submitting scenario...") # Get the currently selected scenario scenario = tp.get(state.selected_scenario[0]) # Conversion to the right format state_day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the Data Nodes # if state.day != scenario.day.read(): scenario.day.write(state_day) # if int(state.n_predictions) != scenario.n_predictions.read(): scenario.n_predictions.write(int(state.n_predictions)) # if state.max_capacity != scenario.max_capacity.read(): scenario.max_capacity.write(int(state.max_capacity)) # if state.day != scenario.creation_date: scenario.creation_date = state.day # Execute the pipelines/code tp.submit(scenario) # Update the scenario selector and the scenario that is currently selected update_scenario_selector(state, scenario) # change list to scenario # Update the chart directly update_chart(state) def make_primary(state): print("Making the current scenario primary...") scenario = tp.get(state.selected_scenario[0]) # Make the current scenario primary tp.set_primary(scenario) # Update the scenario selector accordingly state.scenario_selector = [(scenario.id, ("*" if scenario.is_primary else "") + scenario.name) for scenario in tp.get_scenarios()] state.selected_scenario_is_primary = True def remove_scenario_from_selector(state, scenario: list): # Keep all the scenarios in the selector that don't have the scenario.id state.scenario_selector = [(s[0], s[1]) for s in state.scenario_selector if s[0] != scenario.id] state.selected_scenario = state.scenario_selector[-1] def delete_scenario(state): scenario = tp.get(state.selected_scenario[0]) if scenario.is_primary: # Notify the user that primary scenarios cannot be deleted notify(state, "info", "Cannot delete the primary scenario") else: # Delete the scenario and the related objects (datanodes, tasks, jobs,...) tp.delete(scenario.id) # Update the scenario selector accordingly remove_scenario_from_selector(state, scenario) # Add "Delete scenario" and "Make primary" buttons page_scenario_manager = """ # Create your scenario: <|layout|columns=1 1 1 1| <| **Prediction date**\n\n <|{day}|date|not with_time|> |> <| **Max capacity**\n\n <|{max_capacity}|number|> |> <| **Number of predictions**\n\n<|{n_predictions}|number|> |> <| <br/> <br/> <|Create new scenario|button|on_action=create_scenario|> |> |> <|part|render={len(scenario_selector) > 0}| <|layout|columns=1 1| <|layout|columns=1 1| <| ## Scenario \n <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> |> <br/> <br/> <br/> <br/> <|Delete scenario|button|on_action=delete_scenario|active={len(scenario_selector)>0}|> <|Make primary|button|on_action=make_primary|active={not(selected_scenario_is_primary) and len(scenario_selector)>0}|> |> <| ## Display the pipeline \n <|{selected_pipeline}|selector|lov={pipeline_selector}|dropdown|> |> |> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> |> """ # Redefine the multi_pages multi_pages = """ <|menu|label=Menu|lov={["Data Visualization", "Scenario Manager"]}|on_action=menu_fct|> <|part|render={page=="Data Visualization"}|""" + page_data_visualization + """|> <|part|render={page=="Scenario Manager"}|""" + page_scenario_manager + """|> """ def on_change(state, var_name: str, var_value): if var_name == "n_week": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == var_value] elif var_name == "selected_pipeline" or var_name == "selected_scenario": # Update selected_scenario_is_primary indicating if the current scenario is primary or not state.selected_scenario_is_primary = tp.get(state.selected_scenario[0]).is_primary # Check if we can read the data node to update the chart if tp.get(state.selected_scenario[0]).predictions.read() is not None: update_chart(state) if __name__ == "__main__": Gui(page=multi_pages).run(dark_mode=False)
|
from taipy import Gui # A dark mode is available in Taipy # However, we will use the light mode for the Getting Started Gui(page="# Getting started with *Taipy*").run(dark_mode=False)
|
from step_09 import * # Our first page is the original page # (with the slider and the chart that displays a week of the historical data) page_data_visualization = page # Second page: create scenarios and display results page_scenario_manager = """ # Create your scenario <|layout|columns=1 1 1 1| <| **Prediction date**\n\n <|{day}|date|not with_time|> |> <| **Max capacity**\n\n <|{max_capacity}|number|> |> <| **Number of predictions**\n\n<|{n_predictions}|number|> |> <| <br/> <br/>\n <|Create new scenario|button|on_action=create_scenario|> |> |> <|part|render={len(scenario_selector) > 0}| <|layout|columns=1 1| <| ## Scenario \n <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> |> <| ## Display the pipeline \n <|{selected_pipeline}|selector|lov={pipeline_selector}|dropdown|> |> |> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> |> """ # Create a menu with our pages multi_pages = """ <|menu|label=Menu|lov={["Data Visualization", "Scenario Manager"]}|on_action=menu_fct|> <|part|render={page=="Data Visualization"}|""" + page_data_visualization + """|> <|part|render={page=="Scenario Manager"}|""" + page_scenario_manager + """|> """ # The initial page is the "Data Visualization" page page = "Data Visualization" def menu_fct(state, var_name: str, fct: str, var_value: list): # Change the value of the state.page variable in order to render the correct page state.page = var_value["args"][0] if __name__ == "__main__": Gui(page=multi_pages).run(dark_mode=False)
|
import taipy as tp from step_03 import Config, clean_data_task_cfg, predict_baseline_task_cfg, dt # Create the first pipeline configuration baseline_pipeline_cfg = Config.configure_pipeline(id="baseline", task_configs=[clean_data_task_cfg, predict_baseline_task_cfg]) ## Execute the "baseline" pipeline if __name__ == "__main__": # Create the pipeline baseline_pipeline = tp.create_pipeline(baseline_pipeline_cfg) # Submit the pipeline (Execution) tp.submit(baseline_pipeline) # Read output data from the pipeline baseline_predictions = baseline_pipeline.predictions.read() print("Predictions of baseline algorithm\n", baseline_predictions)
|
from taipy.gui import Gui from keras.models import load_model from PIL import Image import numpy as np class_names = { 0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck', } model = load_model("Neural Network Notebook/Cifar10Model.keras") def predict_image(model, path_to_img): img = Image.open(path_to_img) img = img.convert("RGB") img = img.resize((32, 32)) data = np.asarray(img) data = data / 255 probs = model.predict(np.array([data])[:1]) top_prob = probs.max() top_pred = class_names[np.argmax(probs)] return top_prob, top_pred content = "" img_path = "placeholder_image.png" prob = 0 pred = "" index = """ <|text-center| <|{"logo.png"}|image|width=16vw|> <|{content}|file_selector|extensions=.png|> Select an image from your file system <|{pred}|> <|{img_path}|image|> <|{prob}|indicator|value={prob}|min=0|max=100|width=25vw|> |> """ def on_change(state, var_name, var_val): if var_name == "content": top_prob, top_pred = predict_image(model, var_val) state.prob = round(top_prob * 100) state.pred = "this is a " + top_pred state.img_path = var_val #print(var_name, var_val) app = Gui(page=index) if __name__ == "__main__": app.run(use_reloader=True)
|
import pandas as pd from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.metrics import mean_squared_error import numpy as np # Import numpy for RMSE calculation from prophet import Prophet def build_message(name: str): return f"Hello {name}!" def clean_data(initial_dataset: pd.DataFrame): return initial_dataset def retrained_model(cleaned_dataset: pd.DataFrame): # Split the dataset into features (X) and target (y) X = cleaned_dataset.drop('Claim_Amount', axis=1) y = cleaned_dataset['Claim_Amount'] # Split the data into training and testing sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # Define the categorical columns for one-hot encoding categorical_cols = ['Procedure_Code', 'Diagnosis_Code', 'Provider_Specialty', 'Insurance_Plan'] # Create a column transformer preprocessor = ColumnTransformer( transformers=[ ('cat', OneHotEncoder(drop='first'), categorical_cols) ], remainder='passthrough' ) # Create a pipeline with preprocessing and the Random Forest Regressor model = Pipeline([ ('preprocessor', preprocessor), ('regressor', RandomForestRegressor(n_estimators=100, random_state=42)) ]) # Fit the model on the training data model.fit(X_train, y_train) # Make predictions on the test set predictions = model.predict(X_test) # Calculate Mean Squared Error (MSE) mse = mean_squared_error(y_test, predictions) # Calculate Root Mean Squared Error (RMSE) rmse = np.sqrt(mse) # Print the RMSE print(f"Mean Squared Error: {mse}") print(f"Root Mean Squared Error (RMSE): {rmse}") return model def predict(model): # Example: Make a prediction for a new patient new_patient_data = pd.DataFrame({ 'Procedure_Code': ['CPT456'], 'Diagnosis_Code': ['ICD-10-B'], 'Provider_Specialty': ['Orthopedics'], 'Patient_Age': [35], 'Insurance_Plan': ['PPO'], 'Deductible': [200], 'Copayment': [30], 'Coinsurance': [20], }, index=[0]) # Predict the claim amount for the new patient new_patient_claim = model.predict(new_patient_data) print(f"Predicted Claim Amount for New Patient: ${new_patient_claim[0]:.2f}")
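Nothing in this file actually runs retrained_model or predict; the following is a minimal, hypothetical driver that chains them on a small synthetic claims table. All column values below are invented for illustration, and the module name functions is assumed from the imports used elsewhere in this project.

# Hypothetical driver for the functions defined above, using synthetic data only.
import numpy as np
import pandas as pd
from functions import clean_data, retrained_model, predict  # assumed module name

rng = np.random.default_rng(0)
n = 60
raw = pd.DataFrame({
    "Procedure_Code": rng.choice(["CPT123", "CPT456"], n),
    "Diagnosis_Code": rng.choice(["ICD-10-A", "ICD-10-B"], n),
    "Provider_Specialty": rng.choice(["Orthopedics", "Cardiology"], n),
    "Patient_Age": rng.integers(20, 80, n),
    "Insurance_Plan": rng.choice(["PPO", "HMO"], n),
    "Deductible": rng.integers(100, 500, n),
    "Copayment": rng.integers(10, 50, n),
    "Coinsurance": rng.integers(10, 30, n),
    "Claim_Amount": rng.uniform(100, 5000, n),
})

model = retrained_model(clean_data(raw))  # prints MSE and RMSE on a held-out split
predict(model)                            # prints a claim estimate for one new patient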
|
import taipy as tp from taipy.core.config import Config Config.load('my_config.toml') scenario_cfg = Config.scenarios['scenario'] if __name__ == '__main__': tp.Core().run() scenario_1 = tp.create_scenario(scenario_cfg) print("submitting") scenario_1.submit() print("submission done")
|
from taipy.gui import Html html_page = Html(""" <head> <script src="https://maps.googleapis.com/maps/api/js?key=AIzaSyBIeklfsRu1yz97lY2gJzWHJcmrd7lx2zU&libraries=places"></script> <script type="text/javascript"> function initialize() { geocoder = new google.maps.Geocoder(); var mapOptions = { } var locations = ["12836 University Club Dr", "2204 Fitness Club Way"]; var markers = []; var iterator = 0; var bounds = new google.maps.LatLngBounds(); for (var i = 0; i < locations.length; i++) { setTimeout(function() { geocoder.geocode({'address': locations[iterator]}, function(results, status){ if (status == google.maps.GeocoderStatus.OK) { var marker = new google.maps.Marker({ map: map, position: results[0].geometry.location, animation: google.maps.Animation.DROP }); bounds.extend(marker.getPosition()); map.fitBounds(bounds); } else { console.log('Geocode was not successful for the following reason: ' + status); } }); iterator++; }, i * 250); } var map = new google.maps.Map(document.getElementById("map-canvas"), mapOptions); } google.maps.event.addDomListener(window, 'load', initialize); </script> </head> <body> <div id="map-canvas" style="width: 100%; height: 400px;"></div> </body> """)
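The file above only builds the html_page object and never serves it; a minimal sketch of how it could be rendered with Taipy might look like the snippet below. The module name show_hospitals_map is the one imported elsewhere in this project; everything else is an assumption.

# Hypothetical runner for the Google Maps Html page defined above.
from taipy.gui import Gui
from show_hospitals_map import html_page  # module name taken from the stock app imports

if __name__ == "__main__":
    # Serve the raw HTML page; dark mode is kept off to match the other pages.
    Gui(page=html_page).run(dark_mode=False)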
|
from geopy.geocoders import Nominatim import folium user_agent = "geoapiExercises/1.0 AIzaSyBIeklfsRu1yz97lY2gJzWHJcmrd7lx2zU" # Initialize the geocoder with the user agent geolocator = Nominatim(user_agent=user_agent, timeout=10) # List of locations to geocode locations = ["Denver, CO, United States", "New York, NY, United States", "Los Angeles, CA, United States"] # Create an empty map map_location = folium.Map(location=[0, 0], zoom_start=5) # Iterate through the list of locations for location in locations: # Perform geocoding location_info = geolocator.geocode(location) if location_info: # Extract latitude and longitude latitude = location_info.latitude longitude = location_info.longitude # Add a marker for the geocoded location folium.Marker([latitude, longitude], popup=location).add_to(map_location) else: print(f"Geocoding was not successful for the location: {location}") # Save or display the map (as an HTML file) map_location.save("geocoded_locations_map.html") print("Map created and saved as 'geocoded_locations_map.html'")
|
from taipy.gui import Gui, notify import pandas as pd import yfinance as yf from taipy.config import Config import taipy as tp import datetime as dt from taipy import Core from show_hospitals_map import html_page from flask import Flask, request, session, jsonify, redirect, render_template from flask_restful import Api, Resource import requests Config.load("config_model_train.toml") scenario_cfg = Config.scenarios['stock'] tickers = yf.Tickers("msft aapl goog") root_md = "<|navbar|>" property_chart = { "type": "lines", "x": "Date", "y[1]": "Open", "y[2]": "Close", "y[3]": "High", "y[4]": "Low", "color[1]": "green", "color[2]": "grey", "color[3]": "red", "color[4]": "yellow", } df = pd.DataFrame([], columns=["Date", "High", "Low", "Open", "Close"]) df_pred = pd.DataFrame([], columns = ['Date','Close_Prediction']) stock = "" stock_text = "No Stock to Show" chart_text = "No Chart to Show" pred_text = "No Prediction to Show" stocks = [] page = """ # Stock Portfolio ### Choose the stock to show <|toggle|theme|> <|layout|columns=1 1| <| <|{stock_text}|> <|{stock}|selector|lov=MSFT;AAPL;GOOG;Reset|dropdown|> <|Press for Stock|button|on_action=on_button_action|> <|Get the future predictions|button|on_action=get_predictions|> |> <|{stock} <|{chart_text}|> <|{df}|chart|properties={property_chart}|> |> |> """ pages = { "/" : root_md, "home" : page, "claim": "empty page" } def on_button_action(state): if state.stock == "Reset": state.stock_text = "No Stock to Show" state.chart_text = "No Chart to Show" state.df = pd.DataFrame([], columns=["Date", "High", "Low", "Open", "Close"]) state.df_pred = pd.DataFrame([], columns = ['Date','Close_Prediction']) state.pred_text = "No Prediction to Show" else: state.stock_text = f"The stock is {state.stock}" state.chart_text = f"Monthly history of stock {state.stock}" state.df = tickers.tickers[state.stock].history().reset_index() state.df.to_csv(f"{state.stock}.csv", index=False) def get_predictions(state): scenario_stock = tp.create_scenario(scenario_cfg) scenario_stock.initial_dataset.path = f"{state.stock}.csv" notify(state, 'success', 'Submitting the prediction scenario...') scenario_stock.initial_dataset.write(state.df) tp.submit(scenario_stock) state.df_pred = scenario_stock.predictions.read() state.df_pred.to_csv("pred.csv", index=False) tp.Core().run() # Gui(pages=pages).run(use_reloader=True) app = Flask(__name__) app.secret_key = "your_secret_key" # Set a secret key for session management api = Api(app) class SignupResource(Resource): def get(self): return redirect("/signup.html") def post(self): SIGNUP_API_URL = "https://health-insurance-rest-apis.onrender.com/api/signup" signup_data = { 'username': request.form['username'], 'password': request.form['password'], 'email': request.form['email'] } headers = { 'Content-Type': 'application/json' } print(signup_data) response = requests.post(SIGNUP_API_URL, headers=headers, json=signup_data) print("response", response) if response.status_code == 200: return redirect("/login.html") else: return 'Signup Failed' # Login Resource class LoginResource(Resource): def get(self): """ Return a simple login page HTML """ return redirect("/login.html") def post(self): email = request.form['email'] password = request.form['password'] auth_data = { 'username': email, 'password': password } AUTH_API_URL = "https://health-insurance-rest-apis.onrender.com/api/login" response = requests.post(AUTH_API_URL, json=auth_data) if response.status_code == 200: auth_data = response.json() access_token = auth_data.get('access_token') refresh_token = auth_data.get('refresh_token') # Store tokens in the session session['access_token'] = access_token session['refresh_token'] = refresh_token return redirect("/home") else: return 'Login failed', 401 # Protected Resource class ProtectedResource(Resource): def get(self): # Check if the JWT token is present in the session if 'jwt_token' in session: jwt_token = session['jwt_token'] # You can add logic here to verify the JWT token if needed # For simplicity, we assume the token is valid return {'message': 'Access granted for protected route', 'jwt_token': jwt_token}, 200 else: return {'message': 'Access denied'}, 401 print("registered the apis") # Add resources to the API api.add_resource(LoginResource, '/login') api.add_resource(ProtectedResource, '/protected') api.add_resource(SignupResource, '/signup') @app.before_request def check_access_token(): # print ('access_token' in session, "checkIt") if request.endpoint != 'login' and 'access_token' not in session: # # Redirect to the login page if not on the login route and no access_token is in the session # print(request.endpoint, "endpoint") return redirect("/login") gui = Gui(pages=pages, flask=app).run(debug=False)
|
from taipy import Config, Scope import pandas as pd from prophet import Prophet from functions import * # Input Data Nodes initial_dataset_cfg = Config.configure_data_node(id="initial_dataset", storage_type="csv", default_path='df.csv') cleaned_dataset_cfg = Config.configure_data_node(id="cleaned_dataset") clean_data_task_cfg = Config.configure_task(id="clean_data_task", function=clean_data, input=initial_dataset_cfg, output=cleaned_dataset_cfg, skippable=True) model_training_cfg = Config.configure_data_node(id="model_output") predictions_cfg = Config.configure_data_node(id="predictions") model_training_task_cfg = Config.configure_task(id="model_retraining_task", function=retrained_model, input=cleaned_dataset_cfg, output=model_training_cfg, skippable=True) predict_task_cfg = Config.configure_task(id="predict_task", function=predict, input=model_training_cfg, output=predictions_cfg, skippable=True) # Create the first pipeline configuration # retraining_model_pipeline_cfg = Config.configure_pipeline( # id="model_retraining_pipeline", # task_configs=[clean_data_task_cfg, model_training_task_cfg], # ) # Run the Taipy Core service # import taipy as tp # # Run of the Taipy Core service # tp.Core().run() # # Create the pipeline # retrain_pipeline = tp.create_pipeline(retraining_model_pipeline_cfg) # # Submit the pipeline # tp.submit(retrain_pipeline) # tp.Core().stop() scenario_cfg = Config.configure_scenario_from_tasks(id="stock", task_configs=[clean_data_task_cfg, model_training_task_cfg, predict_task_cfg]) # tp.Core().run() # tp.submit(scenario_cfg) Config.export("config_model_train.toml")
|
from taipy import Config from functions import build_message name_data_node_cfg = Config.configure_data_node(id="name") message_data_node_cfg = Config.configure_data_node(id="message") build_msg_task_cfg = Config.configure_task("build_msg", build_message, name_data_node_cfg, message_data_node_cfg) scenario_cfg = Config.configure_scenario_from_tasks("scenario", task_configs=[build_msg_task_cfg]) Config.export('my_config.toml')
|
from functools import wraps import jwt from flask import request, abort from flask import current_app def token_required(f): @wraps(f) def decorated(*args, **kwargs): token = None if "Authorization" in request.headers: token = request.headers["Authorization"].split(" ")[1] if not token: return { "message": "Authentication Token is missing!", "data": None, "error": "Unauthorized" }, 401 try: # data=jwt.decode(token, current_app.config["SECRET_KEY"], algorithms=["RS256"]) print("got the token") # current_user=models.User().get_by_id(data["user_id"]) # Placeholder user so the checks below work; replace with a real lookup current_user = {"user_id": 12, "active": True} if current_user is None: return { "message": "Invalid Authentication token!", "data": None, "error": "Unauthorized" }, 401 if not current_user["active"]: abort(403) except Exception as e: return { "message": "Something went wrong", "data": None, "error": str(e) }, 500 return f(current_user, *args, **kwargs) return decorated
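The decorator above is defined but never applied; a minimal sketch of how it might protect a Flask route could look like the following. The /profile endpoint, the auth_middleware module name, and the secret key are illustrative assumptions.

# Hypothetical usage of the token_required decorator defined above.
from flask import Flask, jsonify
from auth_middleware import token_required  # assumed module name for the file above

app = Flask(__name__)
app.config["SECRET_KEY"] = "change-me"  # placeholder secret for jwt.decode

@app.route("/profile")
@token_required
def profile(current_user):
    # current_user is injected by the decorator after the token checks pass
    return jsonify({"message": "token accepted", "user": current_user})

if __name__ == "__main__":
    app.run(debug=True)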
|
from flask import Flask, request, session, jsonify from flask_restful import Api, Resource app = Flask(__name__) app.secret_key = "your_secret_key" # Set a secret key for session management api = Api(app) # Dummy user data for demonstration users = { 'maneesh': {'password': 'securepassword'} } # Login Resource class LoginResource(Resource): def post(self): data = request.get_json() username = data.get('username') password = data.get('password') print("hello") # Check if user exists and password is correct if username in users and users[username]['password'] == password: # Simulate receiving a JWT token from a third-party API jwt_token = "your_received_jwt_token" # Store the JWT token in the session session['jwt_token'] = jwt_token return {'message': 'Login successful'}, 200 else: return {'message': 'Invalid credentials'}, 401 # Protected Resource class ProtectedResource(Resource): def get(self): # Check if the JWT token is present in the session if 'jwt_token' in session: jwt_token = session['jwt_token'] # You can add logic here to verify the JWT token if needed # For simplicity, we assume the token is valid return {'message': 'Access granted for protected route', 'jwt_token': jwt_token}, 200 else: return {'message': 'Access denied'}, 401 # Add resources to the API api.add_resource(LoginResource, '/login') api.add_resource(ProtectedResource, '/protected') if __name__ == '__main__': app.run(debug=True)
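For completeness, here is a small client-side sketch (using the requests library; the localhost URL is an assumption) showing how the /login and /protected resources above could be exercised. requests.Session() keeps the Flask session cookie between the two calls.

# Hypothetical client for the login/protected API defined above.
import requests

BASE_URL = "http://127.0.0.1:5000"  # assumed local address of the Flask app

with requests.Session() as s:
    # Log in with the demo credentials hard-coded in the users dict above.
    login = s.post(f"{BASE_URL}/login", json={"username": "maneesh", "password": "securepassword"})
    print(login.status_code, login.json())
    # The session cookie set by Flask is reused automatically for this call.
    protected = s.get(f"{BASE_URL}/protected")
    print(protected.status_code, protected.json())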
|