# Create app for demo-drift-detection root.py
"""
The root page of the application.
Page content is imported from the root.md file.
Please refer to https://docs.taipy.io/en/latest/manuals/gui/pages for more details.
"""
from taipy.gui import Markdown
root_page = Markdown("pages/root.md")
|
# Create app for demo-drift-detection Drift.py
"""
A page of the application.
Page content is imported from the Drift.md file.
Please refer to https://docs.taipy.io/en/latest/manuals/gui/pages for more details.
"""
import taipy as tp
import pandas as pd
from taipy.gui import Markdown, notify
from configuration.config import scenario_cfg
Drift = Markdown("pages/Drift/Drift.md")
def merge_data(ref_data: pd.DataFrame, compare_data: pd.DataFrame):
"""
Merges the reference and comparison data into a single dataframe.
The Dataframe is prepared for plotting.
Args:
ref_data: The reference data.
compare_data: The comparison data.
    Returns:
        bp_data: The blood pressure series from both datasets, formatted for the histogram chart.
        sex_data: The male/female counts per dataset, formatted for the bar chart.
"""
bp_data = [
{"Blood Pressure": list(ref_data["blood_pressure"])},
{"Blood Pressure": list(compare_data["blood_pressure"])},
]
# Count the Male and Female rows in ref and compare
male_ref = ref_data[ref_data["sex"] == "Male"].shape[0]
male_compare = compare_data[compare_data["sex"] == "Male"].shape[0]
female_ref = ref_data[ref_data["sex"] == "Female"].shape[0]
female_compare = compare_data[compare_data["sex"] == "Female"].shape[0]
sex_data = pd.DataFrame(
{
"Dataset": ["Ref", "Compare"],
"Male": [male_ref, male_compare],
"Female": [female_ref, female_compare],
}
)
return bp_data, sex_data
def on_ref_change(state):
state.ref_data = pd.read_csv("data/" + state.ref_selected + ".csv")
state.scenario.reference_data.write(state.ref_data)
state.bp_data, state.sex_data = merge_data(state.ref_data, state.compare_data)
def on_compare_change(state):
state.compare_data = pd.read_csv("data/" + state.compare_selected + ".csv")
state.scenario.compare_data.write(state.compare_data)
state.bp_data, state.sex_data = merge_data(state.ref_data, state.compare_data)
bp_options = [
    # First data set displayed as green-ish, with 10 bins
    {
        "marker": {"color": "#4A4", "opacity": 0.8},
        "nbinsx": 10,
    },
    # Second data set displayed as red-ish, with 10 bins
    {
        "marker": {"color": "#A33", "opacity": 0.8},
        "nbinsx": 10,
    },
]
bp_layout = {
# Overlay the two histograms
"barmode": "overlay",
"title": "Blood Pressure Distribution (Green = Reference, Red = Compare)",
"showlegend": False,
}
def on_submission_status_change(state, submittable, details):
submission_status = details.get("submission_status")
if submission_status == "COMPLETED":
notify(state, "success", "Drift Detection Completed")
state.refresh("scenario")
|
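# Hypothetical sketch of demo-drift-detection configuration/config.py
# The real configuration module is not included in this dump. This sketch only
# reuses names visible above (reference_data, compare_data, drift_results,
# scenario_cfg); the detect_drift task and its Kolmogorov-Smirnov test are
# assumptions, not the shipped implementation.
import pandas as pd
from scipy import stats
from taipy import Config

def detect_drift(reference_data: pd.DataFrame, compare_data: pd.DataFrame) -> pd.DataFrame:
    # Assumed drift test: a two-sample Kolmogorov-Smirnov test per numeric column
    rows = []
    for col in reference_data.select_dtypes("number").columns:
        statistic, p_value = stats.ks_2samp(reference_data[col], compare_data[col])
        rows.append({"column": col, "ks_statistic": statistic,
                     "p_value": p_value, "drift": p_value < 0.05})
    return pd.DataFrame(rows)

reference_data_cfg = Config.configure_data_node("reference_data")
compare_data_cfg = Config.configure_data_node("compare_data")
drift_results_cfg = Config.configure_data_node("drift_results")
detect_drift_cfg = Config.configure_task("detect_drift",
                                         function=detect_drift,
                                         input=[reference_data_cfg, compare_data_cfg],
                                         output=drift_results_cfg)
scenario_cfg = Config.configure_scenario("drift_scenario",
                                         task_configs=[detect_drift_cfg])
|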
# Create app for demo-drift-detection Drift.md
<|layout|columns=1 1|
<|part|class_name=card|
### Select Reference Data<br/>
<|{ref_selected}|selector|lov=data_ref;data_noisy;data_female;data_big|dropdown|on_change=on_ref_change|>
|>
<|part|class_name=card|
### Select Comparison Data<br/>
<|{compare_selected}|selector|lov=data_ref;data_noisy;data_female;data_big|dropdown|on_change=on_compare_change|>
|>
|>
<|Reference Dataset and Compare Dataset|expandable|expanded=True|
<|layout|columns=1 1|
<|{ref_data}|table|page_size=5|>
<|{compare_data}|table|page_size=5|>
|>
|>
<|layout|columns=1 1|
<|part|class_name=card|
<|{sex_data}|chart|type=bar|x=Dataset|y[1]=Male|y[2]=Female|title=Sex Distribution|>
|>
<|part|class_name=card|
<|{bp_data}|chart|type=histogram|options={bp_options}|layout={bp_layout}|>
|>
|>
<br/>
### Run the scenario:
<|{scenario}|scenario|on_submission_change=on_submission_status_change|expandable=False|expanded=False|>
<|{scenario}|scenario_dag|>
<br/>
### View the results:
<|{scenario.drift_results if scenario else None}|data_node|>
|
# Create app for demo-covid-dashboard main.py
from taipy.gui import Gui
import taipy as tp
from pages.country.country import country_md
from pages.world.world import world_md
from pages.map.map import map_md
from pages.predictions.predictions import predictions_md, selected_scenario
from pages.root import root, selected_country, selector_country
from config.config import Config
pages = {
    "/": root,
    "Country": country_md,
    "World": world_md,
    "Map": map_md,
    "Predictions": predictions_md,
}
gui_multi_pages = Gui(pages=pages)
if __name__ == '__main__':
tp.Core().run()
gui_multi_pages.run(title="Covid Dashboard")
|
# Create app for demo-covid-dashboard config.py
from taipy.config import Config, Scope
import datetime as dt
from algos.algos import (preprocess, train_arima, train_linear_regression,
                         forecast, forecast_linear_regression, concat)
#Config.configure_job_executions(mode="standalone", nb_of_workers=2)
path_to_data = "data/covid-19-all.csv"
initial_data_cfg = Config.configure_data_node(id="initial_data",
storage_type="csv",
path=path_to_data,
cacheable=True,
validity_period=dt.timedelta(days=5),
scope=Scope.GLOBAL)
country_cfg = Config.configure_data_node(id="country", default_data="France",
validity_period=dt.timedelta(days=5))
date_cfg = Config.configure_data_node(id="date", default_data=dt.datetime(2020,10,1),
validity_period=dt.timedelta(days=5))
final_data_cfg = Config.configure_data_node(id="final_data",
validity_period=dt.timedelta(days=5))
train_data_cfg = Config.configure_data_node(id="train_data",
validity_period=dt.timedelta(days=5))
model_arima_cfg = Config.configure_data_node(id="model_arima", validity_period=dt.timedelta(days=5))
model_linear_regression_cfg = Config.configure_data_node(id="model_linear_regression", validity_period=dt.timedelta(days=5))
predictions_arima_cfg = Config.configure_data_node(id="predictions_arima")
predictions_linear_regression_cfg = Config.configure_data_node(id="predictions_linear_regression")
result_cfg = Config.configure_data_node(id="result")
task_preprocess_cfg = Config.configure_task(id="task_preprocess_data",
function=preprocess,
input=[initial_data_cfg, country_cfg, date_cfg],
output=[final_data_cfg,train_data_cfg])
task_train_arima_cfg = Config.configure_task(id="task_train",
function=train_arima,
input=train_data_cfg,
output=model_arima_cfg)
task_forecast_arima_cfg = Config.configure_task(id="task_forecast",
function=forecast,
input=model_arima_cfg,
output=predictions_arima_cfg)
task_train_linear_regression_cfg = Config.configure_task(id="task_train_linear_regression",
function=train_linear_regression,
input=train_data_cfg,
output=model_linear_regression_cfg)
task_forecast_linear_regression_cfg = Config.configure_task(id="task_forecast_linear_regression",
function=forecast_linear_regression,
input=[model_linear_regression_cfg, date_cfg],
output=predictions_linear_regression_cfg)
task_result_cfg = Config.configure_task(id="task_result",
function=concat,
input=[final_data_cfg, predictions_arima_cfg, predictions_linear_regression_cfg, date_cfg],
output=result_cfg)
scenario_cfg = Config.configure_scenario(id='scenario', task_configs=[task_preprocess_cfg,
task_train_arima_cfg,
task_forecast_arima_cfg,
task_train_linear_regression_cfg,
task_forecast_linear_regression_cfg,
task_result_cfg])
Config.export('config/config.toml')
|
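# Hypothetical standalone run of the demo-covid-dashboard configuration above.
# A minimal sketch: the data node ids (country, result) are the ones configured
# above; everything else (the chosen country, the printed tail) is illustrative.
import taipy as tp
from config.config import scenario_cfg

if __name__ == "__main__":
    tp.Core().run()
    scenario = tp.create_scenario(scenario_cfg)
    scenario.country.write("Italy")       # overrides the "France" default
    tp.submit(scenario)
    # Merged actuals plus ARIMA and linear-regression predictions
    print(scenario.result.read().tail())
|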
# Create app for demo-covid-dashboard algos.py
import pandas as pd
from sklearn.linear_model import LinearRegression
import datetime as dt
import numpy as np
from pmdarima import auto_arima
def add_features(data):
dates = pd.to_datetime(data["Date"])
data["Months"] = dates.dt.month
data["Days"] = dates.dt.isocalendar().day
data["Week"] = dates.dt.isocalendar().week
data["Day of week"] = dates.dt.dayofweek
return data
def create_train_data(final_data, date:dt.datetime):
    date = date.date() if isinstance(date, dt.datetime) else date
bool_index = pd.to_datetime(final_data['Date']).dt.date <= date
train_data = final_data[bool_index]
return train_data
def preprocess(initial_data, country, date):
data = initial_data.groupby(["Country/Region",'Date'])\
.sum()\
.dropna()\
.reset_index()
final_data = data.loc[data['Country/Region']==country].reset_index(drop=True)
final_data = final_data[['Date','Deaths']]
final_data = add_features(final_data)
train_data = create_train_data(final_data, date)
return final_data, train_data
def train_arima(train_data):
model = auto_arima(train_data['Deaths'],
start_p=1, start_q=1,
max_p=5, max_q=5,
start_P=0, seasonal=False,
d=1, D=1, trace=True,
error_action='ignore',
suppress_warnings=True)
model.fit(train_data['Deaths'])
return model
def forecast(model):
predictions = model.predict(n_periods=60)
return np.array(predictions)
def concat(final_data, predictions_arima, predictions_linear_regression, date):
def _convert_predictions(final_data, predictions, date, label='Predictions'):
dates = pd.to_datetime([date + dt.timedelta(days=i)
for i in range(len(predictions))])
final_data['Date'] = pd.to_datetime(final_data['Date'])
final_data = final_data[['Date','Deaths']]
predictions = pd.concat([pd.Series(dates, name="Date"),
pd.Series(predictions, name=label)], axis=1)
return final_data.merge(predictions, on="Date", how="outer")
result_arima = _convert_predictions(final_data, predictions_arima, date, label='ARIMA')
result_linear_regression = _convert_predictions(final_data, predictions_linear_regression, date, label='Linear Regression')
return result_arima.merge(result_linear_regression, on=["Date", 'Deaths'], how="outer").sort_values(by='Date')
def train_linear_regression(train_data):
y = train_data['Deaths']
X = train_data.drop(['Deaths','Date'], axis=1)
model = LinearRegression()
model.fit(X,y)
return model
def forecast_linear_regression(model, date):
dates = pd.to_datetime([date + dt.timedelta(days=i)
for i in range(60)])
X = add_features(pd.DataFrame({"Date":dates}))
X.drop('Date', axis=1, inplace=True)
predictions = model.predict(X)
return predictions
|
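# Quick smoke test for the demo-covid-dashboard helpers above (illustrative,
# not part of the app). The synthetic frame is an assumption; the column names
# match the ones the functions expect.
import datetime as dt
import pandas as pd
from algos.algos import add_features, create_train_data, train_linear_regression, forecast_linear_regression

toy = pd.DataFrame({"Date": pd.date_range("2020-09-01", periods=90, freq="D").astype(str),
                    "Deaths": range(90)})
toy = add_features(toy)
train = create_train_data(toy, dt.datetime(2020, 10, 1))   # rows up to and including 2020-10-01
model = train_linear_regression(train)
print(forecast_linear_regression(model, dt.datetime(2020, 10, 2))[:5])
|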
# Create app for demo-covid-dashboard data.py
import pandas as pd
path_to_data = "data/covid-19-all.csv"
data = pd.read_csv(path_to_data, low_memory=False)
|
# Create app for demo-covid-dashboard root.md
<|toggle|theme|>
<center>
<|navbar|>
</center>
|
# Create app for demo-covid-dashboard root.py
from taipy.gui import Markdown
import numpy as np
from data.data import data
selector_country = list(np.sort(data['Country/Region'].astype(str).unique()))
selected_country = 'France'
root = Markdown("pages/root.md")
|
# Create app for demo-covid-dashboard world.py
from taipy.gui import Markdown
import numpy as np
import json
from data.data import data
type_selector = ['Absolute', 'Relative']
selected_type = type_selector[0]
def initialize_world(data):
data_world = data.groupby(["Country/Region",
'Date'])\
.sum()\
.reset_index()
    with open("data/pop.json", "r") as f:
        pop = json.load(f)
    # Unknown countries map to None so that dropna() removes them
    # (a default of 0 would yield infinite Deaths/100k values)
    data_world['Population'] = data_world['Country/Region'].map(lambda x: pop.get(x, [None, None])[1])
    data_world = data_world.dropna()\
                           .reset_index()
    data_world['Deaths/100k'] = data_world.loc[:, 'Deaths']/data_world.loc[:, 'Population']*100000
data_world_pie_absolute = data_world[['Country/Region', 'Deaths', 'Recovered', 'Confirmed']].groupby(["Country/Region"])\
.max()\
.sort_values(by='Deaths', ascending=False)[:20]\
.reset_index()
data_world_pie_relative = data_world[['Country/Region', 'Deaths/100k']].groupby(["Country/Region"])\
.max()\
.sort_values(by='Deaths/100k', ascending=False)[:20]\
.reset_index()
country_absolute = data_world_pie_absolute['Country/Region'].unique().tolist()
country_relative = data_world_pie_relative.loc[:,'Country/Region'].unique().tolist()
    # Exact match on the top-20 countries; str.contains with a joined regex
    # would also match substrings (e.g. "Niger" inside "Nigeria")
    data_world_evolution_absolute = data_world[data_world['Country/Region'].isin(country_absolute)]
    data_world_evolution_absolute = data_world_evolution_absolute.pivot(index='Date', columns='Country/Region', values='Deaths')\
                                                                 .reset_index()
    data_world_evolution_relative = data_world[data_world['Country/Region'].isin(country_relative)]
    data_world_evolution_relative = data_world_evolution_relative.pivot(index='Date', columns='Country/Region', values='Deaths/100k')\
                                                                 .reset_index()
return data_world, data_world_pie_absolute, data_world_pie_relative, data_world_evolution_absolute, data_world_evolution_relative
data_world,\
data_world_pie_absolute, data_world_pie_relative,\
data_world_evolution_absolute, data_world_evolution_relative = initialize_world(data)
data_world_evolution_absolute_properties = {"x": "Date"}
cols = [col for col in data_world_evolution_absolute.columns if col != "Date"]
for i, col in enumerate(cols):
    # Taipy indexed chart properties are 1-based (y[1], y[2], ...)
    data_world_evolution_absolute_properties[f'y[{i + 1}]'] = col
data_world_evolution_relative_properties = {"x": "Date"}
cols = [col for col in data_world_evolution_relative.columns if col != "Date"]
for i, col in enumerate(cols):
    data_world_evolution_relative_properties[f'y[{i + 1}]'] = col
world_md = Markdown("pages/world/world.md")
|
# Create app for demo-covid-dashboard world.md
# **World**{: .color-primary} Statistics
<br/>
<|layout|columns=1 1 1|gap=50px|
<|card|
**Deaths**{: .color-primary}
<|{'{:,}'.format(int(np.sum(data_world_pie_absolute['Deaths']))).replace(',', ' ')}|text|class_name=h2|>
|>
<|card|
**Recovered**{: .color-primary}
<|{'{:,}'.format(int(np.sum(data_world_pie_absolute['Recovered']))).replace(',', ' ')}|text|class_name=h2|>
|>
<|part|class_name=card|
**Confirmed**{: .color-primary}
<|{'{:,}'.format(int(np.sum(data_world_pie_absolute['Confirmed']))).replace(',', ' ')}|text|class_name=h2|>
|>
|>
<br/>
<|{selected_type}|toggle|lov={type_selector}|>
<|part|render={selected_type=='Absolute'}|
<|layout|columns=1 2|
<|{data_world_pie_absolute}|chart|type=pie|labels=Country/Region|values=Deaths|title=Distribution around the World|>
<|{data_world_evolution_absolute}|chart|properties={data_world_evolution_absolute_properties}|title=Evolution around the World|>
|>
|>
<|part|render={selected_type=='Relative'}|
<|layout|columns=1 2|
<|{data_world_pie_relative}|chart|type=pie|labels=Country/Region|values=Deaths/100k|>
<|{data_world_evolution_relative}|chart|properties={data_world_evolution_relative_properties}|>
|>
|>
|
# Create app for demo-covid-dashboard map.md
# **Map**{: .color-primary} Statistics
<|{data_province_displayed}|chart|type=scattermapbox|lat=Latitude|lon=Longitude|marker={marker_map}|layout={layout_map}|text=Text|mode=markers|height=800px|options={options}|>
|
# Create app for demo-covid-dashboard map.py
import numpy as np
from taipy.gui import Markdown
from data.data import data
marker_map = {"color":"Deaths", "size": "Size", "showscale":True, "colorscale":"Viridis"}
layout_map = {
"dragmode": "zoom",
"mapbox": { "style": "open-street-map", "center": { "lat": 38, "lon": -90 }, "zoom": 3}
}
options = {"unselected":{"marker":{"opacity":0.5}}}
def initialize_map(data):
data['Province/State'] = data['Province/State'].fillna(data["Country/Region"])
data_province = data.groupby(["Country/Region",
'Province/State',
'Longitude',
'Latitude'])\
.max()
data_province_displayed = data_province[data_province['Deaths']>10].reset_index()
data_province_displayed['Size'] = np.sqrt(data_province_displayed.loc[:,'Deaths']/data_province_displayed.loc[:,'Deaths'].max())*80 + 3
data_province_displayed['Text'] = data_province_displayed.loc[:,'Deaths'].astype(str) + ' deaths </br> ' + data_province_displayed.loc[:,'Province/State']
return data_province_displayed
data_province_displayed = initialize_map(data)
map_md = Markdown("pages/map/map.md")
|
# Create app for demo-covid-dashboard country.md
# **Country**{: .color-primary} Statistics
<|layout|columns=1 1 1|
<|{selected_country}|selector|lov={selector_country}|on_change=on_change_country|dropdown|label=Country|>
<|{selected_representation}|toggle|lov={representation_selector}|on_change=convert_density|>
|>
<br/>
<|layout|columns=1 1 1|gap=50px|
<|card|
**Deaths**{: .color-primary}
<|{'{:,}'.format(int(data_country_date.iloc[-1]['Deaths'])).replace(',', ' ')}|text|class_name=h2|>
|>
<|card|
**Recovered**{: .color-primary}
<|{'{:,}'.format(int(data_country_date.iloc[-1]['Recovered'])).replace(',', ' ')}|text|class_name=h2|>
|>
<|card|
**Confirmed**{: .color-primary}
<|{'{:,}'.format(int(data_country_date.iloc[-1]['Confirmed'])).replace(',', ' ')}|text|class_name=h2|>
|>
|>
<br/>
<|layout|columns=2 1|
<|{data_country_date}|chart|type=bar|x=Date|y[3]=Deaths|y[2]=Recovered|y[1]=Confirmed|layout={layout}|options={options}|title=Covid Evolution|>
<|{pie_chart}|chart|type=pie|values=values|labels=labels|title=Distribution between cases|>
|>
|
# Create app for demo-covid-dashboard country.py
import numpy as np
import pandas as pd
from taipy.gui import Markdown
from data.data import data
selected_country = 'France'
data_country_date = None
representation_selector = ['Cumulative', 'Density']
selected_representation = representation_selector[0]
layout = {'barmode':'stack', "hovermode":"x"}
options = {"unselected":{"marker":{"opacity":0.5}}}
def initialize_case_evolution(data, selected_country='France'):
# Aggregation of the dataframe to erase the regions that will not be used here
data_country_date = data.groupby(["Country/Region",'Date'])\
.sum()\
.reset_index()
# a country is selected, here France by default
data_country_date = data_country_date.loc[data_country_date['Country/Region']==selected_country]
return data_country_date
data_country_date = initialize_case_evolution(data)
# In the aggregated frame, iloc positions 4, 5 and 6 hold Confirmed, Recovered and Deaths
pie_chart = pd.DataFrame({"labels": ["Deaths", "Recovered", "Confirmed"],
                          "values": [data_country_date.iloc[-1, 6],
                                     data_country_date.iloc[-1, 5],
                                     data_country_date.iloc[-1, 4]]})
def convert_density(state):
    # 'Density' shows daily new counts (first difference of the cumulative series)
    if state.selected_representation == 'Density':
        df_temp = state.data_country_date.copy()
        df_temp['Deaths'] = df_temp['Deaths'].diff().fillna(0)
        df_temp['Recovered'] = df_temp['Recovered'].diff().fillna(0)
        df_temp['Confirmed'] = df_temp['Confirmed'].diff().fillna(0)
        state.data_country_date = df_temp
    else:
        state.data_country_date = initialize_case_evolution(data, state.selected_country)
def on_change_country(state):
    # 'state' holds all the GUI variables; assigning through it updates the GUI
    print("Chosen country: ", state.selected_country)
    state.data_country_date = initialize_case_evolution(data, state.selected_country)
state.pie_chart = pd.DataFrame({"labels": ["Deaths", "Recovered", "Confirmed"],
"values": [state.data_country_date.iloc[-1, 6], state.data_country_date.iloc[-1, 5], state.data_country_date.iloc[-1, 4]]})
convert_density(state)
country_md = Markdown("pages/country/country.md")
|
# Create app for demo-covid-dashboard predictions.py
from taipy.gui import Markdown, notify
import datetime as dt
selected_data_node = None
selected_scenario = None
selected_date = None
default_result = {"Date": [dt.datetime(2020,10,1)], "Deaths": [0], "ARIMA": [0], "Linear Regression": [0]}
def on_submission_change(state, submittable, details):
if details['submission_status'] == 'COMPLETED':
state.refresh('selected_scenario')
notify(state, "success", "Predictions ready!")
print("Predictions ready!")
elif details['submission_status'] == 'FAILED':
notify(state, "error", "Submission failed!")
print("Submission failed!")
else:
notify(state, "info", "In progress...")
print("In progress...")
def on_change_params(state):
if state.selected_date.year < 2020 or state.selected_date.year > 2021:
notify(state, "error", "Invalid date! Must be between 2020 and 2021")
state.selected_date = dt.datetime(2020,10,1)
return
state.selected_scenario.date.write(state.selected_date.replace(tzinfo=None))
state.selected_scenario.country.write(state.selected_country)
notify(state, "success", "Scenario parameters changed!")
state['Country'].on_change_country(state)
def on_change(state, var_name, var_value):
if var_name == 'selected_scenario' and var_value:
state.selected_date = state.selected_scenario.date.read()
state.selected_country = state.selected_scenario.country.read()
predictions_md = Markdown("pages/predictions/predictions.md")
|
# Create app for demo-covid-dashboard predictions.md
<|layout|columns=2 9|gap=50px|
<sidebar|sidebar|
**Scenario** Creation
<|{selected_scenario}|scenario_selector|>
|sidebar>
<scenario|part|render={selected_scenario}|
# **Prediction**{: .color-primary} page
<|1 1|layout|
<date|
#### First **day**{: .color-primary} of prediction
<|{selected_date}|date|on_change=on_change_params|>
|date>
<country|
#### **Country**{: .color-primary} of prediction
<|{selected_country}|selector|lov={selector_country}|dropdown|on_change=on_change_params|label=Country|>
|country>
|>
<|{selected_scenario}|scenario|on_submission_change=on_submission_change|not expanded|>
---------------------------------------
## **Predictions**{: .color-primary} and explorer of data nodes
<|{selected_scenario.result.read() if selected_scenario and selected_scenario.result.read() is not None else default_result}|chart|x=Date|y[1]=Deaths|y[2]=Linear Regression|y[3]=ARIMA|type[1]=bar|title=Predictions|>
<|Data Nodes|expandable|
<|1 5|layout|
<|{selected_data_node}|data_node_selector|>
<|{selected_data_node}|data_node|>
|>
|>
|scenario>
|>
|
# Create app for demo-yearly-prediction main.py
from config.config import configure
from pages import scenario_page
from pages.root import root, selected_scenario, selected_data_node, content
import taipy as tp
from taipy import Core, Gui, Config
def on_init(state):
...
def on_change(state, var, val):
if var == "selected_scenario" and val:
state.selected_scenario = val # BUG
state.selected_data_node = None
if var == "selected_data_node" and val:
state.selected_data_node = val # BUG
state["scenario"].manage_data_node_partial(state)
pages = {
"/": root,
"scenario": scenario_page,
}
if __name__ == "__main__":
# Instantiate, configure and run the Core
scenario_cfg = configure()
tp.Core().run()
scenario = tp.create_scenario(scenario_cfg)
tp.submit(scenario)
print(scenario.prediction.read())
# Instantiate, configure and run the GUI
gui = Gui(pages=pages)
data_node_partial = gui.add_partial("")
gui.run(title="Yearly Sales Prediction")
|
# Create app for demo-yearly-prediction config.py
from taipy import Config
from taipy.config import Frequency, Scope
from algos import clean_data, filter_data, predict
def configure():
historical_data_cfg = Config.configure_data_node(
"historical_data",
storage_type="csv",
default_path="historical_data.csv",
scope=Scope.GLOBAL,
)
model_cfg = Config.configure_data_node(
"model", default_data="linear", scope=Scope.SCENARIO
)
prediction_year_cfg = Config.configure_data_node(
"prediction_year", default_data="2016", scope=Scope.CYCLE
)
last_two_years_cfg = Config.configure_data_node("last_two_years", scope=Scope.CYCLE)
prediction_cfg = Config.configure_data_node("prediction", scope=Scope.SCENARIO)
cleaned_data_cfg = Config.configure_data_node("cleaned_data", scope=Scope.GLOBAL)
clean_data_cfg = Config.configure_task(
id="clean_data",
function=clean_data,
input=[historical_data_cfg],
output=[cleaned_data_cfg],
)
filter_data_cfg = Config.configure_task(
id="filter_data",
function=filter_data,
input=[cleaned_data_cfg, prediction_year_cfg],
output=[last_two_years_cfg],
)
predict_cfg = Config.configure_task(
id="predict",
function=predict,
input=[last_two_years_cfg, model_cfg, historical_data_cfg, prediction_year_cfg],
output=[prediction_cfg],
)
scenario_cfg = Config.configure_scenario(
id="prediction_scenario",
task_configs=[clean_data_cfg, filter_data_cfg, predict_cfg],
frequency=Frequency.YEARLY,
)
return scenario_cfg
|
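# Hypothetical illustration of the YEARLY frequency configured above: scenarios
# created in the same yearly cycle share the CYCLE-scoped data nodes
# (prediction_year, last_two_years). The creation dates are illustrative.
import datetime as dt
import taipy as tp
from config.config import configure

if __name__ == "__main__":
    scenario_cfg = configure()
    tp.Core().run()
    s1 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2016, 1, 15))
    s2 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2016, 6, 1))
    s1.prediction_year.write("2016")
    print(s2.prediction_year.read())  # "2016" -- same cycle, same data node
|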
# Create app for demo-yearly-prediction __init__.py
|
# Create app for demo-yearly-prediction algos.py
import numpy as np
import pandas as pd
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
def clean_data(historical_data: pd.DataFrame) -> pd.DataFrame:
"""
Transforms sales data into total sales per month
Args:
historical_data: historical sales dataframe (date, store, item, sales)
Returns:
monthly sales dataframe (date, sales)
"""
historical_data["date"] = pd.to_datetime(historical_data["date"])
historical_data["date"] = (
historical_data["date"].dt.year.astype("str")
+ "-"
+ historical_data["date"].dt.month.astype("str")
+ "-01"
)
historical_data["date"] = pd.to_datetime(historical_data["date"])
historical_data = historical_data.groupby("date").sales.sum().reset_index()
return historical_data
def filter_data(cleaned_data: pd.DataFrame, prediction_year: str) -> pd.DataFrame:
"""
Filters data to include only data from the two years before the prediction year
Args:
cleaned_data: monthly sales dataframe (date, sales)
prediction_year: year to predict
Returns:
filtered dataframe (date, sales)
"""
start_date = str(int(prediction_year) - 2) + "-01-01"
end_date = str(int(prediction_year) - 1) + "-12-01"
filtered_data = cleaned_data[
(cleaned_data["date"] >= start_date) & (cleaned_data["date"] <= end_date)
]
return filtered_data
def predict(
last_two_years: pd.DataFrame,
model: str,
historical_data: pd.DataFrame,
prediction_year: str,
) -> pd.DataFrame:
"""
Predicts sales for the prediction year according to the model
Args:
last_two_years: filtered dataframe (date, sales)
model: model to use for prediction (linear, arima)
Returns:
predicted sales for the prediction year (date, sales)
"""
    predicted_data = pd.DataFrame()
    if model == "linear":
        # Fit on positional month indices 0..23, then predict months 24..35
        # (the frame's original index labels may not start at 0)
        X = np.arange(len(last_two_years)).reshape(-1, 1)
        y = last_two_years["sales"].values
        model = LinearRegression()
        model.fit(X, y)
        predicted_data["date"] = pd.date_range(
            start=f"{prediction_year}-01-01", end=f"{prediction_year}-12-01", freq="MS"
        )
        predicted_data["sales"] = model.predict(
            np.arange(len(last_two_years), len(last_two_years) + 12).reshape(-1, 1)
        )
elif model == "arima":
train_data = last_two_years.copy()
train_data.set_index("date", inplace=True)
model = sm.tsa.statespace.SARIMAX(
last_two_years["sales"], order=(1, 1, 1), seasonal_order=(1, 1, 1, 12)
)
results = model.fit()
predicted_data = results.predict(12)
predicted_data = predicted_data[1:]
predicted_data = pd.DataFrame(
{
"date": pd.date_range(
start=f"{prediction_year}-02-01",
end=f"{prediction_year}-12-01",
freq="MS",
),
"sales": predicted_data.values,
}
)
else:
raise ValueError("Model not supported")
# Combine last_two_years and predicted_data with columns: date, actual, predicted
combined_data = pd.DataFrame()
combined_data["date"] = pd.date_range(
start=f"{int(prediction_year)-2}-01-01",
end=f"{prediction_year}-12-01",
freq="MS",
)
combined_data = combined_data.merge(
last_two_years, how="left", on="date", suffixes=("", "_actual")
)
combined_data = combined_data.merge(
predicted_data, how="left", on="date", suffixes=("", "_predicted")
)
return combined_data
|
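# Toy run of predict() above with synthetic monthly sales (illustrative).
# historical_data may be None because predict() never reads it.
import pandas as pd
from algos import predict

sales = pd.DataFrame({"date": pd.date_range("2014-01-01", "2015-12-01", freq="MS"),
                      "sales": [100.0 + 3 * i for i in range(24)]})
result = predict(sales, "linear", historical_data=None, prediction_year="2016")
print(result.tail())  # columns: date, sales (actuals, NaN in 2016), sales_predicted
|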
# Create app for demo-yearly-prediction __init__.py
from .algos import clean_data, filter_data, predict
|
# Create app for demo-yearly-prediction root.md
<|layout|columns=1 5|
<|sidebar|
<|{selected_scenario}|scenario_selector|>
<|part|render={selected_scenario}|
<|{selected_data_node}|data_node_selector|not display_cycles|>
|>
|>
<|part|class_name=main|render={selected_scenario}|
<|content|>
|>
|>
|
# Create app for demo-yearly-prediction __init__.py
from .scenario_page import scenario_page
|
# Create app for demo-yearly-prediction root.py
from taipy.gui import Markdown
selected_scenario = None
selected_data_node = None
content = ""
root = Markdown("pages/root.md")
|
# Create app for demo-yearly-prediction scenario_page.py
from taipy.gui import Markdown
from .data_node_management import manage_partial
def manage_data_node_partial(state):
manage_partial(state)
scenario_page = Markdown("pages/scenario_page/scenario_page.md")
|
# Create app for demo-yearly-prediction __init__.py
from .scenario_page import scenario_page
|
# Create app for demo-yearly-prediction data_node_management.py
# build partial content for a specific data node
def build_dn_partial(dn, dn_label):
partial_content = "<|part|render={selected_scenario}|\n\n"
# ##################################################################################################################
# PLACEHOLDER: data node specific content before automatic content #
# #
# Example: #
if dn_label == "replacement_type":
partial_content += "All missing values will be replaced by the data node value."
# Comment, remove or replace the previous lines with your own use case #
# ##################################################################################################################
# Automatic data node content
partial_content += "<|{selected_scenario.data_nodes['" + dn.config_id + "']}|data_node|scenario={selected_scenario}|>\n\n"
# ##################################################################################################################
# PLACEHOLDER: data node specific content after automatic content #
# #
# Example: #
if dn_label == "initial_dataset":
partial_content += "Select your CSV file: <|{selected_data_node.path}|file_selector|extensions=.csv|>\n\n"
# Comment, remove or replace the previous lines with your own use case #
# ##################################################################################################################
partial_content += "|>\n\n"
return partial_content
def manage_partial(state):
dn = state.selected_data_node
dn_label = dn.get_simple_label()
partial_content = build_dn_partial(dn, dn_label)
state.data_node_partial.update_content(state, partial_content)
|
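# Minimal standalone version of the partial-update mechanism used above
# (illustrative; in the demo, data_node_partial is registered in main.py with
# gui.add_partial("") and refreshed through manage_partial()).
from taipy.gui import Gui, Markdown

page = Markdown("<|Refresh|button|on_action=refresh|>\n\n<|part|partial={my_partial}|>")

def refresh(state):
    # Replace the partial's content at runtime, as manage_partial() does above
    state.my_partial.update_content(state, "## Fresh content\n")

if __name__ == "__main__":
    gui = Gui(page=page)
    my_partial = gui.add_partial("initial content")
    gui.run()
|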
# Create app for demo-yearly-prediction scenario_page.md
<|layout|columns=1 1|
<|part|render={selected_scenario}|
<|{selected_scenario}|scenario|not expandable|expanded|>
<|{selected_scenario}|scenario_dag|>
|>
<|part|partial={data_node_partial}|render={selected_data_node}|>
|>
|
# Create app for demo-image-classification-part-1 demo-image_classifcation-taipy-cloud.py
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
class_names = ['AIRPLANE', 'AUTOMOBILE', 'BIRD', 'CAT', 'DEER', 'DOG', 'FROG', 'HORSE', 'SHIP', 'TRUCK']
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = x_train / 255.0
y_train = to_categorical(y_train, len(class_names))
x_test = x_test / 255.0
y_test = to_categorical(y_test, len(class_names))
#########################################################################################################
def create_model():
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(32, 32, 3)))
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
df = pd.read_csv("saved_models/df.csv")
df["N_Epochs"] = range(1,len(df)+1)
#STATE VARIABLES
model = None
# Parameters for models & training
epochs = 1
input_model_name = "model"
# Parameters for trained model
trained_model_path = ""
# Parameters for CIFAR dataset
cifar_image_index = 10
cifar_image_path = "images/sample/taipy.jpg"
cifar_predicted_label = 'NA'
cifar_true_label = 'NA'
# Parameters for online image
online_image_url = "URL"
online_image_path = "images/sample/airplane.jpg"
online_image_count = 0
online_image_predicted_label = 'NA' # predicted label for the online image
#P1
from taipy import Gui
from taipy.gui import invoke_long_callback, notify
import urllib
p1 = """
<center><h1>Image Classification CNN</h1></center>
<|layout|columns=1 3|
<|
## PARAMETERS
Enter the chosen optimal number of epochs:
<|{epochs}|input|>
Register model name:
<|{input_model_name}|input|>
Train the model with the Training + Validation sets:
<|START TRAINING|button|on_action=train_button|>
### Upload Trained Model
<|{trained_model_path}|file_selector|label=Upload trained model|on_action=load_trained_model|extensions=.h5|>
|>
<|
<center><h2> Accuracy and Val_accuracy </h2></center>
<|{df}|chart|x=N_Epochs|y[1]=accuracy|y[2]=val_accuracy|>
|>
|>
___
"""
def merged_train(model, number_of_epochs, name):
    # Merge the training and validation sets
    x_all = np.concatenate((x_train, x_test))
    y_all = np.concatenate((y_train, y_test))
    # Light augmentation pipeline (these settings are an assumption)
    datagen = ImageDataGenerator(width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 horizontal_flip=True)
    # Train on the merged dataset, then save the model under saved_models/
    model.fit(datagen.flow(x_all, y_all, batch_size=64), epochs=number_of_epochs)
    model.save("saved_models/{}.h5".format(name), save_format='h5')
    print("TRAINING & SAVING COMPLETED!")
def train_button(state):
    notify(state, "info", "Started training model with {} epochs".format(state.epochs), True, 1000)
    model = create_model()
    invoke_long_callback(state, merged_train, [model, int(state.epochs), state.input_model_name])
def load_trained_model(state):
loaded_model = tf.keras.models.load_model(state.trained_model_path)
state.model = loaded_model
# Second half of the application
p2 = """
<|layout|columns=1 3|
<|
### CIFAR10 Images Prediction
Enter CIFAR10 image index:
<|{cifar_image_index}|input|>
<|PREDICT CIFAR IMAGE|button|on_action=predict_cifar_image|>
<|{cifar_image_path}|image|height=100px|width=100px|>
## Predicted label: <|{cifar_predicted_label}|>
## True label: <|{cifar_true_label}|>
|>
<|
### Paste an online image link here for prediction:
<|{online_image_url}|input|on_action=load_online_image|>
<center> <|{online_image_path}|image|height=300px|width=300px|> </center>
<|PREDICT ONLINE IMAGE|button|on_action=predict_online_image|>
## Predicted label: <|{online_image_predicted_label}|>
|>
|>
"""
def predict_cifar_image(state):
#Retrieve the cifar image at the specified index and save as PIL Image obj
    cifar_img_idx = int(state.cifar_image_index)
cifar_img_data = x_test[cifar_img_idx]
cifar_img = Image.fromarray(np.uint8(cifar_img_data*255))
cifar_img.save("images/cifar10_saved/{}.jpg".format(cifar_img_idx))
#Predict the label of the CIFAR image
img_for_pred = np.expand_dims(x_test[cifar_img_idx], axis=0)
cifar_img_pred_label = np.argmax(state.model.predict(img_for_pred))
cifar_img_true_label = y_test[cifar_img_idx].argmax()
#Update the GUI
state.cifar_image_path = "images/cifar10_saved/{}.jpg".format(cifar_img_idx)
state.cifar_predicted_label = str(class_names[cifar_img_pred_label])
state.cifar_true_label = str(class_names[cifar_img_true_label])
def load_online_image(state):
urllib.request.urlretrieve(state.online_image_url, "images/online_image.jpg")
state.online_image_path = "images/online_image.jpg"
def predict_online_image(state):
#Retrieve & save online image in order to show on the image box
    urllib.request.urlretrieve(state.online_image_url, "images/saved_images/{}.jpg".format(state.online_image_count))
state.online_image_path = "images/saved_images/{}.jpg".format(state.online_image_count)
#Predict the label of the online image
img_array = tf.keras.utils.load_img(state.online_image_path, target_size=(32, 32))
image = tf.keras.utils.img_to_array(img_array) # (height, width, channels)
image = np.expand_dims(image, axis=0) / 255. # (1, height, width, channels) + normalize
#Update the GUI
state.online_image_predicted_label = class_names[np.argmax(state.model.predict(image))]
state.online_image_count += 1
Gui(page=p1+p2).run(dark_mode=False)
|
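# Optional extension (sketch): invoke_long_callback can also report completion
# back to the GUI through a status function. training_status and
# train_button_with_status are hypothetical names, not part of the demo above.
from taipy.gui import invoke_long_callback, notify

def training_status(state, status):
    # Without a period argument, status is a bool: True if merged_train finished,
    # False if it raised an exception
    notify(state, "success" if status else "error",
           "Training finished!" if status else "Training failed!")

def train_button_with_status(state):
    model = create_model()
    invoke_long_callback(state, merged_train,
                         [model, int(state.epochs), state.input_model_name],
                         training_status)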