from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import RetrievalQAWithSourcesChain
from langchain import OpenAI
from dotenv import load_dotenv
import os
import time

loader = TextLoader("data/data_2.0.txt")  # Use this line if you only need data.txt
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=0)
data = loader.load()
texts = text_splitter.split_documents(data)

load_dotenv()
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)

print(len(texts))

# Embed the chunks in batches of 100 and persist them to the Chroma database
for i in range(0, len(texts), 100):
    try:
        db2 = Chroma.from_documents(
            texts[i : i + min(100, len(texts) - i)],
            embeddings,
            persist_directory="chroma_db",
        )
    except ValueError:
        pass
    time.sleep(10)

embeddings = OpenAIEmbeddings()
docsearch = Chroma(persist_directory="chroma_db", embedding_function=embeddings)
chain = RetrievalQAWithSourcesChain.from_chain_type(
    OpenAI(temperature=0),
    chain_type="stuff",
    retriever=docsearch.as_retriever(),
    reduce_k_below_max_tokens=True,
)

user_input = input("What's your question: ")
result = chain({"question": user_input}, return_only_outputs=True)
print("Answer: " + result["answer"].replace("\n", " "))
print("Source: " + result["sources"])
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chains import ConversationalRetrievalChain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from dotenv import load_dotenv
import os

load_dotenv()
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

embeddings = OpenAIEmbeddings()
docsearch = Chroma(persist_directory="chroma_db", embedding_function=embeddings)

chain = RetrievalQAWithSourcesChain.from_chain_type(
    OpenAI(temperature=0),
    chain_type="stuff",
    retriever=docsearch.as_retriever(),
    reduce_k_below_max_tokens=True,
)

qa = ConversationalRetrievalChain.from_llm(
    OpenAI(temperature=0),
    docsearch.as_retriever(),
    memory=memory,
)

while True:
    user_input = input("What's your question: ")
    if len(user_input) == 0:
        break
    print(
        docsearch.similarity_search_with_score(
            query=user_input, distance_metric="cos", k=6
        )
    )
    result = qa({"question": user_input})
    print("Answer: " + result["answer"])
import os
import sys

import openai
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.indexes.vectorstore import VectorStoreIndexWrapper
from langchain.llms import OpenAI
from langchain.vectorstores import Chroma

import constants

os.environ["OPENAI_API_KEY"] = constants.APIKEY

# Enable to save to disk & reuse the model (for repeated queries on the same data)
PERSIST = False

query = input("What's your question?")

if PERSIST and os.path.exists("persist"):
    print("Reusing index...\n")
    vectorstore = Chroma(
        persist_directory="persist", embedding_function=OpenAIEmbeddings()
    )
    index = VectorStoreIndexWrapper(vectorstore=vectorstore)
else:
    # loader = TextLoader("data/data.txt")  # Use this line if you only need data.txt
    loader = DirectoryLoader("data/")
    if PERSIST:
        index = VectorstoreIndexCreator(
            vectorstore_kwargs={"persist_directory": "persist"}
        ).from_loaders([loader])
    else:
        index = VectorstoreIndexCreator().from_loaders([loader])

chain = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(model="gpt-3.5-turbo"),
    retriever=index.vectorstore.as_retriever(search_kwargs={"k": 1}),
)

print(chain.run(query))
# Strip the newlines from data.txt and write the result to new_data.txt
with open("data/data.txt", "r") as f:
    s = f.read().replace("\n", "")

with open("data/new_data.txt", "w") as x:
    x.write(s)
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urljoin

def scrape_domain_and_subdomains(base_url, file_path):
    visited_urls = set()

    def scrape(url):
        response = requests.get(url)
        if response.status_code == 200:
            soup = BeautifulSoup(response.content, "html.parser")
            div_elements = soup.select("div.md-content")
            text = ""
            for div in div_elements:
                text += div.get_text()
            append_to_text_file(text, file_path)

            links = soup.find_all("a")
            for link in links:
                href = link.get("href")
                if href:
                    subdomain_url = urljoin(url, href)
                    parsed_url = urlparse(subdomain_url)
                    if (
                        parsed_url.netloc.endswith("docs.taipy.io")
                        and subdomain_url not in visited_urls
                    ):
                        visited_urls.add(subdomain_url)
                        scrape(subdomain_url)

    scrape(f"http://{base_url}")
    print(visited_urls)

def append_to_text_file(content, file_path):
    with open(file_path, "a", encoding="utf-8") as file:
        file.write(content)
        file.write("\n")

# Example usage:
base_url = "docs.taipy.io/en/latest/"  # Replace with the base domain to scrape (without the protocol)
file_path = "data/data_2.0.txt"  # Replace with the desired file path
scrape_domain_and_subdomains(base_url, file_path)
# This is a sample Python script.
# Press Maj+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.

def print_hi(name):
    # Use a breakpoint in the code line below to debug your script.
    print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.

# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    print_hi('PyCharm')

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
import taipy as tp
from taipy.gui import Gui, notify
from taipy.config import Config
import numpy as np
import pandas as pd

BUSINESS_PATH = "data/yelp_business.csv"

# Load the business data using pandas
business_df = pd.read_csv(BUSINESS_PATH)
# Remove quotation marks from the name
business_df.name = business_df.name.str[1:-1]

# Taipy Core
Config.load("config/config.toml")
Config.configure_data_node(id="review_data", read_fct_params=("data/yelp_review.csv",))
scenario_object = Config.scenarios["scenario"]

business_name = business_df.name[0]
reviews = None

def on_selection(state):
    """
    Re-runs the scenario when the user selects a business.

    Args:
        - state: state of the app
    """
    notify(state, "info", "Running query...")
    business_scenarios = [
        s for s in tp.get_scenarios() if s.name == state.business_name
    ]
    if len(business_scenarios) > 0:
        scenario = business_scenarios[0]
    else:
        scenario = tp.create_scenario(scenario_object, name=state.business_name)
    scenario.business_name.write(state.business_name)
    tp.submit(scenario)
    state.reviews = scenario.parsed_reviews.read()
    notify(state, "success", "Query finished")

page = """
# Querying **Big Data**{: .color-primary} with Taipy and Dask

## Select a **business**{: .color-primary}
<|{business_name}|selector|lov={list(business_df.name)}|dropdown|on_change=on_selection|>

## Average **stars**{: .color-primary} for that business:
<|{"⭐"*int(np.mean(reviews.stars))}|text|raw|>
<|{round(np.mean(reviews.stars),2)}|indicator|value={np.mean(reviews.stars)}|min=1|max=5|width=30%|>

## **Reviews**{: .color-primary} for that business:
<|{reviews}|table|width=100%|>
"""

def on_init(state):
    scenario = tp.create_scenario(scenario_object, name=state.business_name)
    tp.submit(scenario)
    state.reviews = scenario.parsed_reviews.read()

if __name__ == "__main__":
    tp.Core().run()
    Gui(page).run()
import dask.dataframe as dd

def get_data(path_to_csv: str, optional: str = None):
    """
    Loads a csv file into a dask dataframe.
    Converts the date column to datetime.

    Args:
        - path_to_csv: path to the csv file
        - optional: optional argument (currently necessary to fix Core bug with generic data nodes)

    Returns:
        - dataset: dask dataframe
    """
    dataset = dd.read_csv(path_to_csv)
    dataset["date"] = dd.to_datetime(dataset["date"])
    return dataset

def write_function():
    """
    Useless function to fix Core bug with generic data nodes.
    """
    return None
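# A minimal usage sketch (not part of the original file), calling get_data directly on the
# Yelp reviews CSV used elsewhere in this project; the path is an assumption for illustration.
if __name__ == "__main__":
    reviews_dd = get_data("data/yelp_review.csv")  # assumed path
    print(reviews_dd.dtypes)   # the "date" column should now be datetime64[ns]
    print(reviews_dd.head())   # dask only computes the first partition here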
import pandas as pd
import dask.dataframe as dd

def get_id_from_name(name: str, business_dict: dict):
    """
    Returns the business_id from the name of the business.

    Args:
        - name: name of the business
        - business_dict: dict with the name as key and the business_id as value

    Returns:
        - business_id: id of the business
    """
    business_id = business_dict[name]
    return business_id

def parse_business_data(data: dd.DataFrame):
    """
    Parses the reviews of a business.

    Args:
        - data: dask dataframe with the reviews of a business

    Returns:
        - data: dask dataframe with the parsed reviews
    """
    # Sort data by useful
    data = data.sort_values(by="useful", ascending=False)
    # Keep only the stars, date and text columns
    data = data[["stars", "date", "text"]]
    return data

def get_business_data(business_id: str, data: dd.DataFrame):
    """
    Returns a dask dataframe with the reviews of a business.

    Args:
        - business_id: id of the business
        - data: dask dataframe with the reviews

    Returns:
        - df_business: dask dataframe with the reviews of the business
    """
    df_business = data[(data.business_id == business_id)].compute()
    return df_business

def create_business_dict(business_df: pd.DataFrame):
    """
    Creates a dict with the name as key and the business_id as value.

    Args:
        - business_df: pandas dataframe with the business data

    Returns:
        - business_dict: dict with the name as key and the business_id as value
    """
    # Remove quotation marks from the name
    business_df.name = business_df.name.str[1:-1]
    business_dict = dict(zip(business_df.name, business_df.business_id))
    print(business_dict)
    return business_dict
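# A minimal usage sketch (not part of the original file) chaining the helpers above, assuming
# the Yelp CSV paths used elsewhere in this project: build the name -> id mapping, look up one
# business, pull its reviews and keep the displayed columns.
if __name__ == "__main__":
    business_df = pd.read_csv("data/yelp_business.csv")   # assumed path
    reviews_dd = dd.read_csv("data/yelp_review.csv")      # assumed path
    business_dict = create_business_dict(business_df)
    some_name = next(iter(business_dict))
    business_id = get_id_from_name(some_name, business_dict)
    business_reviews = get_business_data(business_id, reviews_dd)  # compute() returns a pandas DataFrame
    print(parse_business_data(business_reviews).head())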
import time
import pandas as pd
import dask.dataframe as dd

def task1(path_to_original_data: str):
    print("__________________________________________________________")
    print("1. TASK 1: DATA PREPROCESSING AND CUSTOMER SCORING ...")
    start_time = time.perf_counter()  # Start the timer

    # Step 1: Read data using Dask
    df = dd.read_csv(path_to_original_data)

    # Step 2: Simplify the customer scoring formula
    df['CUSTOMER_SCORE'] = (
        0.5 * df['TotalPurchaseAmount'] / 1000
        + 0.3 * df['NumberOfPurchases'] / 10
        + 0.2 * df['AverageReviewScore']
    )

    # Save all customers to a new CSV file
    scored_df = df[["CUSTOMER_SCORE", "TotalPurchaseAmount", "NumberOfPurchases", "TotalPurchaseTime"]]
    pd_df = scored_df.compute()

    end_time = time.perf_counter()  # Stop the timer
    execution_time = (end_time - start_time) * 1000  # Calculate the time in milliseconds
    print(f"Time of Execution: {execution_time:.4f} ms")
    return pd_df

def task2(scored_df, payment_threshold, score_threshold):
    print("__________________________________________________________")
    print("2. TASK 2: FEATURE ENGINEERING AND SEGMENTATION ...")
    payment_threshold, score_threshold = float(payment_threshold), float(score_threshold)
    start_time = time.perf_counter()  # Start the timer

    df = scored_df

    # Feature: Indicator if customer's total purchase is above the payment threshold
    df['HighSpender'] = (df['TotalPurchaseAmount'] > payment_threshold).astype(int)

    # Feature: Average time between purchases
    df['AverageTimeBetweenPurchases'] = df['TotalPurchaseTime'] / df['NumberOfPurchases']

    # Additional computationally intensive features
    df['Interaction1'] = df['TotalPurchaseAmount'] * df['NumberOfPurchases']
    df['Interaction2'] = df['TotalPurchaseTime'] * df['CUSTOMER_SCORE']
    df['PolynomialFeature'] = df['TotalPurchaseAmount'] ** 2

    # Segment customers based on the score_threshold
    df['ValueSegment'] = ['High Value' if score > score_threshold else 'Low Value'
                          for score in df['CUSTOMER_SCORE']]

    end_time = time.perf_counter()  # Stop the timer
    execution_time = (end_time - start_time) * 1000  # Calculate the time in milliseconds
    print(f"Time of Execution: {execution_time:.4f} ms")
    return df

def task3(df: pd.DataFrame, metric):
    print("__________________________________________________________")
    print("3. TASK 3: SEGMENT ANALYSIS ...")
    start_time = time.perf_counter()  # Start the timer

    # Detailed analysis for each segment: mean/median of various metrics
    segment_analysis = df.groupby('ValueSegment').agg({
        'CUSTOMER_SCORE': metric,
        'TotalPurchaseAmount': metric,
        'NumberOfPurchases': metric,
        'TotalPurchaseTime': metric,
        'HighSpender': 'sum',  # Total number of high spenders in each segment
        'AverageTimeBetweenPurchases': metric
    }).reset_index()

    end_time = time.perf_counter()  # Stop the timer
    execution_time = (end_time - start_time) * 1000  # Calculate the time in milliseconds
    print(f"Time of Execution: {execution_time:.4f} ms")
    return segment_analysis

def task4(df: pd.DataFrame, segment_analysis: pd.DataFrame, summary_statistic_type: str):
    print("__________________________________________________________")
    print("4. TASK 4: ADDITIONAL ANALYSIS BASED ON SEGMENT ANALYSIS ...")
    start_time = time.perf_counter()  # Start the timer

    # Filter out the High Value customers
    high_value_customers = df[df['ValueSegment'] == 'High Value']

    # Use summary_statistic_type to calculate different types of summary statistics
    if summary_statistic_type == 'mean':
        average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].mean()
    elif summary_statistic_type == 'median':
        average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].median()
    elif summary_statistic_type == 'max':
        average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].max()
    elif summary_statistic_type == 'min':
        average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].min()

    median_score_high_value = high_value_customers['CUSTOMER_SCORE'].median()

    # Fetch the summary statistic for 'TotalPurchaseAmount' for High Value customers from segment_analysis
    segment_statistic_high_value = segment_analysis.loc[
        segment_analysis['ValueSegment'] == 'High Value', 'TotalPurchaseAmount'
    ].values[0]

    # Create a DataFrame to hold the results
    result_df = pd.DataFrame({
        'SummaryStatisticType': [summary_statistic_type],
        'AveragePurchaseHighValue': [average_purchase_high_value],
        'MedianScoreHighValue': [median_score_high_value],
        'SegmentAnalysisHighValue': [segment_statistic_high_value]
    })

    end_time = time.perf_counter()  # Stop the timer
    execution_time = (end_time - start_time) * 1000  # Calculate the time in milliseconds
    print(f"Time of Execution: {execution_time:.4f} ms")
    return result_df

if __name__ == "__main__":
    t1 = task1("data/SMALL_amazon_customers_data.csv")
    t2 = task2(t1, 1500, 1.5)
    t3 = task3(t2, "mean")
    t4 = task4(t2, t3, "mean")
    print(t4)
from recsys.recsys import page_scenario_manager
from taipy.gui import Gui

gui = Gui(page_scenario_manager)
gui.run()
import pandas as pd
import numpy as np

TEST_RATIO = 0.2
MOVIELENS_DATA_PATH = "u.data"
MOVIE_DATA_PATH = "u.item"

def convert_data_to_dataframe(data_path: str, movie_path: str):
    data = pd.read_table(data_path)
    data.columns = ["u_id", "i_id", "rating", "timestamp"]
    data_sort = data.sort_values(by=["u_id"])

    movie_name = pd.read_table(movie_path, sep='|', encoding="latin-1", header=None)
    movie_name.drop([2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
                     17, 18, 19, 20, 21, 22, 23], inplace=True, axis=1)
    movie_name.rename(columns={0: "movieId", 1: "title"}, inplace=True)
    return data_sort, movie_name

movies_list = []

print("\nReading the ratings file...")
ratings, movie_name = convert_data_to_dataframe(MOVIELENS_DATA_PATH, MOVIE_DATA_PATH)
userIds = ratings.u_id.unique()
trainIds = []
testIds = []

print("Splitting the ratings data...")
for u in userIds:
    rating_of_u = ratings.loc[ratings.u_id == u]
    trainIds_sample = rating_of_u.sample(frac=(1 - TEST_RATIO), random_state=7)
    testIds_sample = rating_of_u.drop(trainIds_sample.index.tolist())

    for _, rating in trainIds_sample.iterrows():
        if rating.i_id not in movies_list:
            # Append new movie's Id to the movie list
            movies_list.append(rating.i_id)

    trainIds_sample = trainIds_sample.index.values
    trainIds_sample.sort()
    trainIds = np.append(trainIds, trainIds_sample)

    testIds_sample = testIds_sample.index.values
    testIds_sample.sort()
    testIds = np.append(testIds, testIds_sample)

print("Done.")

print("Write ratings to new file...")
train = ratings.loc[ratings.index.isin(trainIds)]
train.to_csv("rating_train.csv", index=False)
test = ratings.loc[ratings.index.isin(testIds)]
test.to_csv("rating_test.csv", index=False)
movie_name.to_csv("movie.csv", index=False)
print("Done.")
import pandas as pd
from datetime import datetime

# Convert the integer Unix timestamps of the training ratings to datetime objects
data = pd.read_csv("dataset/rating_train.csv")
timestamp = data.timestamp.to_list()
date = []
for time in timestamp:
    date.append(datetime.fromtimestamp(time))
data["timestamp"] = date
data.to_csv("data.csv", index=False)
import pandas as pd
import numpy as np
from scipy import sparse

class DataLoader:
    def __init__(self):
        self.__train_data = None
        self.__val_data = None
        self.__test_data = None

    def __create_id_mapping(self):
        if self.__val_data:
            unique_uIds = pd.concat(
                [self.__train_data.u_id, self.__test_data.u_id, self.__val_data.u_id]
            ).unique()
            unique_iIds = pd.concat(
                [self.__train_data.i_id, self.__test_data.i_id, self.__val_data.i_id]
            ).unique()
        else:
            unique_uIds = pd.concat(
                [self.__train_data.u_id, self.__test_data.u_id]
            ).unique()
            unique_iIds = pd.concat(
                [self.__train_data.i_id, self.__test_data.i_id]
            ).unique()

        self.user_dict = {uId: idx for idx, uId in enumerate(unique_uIds)}
        self.item_dict = {iId: idx for idx, iId in enumerate(unique_iIds)}

    def __preprocess(self, data):
        """Map the ids of all users and items according to user_dict and item_dict.

        To create user_dict, all user ids in the training set are first sorted, then the
        first id is mapped to 0 and so on. The same is done for item_dict. This process
        is done via `self.__create_id_mapping()`.

        Args:
            data (DataFrame): The dataset that needs to be preprocessed.

        Returns:
            ndarray: The array with all ids mapped.
        """
        # data['u_id'] = data['u_id'].replace(self.user_dict)
        # data['i_id'] = data['i_id'].replace(self.item_dict)
        data["u_id"] = data["u_id"].map(self.user_dict)
        data["i_id"] = data["i_id"].map(self.item_dict)

        # Tag unknown users/items with -1 (when val)
        data.fillna(-1, inplace=True)

        data["u_id"] = data["u_id"].astype(np.int32)
        data["i_id"] = data["i_id"].astype(np.int32)

        return data[["u_id", "i_id", "rating"]].values

    def load_csv2ndarray(
        self,
        train_data,
        test_data,
        val_path="rating_val.csv",
        use_val=False,
        columns=["u_id", "i_id", "rating", "timestamp"],
    ):
        """
        Load the training, validation and test sets, each as `ndarray`.

        Args:
            train_data (DataFrame): the training set.
            test_data (DataFrame): the testing set.
            val_path (string): path to the validation set csv file.
            use_val (boolean): whether to load the validation data. Defaults to False.
            columns (list): column names for the DataFrame.
                Defaults to ['u_id', 'i_id', 'rating', 'timestamp'].

        Returns:
            train, val, test (np.array): Preprocessed data.
        """
        self.__train_data = train_data
        self.__test_data = test_data

        if use_val:
            self.__val_data = self.__read_csv(val_path, columns)

        self.__create_id_mapping()

        self.__train_data = self.__preprocess(self.__train_data)
        self.__test_data = self.__preprocess(self.__test_data)

        if use_val:
            self.__val_data = self.__preprocess(self.__val_data)
            return self.__train_data, self.__val_data, self.__test_data
        else:
            return self.__train_data, self.__test_data

    def load_genome_fromcsv(
        self,
        genome_file="genome_scores.csv",
        columns=["i_id", "g_id", "score"],
        reset_index=False,
    ):
        """
        Load genome scores from file.

        Args:
            genome_file (string): File name that contains the genome scores. Must be in csv format.
            columns (list, optional): Column names for the DataFrame.
                Must be ["i_id", "g_id", "score"] or ["i_id", "score", "g_id"].
            reset_index (boolean): Whether to reset the genome_tag column. Defaults to False.

        Returns:
            scores (DataFrame)
        """
        genome = pd.read_csv(
            self.__genome_folder + "/" + genome_file, header=0, names=columns
        )

        if reset_index:
            tag_map = {
                genome.g_id: newIdx
                for newIdx, genome in genome.loc[genome.i_id == 1].iterrows()
            }
            genome["g_id"] = genome["g_id"].map(tag_map)

        genome["i_id"] = genome["i_id"].map(self.item_dict)
        genome.fillna(0, inplace=True)

        return sparse.csr_matrix(
            (genome["score"], (genome["i_id"].astype(int), genome["g_id"].astype(int)))
        ).toarray()
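# A minimal usage sketch (not part of the original file), assuming the rating_train.csv and
# rating_test.csv files produced by the MovieLens split script in this project.
if __name__ == "__main__":
    train_df = pd.read_csv("dataset/rating_train.csv")  # assumed path
    test_df = pd.read_csv("dataset/rating_test.csv")    # assumed path
    loader = DataLoader()
    train_set, test_set = loader.load_csv2ndarray(train_data=train_df, test_data=test_df)
    print(train_set.shape, test_set.shape)  # (n_ratings, 3) arrays of [u_id, i_id, rating]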
from taipy import Config, Scope

from functions.funtions import preprocess_data, fit, predict
from config.svd_config import (
    n_epochs_cfg,
    n_factors_cfg,
    learning_rate_cfg,
    qi_cfg,
    bi_cfg,
    bu_cfg,
    pu_cfg,
)
from config.kNN_config import (
    x_id_cfg,
    n_k_neighboor_cfg,
    n_min_k_cfg,
    sim_measure_cfg,
    list_ur_ir_cfg,
    similarity_matrix_cfg,
    users_id_cfg,
    items_id_cfg,
)

TRAIN_DATA_PATH = "dataset/rating_train.csv"
TEST_DATA_PATH = "dataset/rating_test.csv"
MOVIE_DATA_PATH = "dataset/movie.csv"

train_dataset_cfg = Config.configure_data_node(
    id="train_dataset",
    storage_type="csv",
    path=TRAIN_DATA_PATH,
    scope=Scope.GLOBAL,
    cacheable=True,
)
test_dataset_cfg = Config.configure_data_node(
    id="test_dataset",
    storage_type="csv",
    path=TEST_DATA_PATH,
    scope=Scope.GLOBAL,
    cacheable=True,
)
movie_name_cfg = Config.configure_data_node(
    id="movie_name",
    storage_type="csv",
    path=MOVIE_DATA_PATH,
    scope=Scope.GLOBAL,
    cacheable=True,
)
trainset_cfg = Config.configure_data_node(
    id="trainset", scope=Scope.PIPELINE, cacheable=True)
testset_cfg = Config.configure_data_node(
    id="testset", scope=Scope.PIPELINE, cacheable=True)
true_testset_movies_id_cfg = Config.configure_data_node(
    id="true_testset_movies_id", scope=Scope.PIPELINE, cacheable=True
)
algorithm_cfg = Config.configure_in_memory_data_node(
    id="algorithm", default_data="kNN")
global_mean_cfg = Config.configure_data_node(
    id="global_mean", scope=Scope.GLOBAL, cacheable=True
)
predictions_cfg = Config.configure_data_node(
    id="predictions", scope=Scope.PIPELINE)

# Config task
load_data_task_cfg = Config.configure_task(
    id="load_data",
    function=preprocess_data,
    input=[train_dataset_cfg, test_dataset_cfg, movie_name_cfg],
    output=[trainset_cfg, testset_cfg, true_testset_movies_id_cfg],
)
train_data_task_cfg = Config.configure_task(
    id="train_data",
    function=fit,
    input=[
        trainset_cfg,
        sim_measure_cfg,
        n_factors_cfg,
        n_epochs_cfg,
        learning_rate_cfg,
        algorithm_cfg,
    ],
    output=[
        similarity_matrix_cfg,
        list_ur_ir_cfg,
        users_id_cfg,
        items_id_cfg,
        global_mean_cfg,
        pu_cfg,
        qi_cfg,
        bu_cfg,
        bi_cfg,
    ],
)
predict_task_cfg = Config.configure_task(
    id="predict_task",
    function=predict,
    input=[
        testset_cfg,
        x_id_cfg,
        list_ur_ir_cfg,
        similarity_matrix_cfg,
        n_k_neighboor_cfg,
        n_min_k_cfg,
        users_id_cfg,
        items_id_cfg,
        global_mean_cfg,
        true_testset_movies_id_cfg,
        bu_cfg,
        bi_cfg,
        pu_cfg,
        qi_cfg,
        algorithm_cfg,
    ],
    output=predictions_cfg,
)

# Config pipeline
pipeline_cfg = Config.configure_pipeline(
    id="pipeline",
    task_configs=[load_data_task_cfg, train_data_task_cfg, predict_task_cfg],
)
from taipy import Config, Scope

x_id_cfg = Config.configure_data_node(id="x_id", default_data=1)
n_min_k_cfg = Config.configure_data_node(id="n_min_k", default_data=10)
n_k_neighboor_cfg = Config.configure_data_node(
    id="n_k_neighboor", default_data=1)
sim_measure_cfg = Config.configure_in_memory_data_node(
    id="sim_measure", default_data="pcc")
list_ur_ir_cfg = Config.configure_data_node(
    id="list_ur_ir", scope=Scope.GLOBAL, cacheable=True)
similarity_matrix_cfg = Config.configure_data_node(
    id="similarity_matrix", scope=Scope.GLOBAL, cacheable=True)
items_id_cfg = Config.configure_data_node(
    id="x_list", scope=Scope.GLOBAL, cacheable=True)
users_id_cfg = Config.configure_data_node(
    id="y_list", scope=Scope.GLOBAL, cacheable=True)
from taipy import Config, Scope

n_factors_cfg = Config.configure_data_node(id="n_factors", default_data=30)
n_epochs_cfg = Config.configure_data_node(id="n_epochs", default_data=50)
learning_rate_cfg = Config.configure_data_node(id="learning_rate", default_data=0.001)
qi_cfg = Config.configure_data_node(id="qi", scope=Scope.GLOBAL, cacheable=True)
pu_cfg = Config.configure_data_node(id="pu", scope=Scope.GLOBAL, cacheable=True)
bu_cfg = Config.configure_data_node(id="bu", scope=Scope.GLOBAL, cacheable=True)
bi_cfg = Config.configure_data_node(id="bi", scope=Scope.GLOBAL, cacheable=True)
import taipy as tp
from taipy.config import Config
from taipy.gui import notify, Markdown
import pandas as pd
import numpy as np

from helper.knn_helper import calculate_precision_recall
from config.config import pipeline_cfg

Config.configure_global_app(clean_entities_enabled=True)
tp.clean_all_entities()

scenario_cfg = Config.configure_scenario(
    id="scenario", pipeline_configs=pipeline_cfg)

dataset = pd.read_csv("dataset/data.csv")
dataset["timestamp"] = pd.to_datetime(dataset["timestamp"])

sim_measure_selector = ["pcc", "cosine"]
selected_sim_measure = sim_measure_selector[0]

model_selector = ["kNN", "MF"]
selected_model = model_selector[0]

# set up the parameters of the kNN model
n_min_k, n_k_neighboor, x_id, top_k = 1, 10, 1, 1

# set up the parameters of the svd model
n_epochs, n_factors, learning_rate = 30, 40, 0.001

results, y_id, y_id_real, results_real, recall, precision = (
    None, None, None, None, None, None)

all_scenarios = tp.get_scenarios()
[tp.delete(scenario.id) for scenario in all_scenarios if scenario.name is None]

scenario_selector = [(scenario.id, scenario.name) for scenario in tp.get_scenarios()]

def create_scenario(state):
    global selected_scenario
    print("Creating scenario...")
    scenario = tp.create_scenario(scenario_cfg)
    scenario.sim_measure.write(str(state.selected_sim_measure))
    scenario.algorithm.write(str(state.selected_model))
    scenario.n_min_k.write(int(state.n_min_k))
    scenario.n_k_neighboor.write(int(state.n_k_neighboor))
    scenario.x_id.write(int(state.x_id))
    scenario.n_factors.write(int(state.n_factors))
    scenario.n_epochs.write(int(state.n_epochs))
    scenario.learning_rate.write(float(state.learning_rate))

    selected_scenario = scenario.id
    update_scenario_selector(state, scenario)
    tp.submit(scenario)

def submit_scenario(state):
    (
        state.y_id,
        state.y_id_real,
        state.results_real,
        state.recall,
        state.precision,
        state.results,
    ) = (None, None, [], None, None, [])
    print("Submitting scenario...")
    # Get the selected scenario: in this current step a single scenario is created then modified here.
    scenario = tp.get(selected_scenario)

    # Change the default parameters by writing in the datanodes
    if scenario.sim_measure.read() != state.selected_sim_measure:
        scenario.sim_measure.write(str(state.selected_sim_measure))
    if scenario.n_min_k.read() != state.n_min_k:
        scenario.n_min_k.write(int(state.n_min_k))
    if scenario.n_k_neighboor.read() != state.n_k_neighboor:
        scenario.n_k_neighboor.write(int(state.n_k_neighboor))
    if scenario.x_id.read() != state.x_id:
        scenario.x_id.write(int(state.x_id))
    if scenario.n_factors.read() != state.n_factors:
        scenario.n_factors.write(int(state.n_factors))
    if scenario.n_epochs.read() != state.n_epochs:
        scenario.n_epochs.write(int(state.n_epochs))
    if scenario.learning_rate.read() != state.learning_rate:
        scenario.learning_rate.write(float(state.learning_rate))
    if scenario.algorithm.read() != state.selected_model:
        scenario.algorithm.write(str(state.selected_model))

    # Execute the pipelines/code
    tp.submit(scenario)

def update_scenario_selector(state, scenario):
    print("Updating scenario selector...")
    # Update the scenario selector
    state.scenario_selector += [(scenario.id, scenario.name)]

def take_all_movies_rated_by_x_id(test_set, x_id):
    test_items = []
    test_set = test_set.copy()
    for index in range(test_set.shape[0]):
        if test_set[index][0] == x_id:
            test_items.append(test_set[index])
    test_items = np.array(test_items)
    return test_items

def predicts(state):
    id, predict, id_real, predict_real, user_ratings = [], [], [], [], []
    scenario = tp.get(selected_scenario)
    movie_name = (scenario.movie_name.read()).title.to_numpy()

    print("'Predict' button clicked")
    result = scenario.predictions.read()
    result = result[result[:, 4].argsort()[::-1]]

    if state.top_k > result.shape[0]:
        notify(
            state,
            notification_type="error",
            message="Out of range, top_k max = {}".format(result.shape[0]),
        )
        state.top_k = result.shape[0]
    top_k = state.top_k

    test_set = scenario.testset.read()
    true_testset_movies_id = scenario.true_testset_movies_id.read()
    test_set[:, 1] = true_testset_movies_id.T
    test_items = take_all_movies_rated_by_x_id(test_set, state.x_id)
    test_items = test_items[test_items[:, 2].argsort()[::-1]]

    for i in range(int(top_k)):
        id.append(int(result[i][3]))
        id_real.append(int(test_items[i][1]))
    for i, j in zip(id, id_real):
        predict.append(movie_name[(i - 1)])
        predict_real.append(movie_name[(j - 1)])

    state.y_id = np.array2string(
        np.array(id), precision=2, separator=", ", suppress_small=True)
    state.y_id_real = np.array2string(
        np.array(id_real), precision=2, separator=", ", suppress_small=True)
    state.results = predict
    state.results_real = predict_real

    for _, _, true_r, _, est in result:
        user_ratings.append([est, true_r])
    user_ratings = np.array(user_ratings)
    state.precision, state.recall = calculate_precision_recall(
        user_ratings, top_k, 3)

page_scenario_manager = Markdown("recsys/recsys.md")
import pandas as pd
import numpy as np

from ultis.dataloader import DataLoader
from helper.svd_helper import sgd, predict_svd_pair
from helper.knn_helper import predict_pair, compute_similarity_matrix

VALID_ALGORITHM = ["kNN", "MF"]

def preprocess_data(
    train_data: pd.DataFrame, test_data: pd.DataFrame, movie_name: pd.DataFrame
):
    """Preprocess data from pd.DataFrame.

    Args:
        train_data (pd.DataFrame): Train data loaded from a csv file.
        test_data (pd.DataFrame): Test data loaded from a csv file.
        movie_name (pd.DataFrame): The names of the movies to recommend, loaded from a csv file.

    Returns:
        train_set (numpy.array): Preprocessed training data.
        test_set (numpy.array): Preprocessed testing data.
        true_testset_movies_id (numpy.ndarray): All actual movie ids, kept because the
            conversion resets the movie id indexing.
    """
    loader = DataLoader()
    true_testset_movies_id = test_data.i_id.to_numpy()
    train_set, test_set = loader.load_csv2ndarray(
        train_data=train_data,
        test_data=test_data,
        columns=["u_id", "i_id", "rating", "timestamp"],
    )
    return train_set, test_set, true_testset_movies_id

def fit(
    X,
    sim_measure="pcc",
    n_factors=100,
    n_epochs=50,
    learning_rate=0.01,
    algorithm="kNN",
):
    """Train the recommendation model.

    Args:
        X (numpy.ndarray): Training data.
        sim_measure (str, optional): Similarity measure function. Defaults to "pcc".
        n_factors (int, optional): Number of latent factors. Defaults to 100.
        n_epochs (int, optional): Number of SGD iterations. Defaults to 50.
        learning_rate (float, optional): The common learning rate. Defaults to 0.01.
        algorithm (str, optional): The algorithm used to make recommendations.
            Possible values are "kNN" or "MF". Defaults to "kNN".

    Returns:
        S (numpy.ndarray): The similarity between all pairs of users.
        x_rated (numpy.ndarray): For each item, the list of users who rated it.
        x_list (numpy.ndarray): All user ids in the training set.
        y_list (numpy.ndarray): All movie ids in the training set.
        global_mean (float): Mean rating in the training set.
        pu (numpy.ndarray): Users latent factor matrix.
        qi (numpy.ndarray): Items latent factor matrix.
        bu (numpy.ndarray): Users biases vector.
        bi (numpy.ndarray): Items biases vector.
    """
    if algorithm not in VALID_ALGORITHM:
        raise SystemExit(
            f"{algorithm} is not a valid algorithm. Possible values are {VALID_ALGORITHM}.\n")

    global_mean = np.mean(X[:, 2])

    if algorithm == "kNN":
        S, x_rated, x_list, y_list = compute_similarity_matrix(X, sim_measure)
        pu, qi, bu, bi = [], [], [], []
    elif algorithm == "MF":
        users_list = np.unique(X[:, 0])
        items_list = np.unique(X[:, 1])
        n_user = users_list.shape[0]
        n_item = items_list.shape[0]

        # Initialize pu, qi, bu, bi
        qi = np.random.normal(0, 0.1, (n_item, n_factors))
        pu = np.random.normal(0, 0.1, (n_user, n_factors))
        bu = np.zeros(n_user)
        bi = np.zeros(n_item)

        lr_pu, lr_qi, lr_bu, lr_bi, reg_pu, reg_qi, reg_bu, reg_bi = (
            learning_rate, learning_rate, learning_rate, learning_rate,
            learning_rate, learning_rate, learning_rate, learning_rate)

        pu, qi, bu, bi, _ = sgd(
            X, pu, qi, bu, bi, n_epochs, global_mean, n_factors,
            lr_pu, lr_qi, lr_bu, lr_bi, reg_pu, reg_qi, reg_bu, reg_bi,
        )
        S, x_rated, x_list, y_list = [], [], [], []
    else:
        S, x_rated, x_list, y_list, pu, qi, bu, bi = (0, 0, 0, 0, 0, 0, 0, 0)

    return S, x_rated, x_list, y_list, global_mean, pu, qi, bu, bi

def predict(
    test_set_origin,
    x_id,
    x_rated,
    S,
    k,
    min_k,
    x_list,
    y_list,
    global_mean,
    true_testset_movies_id,
    bu,
    bi,
    pu,
    qi,
    algorithm="MF",
):
    """Predict the ratings of movies.

    Args:
        test_set_origin (numpy.ndarray): All user/item pairs we want to predict the ratings of.
        x_id (int): The user id we want to make recommendations for.
        x_rated (numpy.ndarray): For each item, the list of users who rated it.
        S (numpy.ndarray): The similarity between all pairs of users.
        k (int): Number of neighbors used in the prediction.
        min_k (int): The minimum number of neighbors to take into account for aggregation.
            If there are not enough neighbors, the neighbor aggregation is set to zero.
        x_list (numpy.ndarray): All user ids in the training set.
        y_list (numpy.ndarray): All movie ids in the training set.
        global_mean (float): Mean rating in the training set.
        true_testset_movies_id (numpy.ndarray): List of all actual movie ids.
        bu (numpy.ndarray): Users biases vector.
        bi (numpy.ndarray): Items biases vector.
        pu (numpy.ndarray): Users latent factor matrix.
        qi (numpy.ndarray): Items latent factor matrix.
        algorithm (str, optional): The algorithm used to make recommendations.
            Possible values are "kNN" or "MF". Defaults to "MF".

    Returns:
        predictions (numpy.ndarray): All predictions for the given user/item pairs.
            The first column is the user id, the second column is the item id, the third
            column is the actual movie id, the fourth column is the observed rating, and
            the fifth column is the predicted rating.
    """
    if algorithm not in VALID_ALGORITHM:
        raise SystemExit(
            f"{algorithm} is not a valid algorithm. Possible values are {VALID_ALGORITHM}.\n")

    test_items = []
    test_set = np.zeros((test_set_origin.shape[0], test_set_origin.shape[1] + 1))
    test_set[:, :3] = test_set_origin
    test_set[:, 3] = true_testset_movies_id.T
    test_set[:, [0, 1]] = test_set[:, [1, 0]]

    for index in range(test_set.shape[0]):
        if test_set[index][1] == x_id:
            test_items.append(test_set[index])
    test_items = np.array(test_items)

    n_pairs = test_items.shape[0]
    predictions = np.zeros((n_pairs, test_items.shape[1] + 1))
    predictions[:, :4] = test_items

    if algorithm == "MF":
        for pair in range(n_pairs):
            predictions[pair, 4] = predict_svd_pair(
                test_items[pair, 0].astype(int),
                test_items[pair, 1].astype(int),
                global_mean,
                bu,
                bi,
                pu,
                qi,
            )
    elif algorithm == "kNN":
        for pair in range(n_pairs):
            predictions[pair, 4] = predict_pair(
                test_items[pair, 0].astype(int),
                test_items[pair, 1].astype(int),
                x_rated,
                S,
                k,
                min_k,
                x_list,
                y_list,
                global_mean,
            )

    np.clip(predictions[:, 4], 0.5, 5, out=predictions[:, 4])
    return predictions
import numpy as np
from numba import njit

def compute_similarity_matrix(train_set, sim_measure="pcc"):
    x_rated, _, n_x, _, x_list, y_list = list_ur_ir(train_set)
    if sim_measure == "pcc":
        print("Computing similarity matrix as pcc...")
        S = pcc(n_x, x_rated, min_support=1)
    elif sim_measure == "cosine":
        print("Computing similarity matrix as cosine...")
        S = cosine(n_x, x_rated, min_support=1)
    else:
        S = 0
    return S, x_rated, x_list, y_list

def fit_train_set(train_set):
    X = train_set.copy()
    # Swap user_id column to movie_id column if using iiCF
    X[:, [0, 1]] = X[:, [1, 0]]

    x_list = np.unique(X[:, 0])  # For uuCF, x -> user
    y_list = np.unique(X[:, 1])  # For uuCF, y -> item

    n_x = len(x_list)
    n_y = len(y_list)
    return n_x, n_y, X, x_list, y_list

def list_ur_ir(train_set):
    n_x, n_y, X, x_list, y_list = fit_train_set(train_set)

    # List where element `i` is an ndarray of `(x, rating)`: all x that rated y, and the ratings.
    x_rated = [[] for _ in range(n_y)]
    # List where element `i` is an ndarray of `(y, rating)`: all y rated by x, and the ratings.
    y_ratedby = [[] for _ in range(n_x)]

    for xid, yid, r in X:
        x_rated[int(yid)].append([xid, r])
        y_ratedby[int(xid)].append([yid, r])

    for yid in range(n_y):
        x_rated[yid] = np.array(x_rated[yid])
    for xid in range(n_x):
        y_ratedby[xid] = np.array(y_ratedby[xid])

    return x_rated, y_ratedby, n_x, n_y, x_list, y_list

def pcc(n_x, yr, min_support=1):
    """Compute the Pearson correlation coefficient between all pairs of users (or items).

    Only **common** users (or items) are taken into account.
    """
    prods = np.zeros((n_x, n_x), np.double)
    freq = np.zeros((n_x, n_x), np.int64)
    sqi = np.zeros((n_x, n_x), np.double)
    sqj = np.zeros((n_x, n_x), np.double)
    si = np.zeros((n_x, n_x), np.double)
    sj = np.zeros((n_x, n_x), np.double)

    for y_ratings in yr:
        prods, freq, sqi, sqj, si, sj = run_pearson_params(
            prods, freq, sqi, sqj, si, sj, y_ratings
        )

    sim = calculate_pearson_similarity(
        prods, freq, sqi, sqj, si, sj, n_x, min_support)
    return sim

@njit
def run_pearson_params(prods, freq, sqi, sqj, si, sj, y_ratings):
    for xi, ri in y_ratings:
        xi = int(xi)
        for xj, rj in y_ratings:
            xj = int(xj)
            freq[xi, xj] += 1
            prods[xi, xj] += ri * rj
            sqi[xi, xj] += ri**2
            sqj[xi, xj] += rj**2
            si[xi, xj] += ri
            sj[xi, xj] += rj
    return prods, freq, sqi, sqj, si, sj

@njit
def calculate_pearson_similarity(prods, freq, sqi, sqj, si, sj, n_x, min_sprt):
    sim = np.zeros((n_x, n_x), np.double)
    for xi in range(n_x):
        sim[xi, xi] = 1
        for xj in range(xi + 1, n_x):
            if freq[xi, xj] < min_sprt:
                sim[xi, xj] = 0
            else:
                n = freq[xi, xj]
                num = n * prods[xi, xj] - si[xi, xj] * sj[xi, xj]
                denum = np.sqrt(
                    (n * sqi[xi, xj] - si[xi, xj] ** 2)
                    * (n * sqj[xi, xj] - sj[xi, xj] ** 2)
                )
                if denum == 0:
                    sim[xi, xj] = 0
                else:
                    sim[xi, xj] = num / denum
            sim[xj, xi] = sim[xi, xj]
    return sim

def predict_pair(x_id, y_id, x_rated, S, k, min_k, x_list, y_list, global_mean):
    x_known, y_known = False, False

    if x_id in x_list:
        x_known = True
    if y_id in y_list:
        y_known = True

    if not (x_known and y_known):
        # if uuCF:
        #     print(f"Can not predict rating of user {x_id} for item {y_id}.")
        # else:
        #     print(f"Can not predict rating of user {y_id} for item {x_id}.")
        return global_mean

    return _predict(x_id, y_id, x_rated[y_id], S, k, min_k)

def _predict(x_id, y_id, x_rated, S, k, k_min):
    k_neighbors = np.zeros((k, 2))
    k_neighbors[:, 1] = -1  # All similarity degrees default to -1

    for x2, rating in x_rated:
        if int(x2) == x_id:
            continue  # Skip the item being predicted
        sim = S[int(x2), x_id]
        argmin = np.argmin(k_neighbors[:, 1])
        if sim > k_neighbors[argmin, 1]:
            k_neighbors[argmin] = np.array((sim, rating))

    # Compute weighted average
    sum_sim = sum_ratings = actual_k = 0
    for (sim, r) in k_neighbors:
        if sim > 0:
            sum_sim += sim
            sum_ratings += sim * r
            actual_k += 1

    if actual_k < k_min:
        sum_ratings = 0

    if sum_sim:
        est = sum_ratings / sum_sim
        return est
    return 0

def cosine(n_x, yr, min_support=1):
    """Compute the cosine similarity between all pairs of users (or items).

    Only **common** users (or items) are taken into account.
    """
    prods = np.zeros((n_x, n_x), np.double)
    freq = np.zeros((n_x, n_x), np.int64)
    sqi = np.zeros((n_x, n_x), np.double)
    sqj = np.zeros((n_x, n_x), np.double)

    for y_ratings in yr:
        prods, freq, sqi, sqj = run_cosine_params(
            prods, freq, sqi, sqj, y_ratings)

    sim = calculate_cosine_similarity(prods, freq, sqi, sqj, n_x, min_support)
    return sim

@njit
def run_cosine_params(prods, freq, sqi, sqj, y_ratings):
    for xi, ri in y_ratings:
        xi = int(xi)
        for xj, rj in y_ratings:
            xj = int(xj)
            freq[xi, xj] += 1
            prods[xi, xj] += ri * rj
            sqi[xi, xj] += ri**2
            sqj[xi, xj] += rj**2
    return prods, freq, sqi, sqj

@njit
def calculate_cosine_similarity(prods, freq, sqi, sqj, n_x, min_sprt):
    sim = np.zeros((n_x, n_x), np.double)
    for xi in range(n_x):
        sim[xi, xi] = 1
        for xj in range(xi + 1, n_x):
            if freq[xi, xj] < min_sprt:
                sim[xi, xj] = 0
            else:
                denum = np.sqrt(sqi[xi, xj] * sqj[xi, xj])
                sim[xi, xj] = prods[xi, xj] / denum
            sim[xj, xi] = sim[xi, xj]
    return sim

@njit
def calculate_precision_recall(user_ratings, k, threshold):
    """Calculate the precision and recall at k for the user, based on their observed
    ratings and their predicted ratings.

    Args:
        user_ratings (ndarray): An array containing the predicted rating in the first
            column and the observed rating in the second column.
        k (int): the k metric.
        threshold (float): relevance threshold.

    Returns:
        (precision, recall): the precision and recall scores for the user.
    """
    # Sort user ratings by estimated value
    # user_ratings = user_ratings[user_ratings[:, 0].argsort()][::-1]

    # Number of relevant items
    n_rel = 0
    for _, true_r in user_ratings:
        if true_r >= threshold:
            n_rel += 1

    # Number of recommended items in top k
    n_rec_k = 0
    for est, _ in user_ratings[:k]:
        if est >= threshold:
            n_rec_k += 1

    # Number of relevant and recommended items in top k
    n_rel_and_rec_k = 0
    for (est, true_r) in user_ratings[:k]:
        if true_r >= threshold and est >= threshold:
            n_rel_and_rec_k += 1

    # Precision@K: Proportion of recommended items that are relevant.
    # When n_rec_k is 0, Precision is undefined. We here set it to 0.
    if n_rec_k != 0:
        precision = n_rel_and_rec_k / n_rec_k
    else:
        precision = 0

    # Recall@K: Proportion of relevant items that are recommended.
    # When n_rel is 0, Recall is undefined. We here set it to 0.
    if n_rel != 0:
        recall = n_rel_and_rec_k / n_rel
    else:
        recall = 0

    return precision, recall
import numpy as np
from numba import njit

def sgd(
    X, pu, qi, bu, bi, n_epochs, global_mean, n_factors,
    lr_pu, lr_qi, lr_bu, lr_bi, reg_pu, reg_qi, reg_bu, reg_bi,
):
    for epoch_ix in range(n_epochs):
        pu, qi, bu, bi, train_loss = _run_svd_epoch(
            X, pu, qi, bu, bi, global_mean, n_factors,
            lr_pu, lr_qi, lr_bu, lr_bi, reg_pu, reg_qi, reg_bu, reg_bi,
        )
    return pu, qi, bu, bi, train_loss

@njit
def _run_svd_epoch(
    X, pu, qi, bu, bi, global_mean, n_factors,
    lr_pu, lr_qi, lr_bu, lr_bi, reg_pu, reg_qi, reg_bu, reg_bi,
):
    """Runs an SVD epoch, updating model weights (pu, qi, bu, bi).

    Args:
        X (ndarray): the training set.
        pu (ndarray): users latent factor matrix.
        qi (ndarray): items latent factor matrix.
        bu (ndarray): users biases vector.
        bi (ndarray): items biases vector.
        global_mean (float): ratings arithmetic mean.
        n_factors (int): number of latent factors.
        lr_pu (float, optional): Pu's specific learning rate.
        lr_qi (float, optional): Qi's specific learning rate.
        lr_bu (float, optional): bu's specific learning rate.
        lr_bi (float, optional): bi's specific learning rate.
        reg_pu (float, optional): Pu's specific regularization term.
        reg_qi (float, optional): Qi's specific regularization term.
        reg_bu (float, optional): bu's specific regularization term.
        reg_bi (float, optional): bi's specific regularization term.

    Returns:
        pu (ndarray): the updated users latent factor matrix.
        qi (ndarray): the updated items latent factor matrix.
        bu (ndarray): the updated users biases vector.
        bi (ndarray): the updated items biases vector.
        train_loss (float): training loss.
    """
    residuals = []
    for i in range(X.shape[0]):
        user, item, rating = int(X[i, 0]), int(X[i, 1]), X[i, 2]

        # Predict current rating
        pred = global_mean + bu[user] + bi[item]
        for factor in range(n_factors):
            pred += pu[user, factor] * qi[item, factor]

        err = rating - pred
        residuals.append(err)

        # Update biases
        bu[user] += lr_bu * (err - reg_bu * bu[user])
        bi[item] += lr_bi * (err - reg_bi * bi[item])

        # Update latent factors
        for factor in range(n_factors):
            puf = pu[user, factor]
            qif = qi[item, factor]
            pu[user, factor] += lr_pu * (err * qif - reg_pu * puf)
            qi[item, factor] += lr_qi * (err * puf - reg_qi * qif)

    residuals = np.array(residuals)
    train_loss = np.square(residuals).mean()
    return pu, qi, bu, bi, train_loss

@njit
def predict_svd_pair(u_id, i_id, global_mean, bu, bi, pu, qi):
    """Returns the model rating prediction for a given user/item pair.

    Args:
        u_id (int): a user id.
        i_id (int): an item id.

    Returns:
        pred (float): the estimated rating for the given user/item pair.
    """
    user_known, item_known = False, False
    pred = global_mean

    if u_id != -1:
        user_known = True
        pred += bu[u_id]

    if i_id != -1:
        item_known = True
        pred += bi[i_id]

    if user_known and item_known:
        pred += np.dot(pu[u_id], qi[i_id])

    return pred
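# A minimal sketch (not part of the original file) exercising predict_svd_pair with tiny,
# randomly initialized factors; the sizes and values below are assumptions for illustration only.
if __name__ == "__main__":
    n_users, n_items, n_factors = 4, 5, 3
    pu = np.random.normal(0, 0.1, (n_users, n_factors))
    qi = np.random.normal(0, 0.1, (n_items, n_factors))
    bu = np.zeros(n_users)
    bi = np.zeros(n_items)
    global_mean = 3.5
    # Known user 0 / known item 2: baseline plus the latent-factor dot product
    print(predict_svd_pair(0, 2, global_mean, bu, bi, pu, qi))
    # Unknown user (-1): falls back to the global mean plus the item bias only
    print(predict_svd_pair(-1, 2, global_mean, bu, bi, pu, qi))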
import pandas as pd
from prophet import Prophet
from taipy import Config

def clean_data(initial_dataset: pd.DataFrame):
    print("Cleaning Data")
    initial_dataset = initial_dataset.rename(columns={"Date": "ds", "Close": "y"})
    initial_dataset['ds'] = pd.to_datetime(initial_dataset['ds']).dt.tz_localize(None)
    cleaned_dataset = initial_dataset.copy()
    return cleaned_dataset

def retrained_model(cleaned_dataset: pd.DataFrame):
    print("Model Retraining")
    model = Prophet()
    model.fit(cleaned_dataset)
    return model

def predict(model):
    periods = 365
    return model.predict(model.make_future_dataframe(periods=periods)[-periods:])[
        ['ds', 'yhat']
    ].rename(columns={'ds': 'Date', 'yhat': 'Close_Prediction'})

## Input Data Nodes
initial_dataset_cfg = Config.configure_data_node(id="initial_dataset",
                                                 storage_type="csv",
                                                 default_path='df.csv')

cleaned_dataset_cfg = Config.configure_data_node(id="cleaned_dataset")

clean_data_task_cfg = Config.configure_task(id="clean_data_task",
                                            function=clean_data,
                                            input=initial_dataset_cfg,
                                            output=cleaned_dataset_cfg,
                                            skippable=True)

model_training_cfg = Config.configure_data_node(id="model_output")
predictions_cfg = Config.configure_data_node(id="predictions")

model_training_task_cfg = Config.configure_task(id="model_retraining_task",
                                                function=retrained_model,
                                                input=cleaned_dataset_cfg,
                                                output=model_training_cfg,
                                                skippable=True)

predict_task_cfg = Config.configure_task(id="predict_task",
                                         function=predict,
                                         input=model_training_cfg,
                                         output=predictions_cfg,
                                         skippable=True)

# Create our scenario configuration from our tasks
scenario_cfg = Config.configure_scenario_from_tasks(
    id="stock",
    task_configs=[clean_data_task_cfg, model_training_task_cfg, predict_task_cfg])

"""
import taipy as tp
import yfinance as yf

# Run of the Taipy Core service
tp.Core().run()

def get_stock_data(ticker, start):
    # downloading the stock data from START to TODAY
    ticker_data = yf.download(ticker, start, dt.datetime.now()).reset_index()
    ticker_data['Date'] = ticker_data['Date'].dt.tz_localize(None)
    return ticker_data

tickers = {'MSFT': get_stock_data('MSFT', start_date),
           'AAPL': get_stock_data('AAPL', start_date),
           'GOOG': get_stock_data('GOOG', start_date)}

def create_and_submit_scenario(stock_name):
    scenario_stock = tp.create_scenario(scenario_cfg, name=stock_name)
    scenario_stock.initial_dataset.path = f"{stock_name}.csv"
    scenario_stock.initial_dataset.write(tickers[stock_name])
    tp.submit(scenario_stock)

for stock_name in tickers.keys():
    create_and_submit_scenario(stock_name)

stocks = tp.get_scenarios()
for stock in stocks:
    print(stock.name)
    print(stock.predictions.read())
"""
from taipy.gui import Gui, notify
import pandas as pd
import yfinance as yf
from taipy.config import Config
import taipy as tp
import datetime as dt

Config.load('config_model_train.toml')
scenario_cfg = Config.scenarios['stock']

def get_stock_data(ticker, start):
    # downloading the stock data from START to TODAY
    ticker_data = yf.download(ticker, start, dt.datetime.now()).reset_index()
    ticker_data['Date'] = ticker_data['Date'].dt.tz_localize(None)
    return ticker_data

start_date = '2015-01-01'

property_chart = {"type": "lines",
                  "x": "Date",
                  "y[1]": "Open",
                  "y[2]": "Close",
                  "y[3]": "High",
                  "y[4]": "Low",
                  "color[1]": "green",
                  "color[2]": "grey",
                  "color[3]": "red",
                  "color[4]": "yellow"}

df = pd.DataFrame([], columns=['Date', 'High', 'Low', 'Open', 'Close'])
df_pred = pd.DataFrame([], columns=['Date', 'Close_Prediction'])

stock_text = "No Stock to Show"
chart_text = 'No Chart to Show'
pred_text = 'No Prediction to Show'
stock = ""
stocks = []

page = """
<|toggle|theme|>

# Stock Portfolio

### Choose the stock to show

<|layout|columns=1 1|
<|{f'The stock is {stock.name}' if stock else 'No Stock to Show'}|>

<|{stock}|selector|lov={stocks}|dropdown|adapter={lambda s: s.name}|>

<|Reset|button|on_action=reset|>

<|Press for Stock|button|on_action=update_ticker_history|active={stock}|>

<|Update Model|button|on_action=update_model|active={stock}|>

<|{f'Monthly history of stock {stock.name}' if stock else 'No Chart to Show'}|>

<|{df}|chart|properties={property_chart}|>
|>

<|{f'1 Year Close Prediction of Stock {stock.name}' if stock else 'No Prediction to Show'}|>

<|{df_pred}|chart|x=Date|y=Close_Prediction|>
"""

def reset(state):
    state.stock = ""
    state.df = pd.DataFrame([], columns=['Date', 'High', 'Low', 'Open', 'Close'])
    state.df_pred = pd.DataFrame([], columns=['Date', 'Close_Prediction'])
    notify(state, 'success', 'Reset done!')

def update_ticker_history(state):
    state.stock.initial_dataset.write(get_stock_data(state.stock.name, start_date))
    on_change(state, "stock", state.stock)
    notify(state, 'success', 'History up-to-date! You should retrain the model')

def on_change(state, var_name, var_value):
    if var_name == "stock" and var_value:
        state.df = state.stock.initial_dataset.read()
        state.df_pred = state.stock.predictions.read()

def update_model(state):
    print("Update Model Clicked")
    tp.submit(state.stock)
    on_change(state, "stock", state.stock)
    notify(state, 'success', 'Model trained and charts up-to-date!')

def on_init(state):
    tickers = {'MSFT': get_stock_data('MSFT', start_date),
               'AAPL': get_stock_data('AAPL', start_date),
               'GOOG': get_stock_data('GOOG', start_date)}

    def create_and_submit_scenario(stock_name):
        scenario_stock = tp.create_scenario(scenario_cfg, name=stock_name)
        scenario_stock.initial_dataset.path = f"{stock_name}.csv"
        scenario_stock.initial_dataset.write(tickers[stock_name])
        tp.submit(scenario_stock)

    for stock_name in tickers.keys():
        create_and_submit_scenario(stock_name)

    state.stocks = tp.get_scenarios()
    state.stock = state.stocks[0]

tp.Core().run()
Gui(page).run()
from taipy import Gui
import pandas as pd

# Interactive GUI and state: we maintain a state for each individual client, so we can have
# multiple clients, each with its own state; changing the state of one client does not affect
# the other clients' states.
# Each client has its own state and global values are made local to that state. For example,
# n_weeks can be stored as state.n_weeks, so changing it for one client does not affect the
# global n_weeks or the state of other clients.
# Each time on_change() is called, three arguments are passed in:
# 1. state
# 2. var_name
# 3. var_value
# where the state is unique to the client that made the call.
# Whenever the state of a client gets changed, on_change() is called with these three
# params; this function is a special function in Taipy.

n_weeks = 10

def read_data(dataset_path: str):
    df = pd.read_csv(dataset_path)
    df["Date"] = pd.to_datetime(df["Date"])
    return df

dataset = read_data("./dataset/dataset.csv")
dataset_week = dataset[dataset["Date"].dt.isocalendar().week == n_weeks]

def on_change(state, var_name: str, var_value):
    if var_name == "n_weeks":
        state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == var_value]

page = """
# Taipy Basics

*Week number*: *<|{n_weeks}|>*

<|{n_weeks}|slider|min=3|max=52|>

<|{dataset_week}|chart|type=bar|x=Date|y=Value|height=100%|width=100%|>
"""

Gui(page=page).run(dark_mode=False)
from taipy import Gui
import pandas as pd

# Visual elements: Taipy adds visual elements on top of Markdown to give you the ability
# to add charts, tables... The format is as follows:
# <|{variable}|visual_element_name|param_1=param_1|param_2=param_2| ... |>
# variable: a Python variable, e.g. a dataframe
# visual_element_name: the name of the visual element, e.g. table
# param_1: a parameter passed in

n_weeks = 10

def read_data(dataset_path: str):
    df = pd.read_csv(dataset_path)
    return df

dataset = read_data("./dataset/dataset.csv")

page = """
# Taipy Basics

*Week number*: *<|{n_weeks}|>*

<|{n_weeks}|slider|min=2|max=30|>

<|{dataset[9000:]}|chart|type=bar|x=Date|y=Value|height=100%|>

# Table Format

<|{dataset}|table|height=400px|width=95%|>
"""

Gui(page=page).run(dark_mode=False)
from taipy import Gui

# Note: no empty spaces at the beginning of the markdown;
# the markdown must start from the baseline.
page = """
### Hello world
"""

# Gui(page=page).run(dark_mode=False)

# You can specify the port number with run(port=xxxx);
# it is 5000 by default.
Gui(page="Intro to Taipy").run(dark_mode=True)
# Basic Python packages for handling the data
from login.login import *
import pandas as pd

# Import Taipy Core
import taipy as tp

# Import the backend of my Python code | the original pipeline_cfg and scenario_cfg are needed to create scenarios.
# fixed_variables_default is used as the default values of the fixed variables.
from config.config import fixed_variables_default, scenario_cfg, pipeline_cfg
from taipy.core.config.config import Config

# Import useful features for the Taipy frontend
from taipy.gui import Gui, Markdown, notify, Icon

# Import the frontend of my Python code | import the pages: compare_scenario_md, scenario_manager_md, databases_md.
# The * import is used because functions and/or variables of this code are sometimes needed.
# from pages.compare_cycles_md import *
from pages.compare_scenario_md import *
from pages.databases_md import *
from pages.data_visualization_md import *

# Imported to create temporary files
import pathlib

# This path is used to create a temporary file from which the table on the Datasources page can be downloaded.
#
tempdir = pathlib.Path(".tmp")
tempdir.mkdir(exist_ok=True)
PATH_TO_TABLE = str(tempdir / "table.csv")

Config.configure_global_app(clean_entities_enabled=True)
tp.clean_all_entities()

cc_create_scenarios_for_cycle()

from pages.scenario_manager_md import *

###############################################################################
# Login
###############################################################################

def on_change_user_selector(state):
    global user_selector
    if state.selected_user == 'Create new user':
        state.login = ''
        state.dialog_new_account = True
    elif state.selected_user in [user[0] for user in user_selector]:
        state.login = state.selected_user
        if state.selected_user in state.user_in_session:
            state.dialog_user = False
            reinitialize_state_after_login(state)
        else:
            state.dialog_login = True
    else:
        notify(state, "Warning", "Unexpected error")

def reinitialize_state_after_login(state):
    scenarios = [s for s in tp.get_scenarios()
                 if 'user' in s.properties and state.login == s.properties['user']]
    state.scenario_counter = len(scenarios)
    state.cs_show_comparaison = False
    state.password = ''
    update_scenario_selector(state, scenarios)

    if state.dialog_new_account:
        state.selected_scenario = None
        notify(state, 'info', 'Creating a new session')
        state.dialog_new_account = False
    else:
        if state.scenario_counter != 0:
            state.selected_scenario = state.scenario_selector[0][0]
        notify(state, 'info', 'Restoring your session')

def validate_login(state, id, action, payload):
    global user_selector, users
    # if the button pressed is "Cancel"
    if payload['args'][0] != 1:
        state.dialog_login = False
        state.dialog_new_account = False
    else:
        if state.dialog_new_account:
            if state.login in [user[0] for user in user_selector]:
                notify(state, 'error', 'This user already exists')
            elif state.login == '':
                notify(state, "Warning", "Please enter a valid login")
            elif state.login != '' and len(state.password) > 0:
                state.dialog_login = False
                state.dialog_new_account = False
                state.dialog_user = False

                users[state.login] = {}
                users[state.login]["password"] = encode(state.password)
                users[state.login]["last_visit"] = str(dt.datetime.now())
                json.dump(users, open('login/login.json', 'w'))

                reinitialize_state_after_login(state)

                state.user_selector = [
                    (state.login, Icon('images/user.png', state.login))] + state.user_selector
                user_selector = state.user_selector
                state.selected_user = state.login
                state.user_in_session += state.selected_user
        elif state.login in [user[0] for user in user_selector]:
            if test_password(users, state.login, state.password):
                state.dialog_login = False
                state.dialog_new_account = False
                state.dialog_user = False
                state.user_in_session += state.selected_user
                reinitialize_state_after_login(state)
            else:
                notify(state, "Warning", "Wrong password")
        else:
            notify(state, "Warning", "Unexpected error")

###############################################################################
# main_md
###############################################################################
# This is the main Markdown page. The other pages are embedded in this main page.
# scenario_manager_md, compare_scenario_md and databases_md become visible depending on
# the page variable. That is the purpose of the 'render' parameter.

menu_lov = [
    ("Data Visualization", Icon('images/chart_menu.svg', 'Data Visualization')),
    ("Scenario Manager", Icon('images/Scenario.svg', 'Scenario Manager')),
    ("Compare Scenarios", Icon('images/compare.svg', 'Compare Scenarios')),
    ("Compare Cycles", Icon('images/Cycle.svg', 'Compare Cycles')),
    ('Databases', Icon('images/Datanode.svg', 'Databases'))]

main_md = login_md + """
<|menu|label=Menu|lov={menu_lov}|on_action=menu_fct|id=menu_id|>

<|part|render={page == 'Data Visualization'}|
""" + da_data_visualisation_md + """
|>

<|part|render={page == 'Scenario Manager'}|
""" + sm_scenario_manager_md + """
|>

<|part|render={page == 'Compare Scenarios'}|
""" + cs_compare_scenario_md + """
|>

<|part|render={page == 'Compare Cycles'}|
""" + cc_compare_cycles_md + """
|>

<|part|render={page == 'Databases'}|
""" + da_databases_md + """
|>
"""

###############################################################################
# Important functions for creating/submitting/handling scenarios
###############################################################################

def update_scenario_selector(state, scenarios: list):
    """
    This function updates the scenario selector. It is used when a new scenario is created;
    when a scenario is created, its (id, name) pair is added to this list.

    Args:
        scenarios (list): a list of tuples (scenario, properties)
    """
    state.scenario_selector = [(s.id, s.name) if not s.is_primary else
                               (s.id, Icon('images/main.svg', s.name)) for s in scenarios]
    state.scenario_counter = len(state.scenario_selector)
    state.scenario_selector_two = state.scenario_selector.copy()
    sm_tree_dict[state.sm_selected_year][state.sm_selected_month] = state.scenario_selector

def make_primary(state):
    tp.set_primary(tp.get(state.selected_scenario))
    scenarios = [s for s in tp.get_scenarios()
                 if 'user' in s.properties and state.login == s.properties['user']]
    update_scenario_selector(state, scenarios)
    state.selected_scenario_is_primary = True

def delete_scenario_fct(state):
    if tp.get(state.selected_scenario).is_primary:
        notify(state, "warning", "You can't delete the primary scenario of the month")
    else:
        tp.delete(state.selected_scenario)
        scenarios = [s for s in tp.get_scenarios()
                     if 'user' in s.properties and state.login == s.properties['user']]
        update_scenario_selector(state, scenarios)
        if state.scenario_counter != 0:
            state.selected_scenario = state.scenario_selector[0][0]

def create_new_scenario(state):
    """
    This function is used when the 'Create' button on the scenario_manager_md page is pressed.
    See the scenario_manager_md page for more details. It configures another scenario,
    writes it and submits it.

    Args:
        state (_type_): the state object of Taipy
    """
    # Update the scenario counter
    state.scenario_counter += 1
    print("Creating scenario...")

    name = "Scenario " + dt.datetime.now().strftime('%d-%b-%Y') + " Nb : " + \
        str(state.scenario_counter)
    scenario = tp.create_scenario(scenario_cfg, name=name)
    scenario.properties['user'] = state.login

    # Get all scenarios and their properties.
    print("Getting properties...")
    scenarios = [s for s in tp.get_scenarios()
                 if 'user' in s.properties and state.login == s.properties['user']]

    # Change the selected scenario: the new scenario is the selected scenario.
    #
    state.selected_scenario = scenario.id

    # Update the scenario selector.
    print("Updating scenario selector...")
    update_scenario_selector(state, scenarios)

    # Submit this scenario
    print("Submitting it...")
    submit_scenario(state)

def catch_error_in_submit(state):
    """
    This function is used to catch the errors that can occur when submitting a scenario.
    When an error is caught, a notification is shown and the variables are changed to
    prevent the error. The errors come from the solution of the Cplex model, which can
    become infeasible or unbounded if the fixed variables are set incorrectly.

    Args:
        state (_type_): the state object of Taipy
    """
    # If the initial production is higher than the maximum production capacity
    if state.fixed_variables["Initial_Production_FPA"] > state.fixed_variables["Max_Capacity_FPA"]:
        state.fixed_variables["Initial_Production_FPA"] = state.fixed_variables["Max_Capacity_FPA"]
        notify(state, "warning", "Value of initial production FPA is greater than max production A")

    # If the initial production is higher than the maximum production capacity
    if state.fixed_variables["Initial_Production_FPB"] > state.fixed_variables["Max_Capacity_FPB"]:
        state.fixed_variables["Initial_Production_FPB"] = state.fixed_variables["Max_Capacity_FPB"]
        notify(state, "warning", "Value of initial production FPB is greater than max production B")

    # If the initial stock is higher than the maximum stock capacity
    if state.fixed_variables["Initial_Stock_RPone"] > state.fixed_variables["Max_Stock_RPone"]:
        state.fixed_variables["Initial_Stock_RPone"] = state.fixed_variables["Max_Stock_RPone"]
        notify(state, "warning", "Value of initial stock RP1 is greater than max stock 1")

    # If the initial stock is higher than the maximum stock capacity
    if state.fixed_variables["Initial_Stock_RPtwo"] > state.fixed_variables["Max_Stock_RPtwo"]:
        state.fixed_variables["Initial_Stock_RPtwo"] = state.fixed_variables["Max_Stock_RPtwo"]
        notify(state, "warning", "Value of initial stock RP2 is greater than max stock 2")

    # If the combined initial production is higher than the maximum joint production capacity
    #
    if state.fixed_variables["Initial_Production_FPA"] + \
            state.fixed_variables["Initial_Production_FPB"] > state.fixed_variables["Max_Capacity_of_FPA_and_FPB"]:
        state.fixed_variables["Initial_Production_FPA"] = int(state.fixed_variables["Max_Capacity_of_FPA_and_FPB"] / 2)
        state.fixed_variables["Initial_Production_FPB"] = int(state.fixed_variables["Max_Capacity_of_FPA_and_FPB"] / 2)
        notify(state, "warning", "Value of initial productions is greater than the max capacities")

def submit_scenario(state):
    """
    This function submits the selected scenario. It is used when the 'Submit' button is
    pressed or when a new scenario is created. It checks for errors, then changes the
    parameters of the problem and submits the scenario. Finally, it updates all the
    variables we want to update.

    Args:
        state (_type_): the state object of Taipy

    Returns:
        _type_: _description_
    """
    detect_inactive_session(state)

    # Check if there are errors in the parameters that will be given to the scenario.
    #
    catch_error_in_submit(state)

    # Get the scenario
    scenario = tp.get(state.selected_scenario)

    # Set the scenario with the right parameters
    scenario.fixed_variables.write(state.fixed_variables._dict)

    # Run the scenario
    tp.submit(scenario)

    # Update all the variables we want to update (ch_results, pie_results and the metrics)
    #
    update_variables(state)

def update_variables(state):
    """This function is only used when submit_scenario is called or when selected_scenario
    changes. It updates all the useful variables we want to update.

    Args:
        state (_type_): the state object of Taipy
    """
    # Get the selected scenario
    scenario = tp.get(state.selected_scenario)

    # Read the results
    state.ch_results = scenario.pipelines['pipeline'].results.read()
    state.pie_results = pd.DataFrame(
        {
            "values": state.ch_results.sum(axis=0),
            "labels": list(state.ch_results.columns)
        })

    state.sum_costs = state.ch_results['Total Cost'].sum()

    bool_costs_of_stock = [c for c in state.ch_results.columns
                           if 'Cost' in c and 'Total' not in c and 'Stock' in c]
    state.sum_costs_of_stock = int(state.ch_results[bool_costs_of_stock].sum(axis=1)
                                   .sum(axis=0))

    bool_costs_of_BO = [c for c in state.ch_results.columns
                        if 'Cost' in c and 'Total' not in c and 'BO' in c]
    state.sum_costs_of_BO = int(state.ch_results[bool_costs_of_BO].sum(axis=1)
                                .sum(axis=0))

def create_chart(ch_results: pd.DataFrame, var: str):
    """Creates/updates the chart table that can be seen on the 'Databases' page.
    This function is used in the 'on_change' function to change the chart when the
    selected graph changes.

    Args:
        ch_results (pd.DataFrame): the results database that comes from the state
        var (str): the string that has to be found in the columns that are going to be
            used to create the chart table

    Returns:
        pd.DataFrame: the chart with the proper columns
    """
    if var == 'Cost':
        columns = ['index'] + [col for col in ch_results.columns if var in col]
    else:
        columns = ['index'] + [col for col in ch_results.columns
                               if var in col and 'Cost' not in col]
    chart = ch_results[columns]
    return chart

def on_change(state, var_name, var_value):
    """This function is called when a change of a state variable is done. When a change is
    detected, actions can be taken depending on the changed variable.

    Args:
        state (State): the state object of Taipy
        var_name (str): the changed variable name
        var_value (obj): the changed variable value
    """
    # If the changed variable is the selected scenario
    if var_name == "selected_scenario" and var_value is not None:
        scenario = tp.get(state.selected_scenario)
        state.selected_scenario_is_primary = scenario.is_primary
        if scenario.results.is_ready_for_reading:
            # When the scenario changes, set the sliders to the right values.
            #
            fixed_temp = tp.get(state.selected_scenario).fixed_variables.read()
            state_fixed_variables = state.fixed_variables._dict.copy()
            for key in state.fixed_variables.keys():
                state_fixed_variables[key] = fixed_temp[key]
            state.fixed_variables = state_fixed_variables

            # I update all the other useful variables
            update_variables(state)

    if var_name == "dialog_user" or var_name == "dialog_login" or var_name == "dialog_new_account" or var_name == "user_selected":
        with open('login/login.json', "r") as f:
            state.users = json.load(f)
        state.user_selector = [(user, Icon('images/user.png', user)) for user in state.users.keys()]
        state.user_selector += [('Create new user', Icon('images/new_account.png', 'Create new user'))]

    # If a graph is selected, or the scenario changes while we are on the 'Databases' page,
    # or we navigate to the 'Databases' page, the chart table has to be updated.
if (var_name == 'sm_graph_selected' or var_name == "selected_scenario" and state.page =='Databases')\ or (var_name == 'page' and var_value == 'Databases'): str_to_select_chart = None if state.sm_graph_selected == 'Costs': str_to_select_chart = 'Cost' state.cost_data = create_chart(state.ch_results, str_to_select_chart) elif state.sm_graph_selected == 'Purchases': str_to_select_chart = 'Purchase' state.purchase_data = create_chart(state.ch_results, str_to_select_chart) elif state.sm_graph_selected == 'Productions': str_to_select_chart = 'Production' state.production_data = create_chart(state.ch_results, str_to_select_chart) elif state.sm_graph_selected == 'Stocks': str_to_select_chart = 'Stock' state.stock_data = create_chart(state.ch_results, str_to_select_chart) elif state.sm_graph_selected == 'Back Order': str_to_select_chart = 'BO' state.bo_data = create_chart(state.ch_results, str_to_select_chart) elif state.sm_graph_selected == 'Product FPA': str_to_select_chart = 'FPA' state.fpa_data = create_chart(state.ch_results, str_to_select_chart) elif state.sm_graph_selected == 'Product FPB': str_to_select_chart = 'FPB' state.fpb_data = create_chart(state.ch_results, str_to_select_chart) elif state.sm_graph_selected == 'Product RP1': str_to_select_chart = 'RP1' state.rp1_data = create_chart(state.ch_results, str_to_select_chart) elif state.sm_graph_selected == 'Product RP2': str_to_select_chart = 'RP2' state.rp2_data = create_chart(state.ch_results, str_to_select_chart) state.chart = create_chart(state.ch_results, str_to_select_chart) state.partial_table.update_content(state, da_create_display_table_md(str_to_select_chart.lower() + '_data')) # '데이터베이스' 페이지에 있는 경우 임시 csv 파일을 만들어야 합니다. # if state.page == 'Databases': state.d_chart_csv_path = PATH_TO_TABLE state.chart.to_csv(state.d_chart_csv_path, sep=',') # 초기 페이지는 "시나리오 관리자" 페이지입니다. page = "Data Visualization" def menu_fct(state, var_name: str, fct, var_value): """Functions that is called when there is a change in the menu control Args: state (_type_): the state object of Taipy var_name (str): the changed variable name var_value (_type_): the changed variable value """ # 올바른 페이지를 렌더링하기 위해 state.page 변수의 값을 변경하십시오. 
# try: state.page = var_value['args'][0] except BaseException: print("Warning : No args were found") # 'Databases' 페이지에서만 선택할 수 있는 sm_graph_selected의 'All' 옵션에 대한 보안 # if state.page != 'Databases' and state.sm_graph_selected == 'All': state.sm_graph_selected = 'Costs' ########################################################################## # 상태 및 초기 값 생성 ########################################################################## gui = Gui(page=Markdown(main_md), css_file='main') partial_table = gui.add_partial(da_display_table_md) # 테이블의 너비와 높이 값 width_table = "100%" height_table = "100%" # 차트의 너비와 높이 값 width_chart = "100%" height_chart = "60vh" def initialize_variables(): # 차트의 초기값 global scenario, pie_results, sum_costs, sum_costs_of_stock, sum_costs_of_BO, scenario_counter,\ cost_data, stock_data, purchase_data, production_data, fpa_data, fpb_data, bo_data, rp1_data, rp2_data, chart, ch_results,\ chart, scenario_selector, selected_scenario, selected_scenario_is_primary, scenario_selector_two, selected_scenario_two,\ fixed_variables fixed_variables = fixed_variables_default scenario = None pie_results = pd.DataFrame( { "values": [1] * len(list(ch_results.columns)), "labels": list(ch_results.columns) }, index=list(ch_results.columns) ) sum_costs = 0 sum_costs_of_stock = 0 sum_costs_of_BO = 0 sum_costs_of_BO = 0 scenario_counter = 0 cost_data = create_chart(ch_results, 'Cost') purchase_data = create_chart(ch_results, 'Purchase') production_data = create_chart(ch_results, 'Production') stock_data = create_chart(ch_results, 'Stock') bo_data = create_chart(ch_results, 'BO') fpa_data = create_chart(ch_results, 'FPA') fpb_data = create_chart(ch_results, 'FPB') rp1_data = create_chart(ch_results, 'RP1') rp2_data = create_chart(ch_results, 'RP2') chart = ch_results[['index', 'Purchase RP1 Cost', 'Stock RP1 Cost', 'Stock RP2 Cost', 'Purchase RP2 Cost', 'Stock FPA Cost', 'Stock FPB Cost', 'BO FPA Cost', 'BO FPB Cost', 'Total Cost']] # 페이지에 표시될 선택기 scenario_selector = [] selected_scenario = None selected_scenario_is_primary = False scenario_selector_two = scenario_selector.copy() selected_scenario_two = None initialize_variables() pd.read_csv('data/time_series_demand copy.csv').to_csv('data/time_series_demand.csv') if __name__ == "__main__": gui.run(title="Production planning", host='0.0.0.0', port=os.environ.get('PORT', '5050'), dark_mode=False, use_reloader=False) else: app = gui.run(title="Production planning", dark_mode=False, run_server=False)
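# A minimal, self-contained sketch (not part of the original app) of the Taipy GUI
# state/callback pattern that the main module above relies on: module-level variables
# become the initial state, callbacks receive a per-client `state`, and `notify`
# surfaces messages in the UI. The names here (counter, message, on_button) are
# illustrative only.
from taipy.gui import Gui, notify

counter = 0
message = "Press the button"

md = """
<|{message}|>
<|Increment|button|on_action=on_button|>
"""

def on_button(state):
    # Assignments on `state` update the bound controls for this client only.
    state.counter += 1
    state.message = f"Clicked {state.counter} times"
    notify(state, "info", "Counter updated")

if __name__ == "__main__":
    Gui(page=md).run()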
import taipy as tp
from taipy import Scope, Config
from taipy.core import Frequency
import json

from algos.algos import *

# This module creates scenario_cfg and pipeline_cfg.
# These two variables are used by the main code to create new scenarios.
# The code below builds the execution graph.

###############################################################################
# Data nodes
###############################################################################

# Create the first data node; its source is a csv file.
path_to_demand = 'data/time_series_demand.csv'
demand_cfg = Config.configure_data_node(id="demand",
                                        storage_type="csv",
                                        scope=Scope.SCENARIO,
                                        path=path_to_demand,
                                        has_header=True)

fixed_variables_default_json = open('data/fixed_variables_default.json')
fixed_variables_default = json.load(fixed_variables_default_json)

# Create a second data node whose default data is fixed_variables_default.
# This is the data node we write to when submitting different values for the fixed variables.
fixed_variables_cfg = Config.configure_data_node(id="fixed_variables",
                                                 default_data=fixed_variables_default,
                                                 scope=Scope.PIPELINE)

# Data nodes that track the model: the model_created data node and the model_solved data node.
model_created_cfg = Config.configure_data_node(id="model_created", scope=Scope.PIPELINE)
model_solved_cfg = Config.configure_data_node(id="model_solved", scope=Scope.PIPELINE)

# This is the data node the main code uses to read the results.
results_cfg = Config.configure_data_node(id="results", scope=Scope.PIPELINE)

###############################################################################
# Tasks
###############################################################################

# (demand_cfg, fixed_variables_cfg) -> |create_model| -> (model_created_cfg)
create_model_task = Config.configure_task(id="create_model",
                                          input=[demand_cfg, fixed_variables_cfg],
                                          function=create_model,
                                          output=[model_created_cfg])

# (model_created_cfg) -> |solve_model| -> (model_solved_cfg)
solve_model_cfg = Config.configure_task(id="solve_model",
                                        input=[model_created_cfg],
                                        function=solve_model,
                                        output=[model_solved_cfg])

# (model_solved_cfg, fixed_variables_cfg, demand_cfg) -> |create_results| -> (results_cfg)
create_results_cfg = Config.configure_task(id="create_results",
                                           input=[model_solved_cfg, fixed_variables_cfg, demand_cfg],
                                           function=create_results,
                                           output=[results_cfg])

###############################################################################
# Pipeline and scenario configuration
###############################################################################

# Pipeline: the execution of a sequence of tasks.
pipeline_cfg = Config.configure_pipeline(id="pipeline",
                                         task_configs=[create_model_task, solve_model_cfg, create_results_cfg])

# Scenario: the execution of a sequence of pipelines (here, only one).
# New scenarios are created from this variable.
scenario_cfg = Config.configure_scenario(id="scenario",
                                         pipeline_configs=[pipeline_cfg],
                                         frequency=Frequency.MONTHLY)
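# A minimal usage sketch for the configuration above (assuming the same Taipy Core API the
# app itself calls): instantiate a scenario from scenario_cfg, write new fixed variables,
# run the pipeline, and read the results data node back. The scenario name is made up.
import taipy as tp
from config.config import scenario_cfg, fixed_variables_default

if __name__ == "__main__":
    scenario = tp.create_scenario(scenario_cfg, name="Standalone test")
    scenario.fixed_variables.write(dict(fixed_variables_default))   # write the input data node
    tp.submit(scenario)                                              # runs create_model -> solve_model -> create_results
    print(scenario.pipelines['pipeline'].results.read())             # read the results data node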
import pandas as pd import numpy as np from pulp import * # 이 코드는 이러한 기능이 필요한 작업을 생성하는 config.py에서 사용됩니다. # 이 함수는 전형적인 파이썬 함수입니다(Taipy는 없습니다) ############################################################################### # 기능 ############################################################################### def create_model(demand: pd.DataFrame, fixed_variables: dict): """이 함수는 모델을 생성합니다. 문제의 모든 변수와 제약을 생성합니다. 또한 목적 함수를 생성합니다. Args: demand (pd.DataFrame): 수요 데이터 프레임 fixed_variables (dict): 고정된 변수 사전 Returns: dict: (생성된 모델이 있는) 모델 정보 """ print("모델 생성 중...") monthly_demand_FPA = demand["Demand_A"] monthly_demand_FPB = demand["Demand_B"] nb_periods = len(monthly_demand_FPA) # 모델 생성 prob = LpProblem("Production_Planning", LpMinimize) # 변수 생성 # 제품 A의 경우 monthly_production_FPA = [ LpVariable(f"Monthly_Production_FPA_{m}", 0) for m in range(nb_periods) ] monthly_stock_FPA = [ LpVariable(f"Monthly_Stock_FPA_{m}", 0) for m in range(nb_periods) ] monthly_back_order_FPA = [ LpVariable(f"Monthly_Back_Order_FPA_{m}", 0) for m in range(nb_periods) ] # 제품 B의 경우 monthly_production_FPB = [ LpVariable(f"Monthly_Production_FPB_{m}", 0) for m in range(nb_periods) ] monthly_stock_FPB = [ LpVariable(f"Monthly_Stock_FPB_{m}", 0) for m in range(nb_periods) ] monthly_back_order_FPB = [ LpVariable(f"Monthly_Back_Order_FPB_{m}", 0) for m in range(nb_periods) ] # 제품 1의 경우 monthly_purchase_RPone = [ LpVariable(f"Monthly_Purchase_RPone_{m}", 0) for m in range(nb_periods) ] monthly_stock_RPone = [ LpVariable(f"Monthly_Stock_RPone_{m}", 0) for m in range(nb_periods) ] monthly_stock_not_used_RPone = [ LpVariable(f"Monthly_Stock_not_used_RPone{m}", 0) for m in range(nb_periods) ] monthly_stock_RPone_for_FPA = [ LpVariable(f"Monthly_Stock_RPone_for_FPA{m}", 0) for m in range(nb_periods) ] monthly_stock_RPone_for_FPB = [ LpVariable(f"Monthly_Stock_RPone_for_FPB{m}", 0) for m in range(nb_periods) ] # 제품 2의 경우 monthly_purchase_RPtwo = [ LpVariable(f"Monthly_Purchase_RPtwo{m}", 0) for m in range(nb_periods) ] monthly_stock_RPtwo = [ LpVariable(f"Monthly_Stock_RP{m}two", 0) for m in range(nb_periods) ] monthly_stock_not_used_RPtwo = [ LpVariable(f"Monthly_Stock_not_used_RPtwo{m}", 0) for m in range(nb_periods) ] monthly_stock_RPtwo_for_FPA = [ LpVariable(f"Monthly_Stock_RPtwo_for_FPA{m}", 0) for m in range(nb_periods) ] monthly_stock_RPtwo_for_FPB = [ LpVariable(f"Monthly_Stock_RPtwo_for_FPB{m}", 0) for m in range(nb_periods) ] # 제약 조건 생성 # 제품 A에 대한 Kirchoff의 법칙 for m in range(1, nb_periods): prob += ( monthly_production_FPA[m] - monthly_back_order_FPA[m - 1] + monthly_stock_FPA[m - 1] == monthly_demand_FPA[m] + monthly_stock_FPA[m] - monthly_back_order_FPA[m] ) # 제품 B에 대한 Kirchoff의 법칙 for m in range(1, nb_periods): prob += ( monthly_production_FPB[m] - monthly_back_order_FPB[m - 1] + monthly_stock_FPB[m - 1] == monthly_demand_FPB[m] + monthly_stock_FPB[m] - monthly_back_order_FPB[m] ) # 제품 1에 대한 Kirchoff의 법칙 for m in range(1, nb_periods): prob += ( monthly_purchase_RPone[m - 1] + monthly_stock_not_used_RPone[m - 1] == monthly_stock_RPone[m] ) # 없음 문제에 대한 MS 수정 prob += monthly_purchase_RPone[nb_periods - 1] == 0 for m in range(1, nb_periods): prob += ( monthly_purchase_RPtwo[m - 1] + monthly_stock_not_used_RPtwo[m - 1] == monthly_stock_RPtwo[m] ) # 없음 문제에 대한 MS 수정 prob += monthly_purchase_RPtwo[nb_periods - 1] == 0 for m in range(nb_periods): prob += monthly_production_FPA[m] <= fixed_variables["Max_Capacity_FPA"] prob += monthly_production_FPA[0] == fixed_variables["Initial_Production_FPA"] prob += monthly_back_order_FPA[0] == 
fixed_variables["Initial_Back_Order_FPA"] prob += monthly_stock_FPA[0] == fixed_variables["Initial_Stock_FPA"] # 제품 A에 대한 BOM에 대한 제약 for m in range(1, nb_periods): prob += ( monthly_production_FPA[m] == fixed_variables["number_RPone_to_produce_FPA"] * monthly_stock_RPone_for_FPA[m - 1] + fixed_variables["number_RPtwo_to_produce_FPA"] * monthly_stock_RPtwo_for_FPA[m - 1] ) for m in range(nb_periods): prob += ( fixed_variables["number_RPone_to_produce_FPA"] * monthly_stock_RPone_for_FPA[m] == fixed_variables["number_RPtwo_to_produce_FPA"] * monthly_stock_RPtwo_for_FPA[m] ) # 변수에 대한 제약조건: 제품 A의 최대값과 초기값 for m in range(nb_periods): prob += monthly_production_FPB[m] <= fixed_variables["Max_Capacity_FPB"] prob += monthly_production_FPB[0] == fixed_variables["Initial_Production_FPB"] prob += monthly_back_order_FPB[0] == fixed_variables["Initial_Back_Order_FPB"] prob += monthly_stock_FPB[0] == fixed_variables["Initial_Stock_FPB"] # 제품 B에 대한 BOM에 대한 제약 for m in range(1, nb_periods): prob += ( monthly_production_FPB[m] == fixed_variables["number_RPone_to_produce_FPB"] * monthly_stock_RPone_for_FPB[m - 1] + fixed_variables["number_RPtwo_to_produce_FPB"] * monthly_stock_RPtwo_for_FPB[m - 1] ) for m in range(nb_periods): prob += ( fixed_variables["number_RPone_to_produce_FPB"] * monthly_stock_RPone_for_FPB[m] == fixed_variables["number_RPtwo_to_produce_FPB"] * monthly_stock_RPtwo_for_FPB[m] ) for m in range(nb_periods): prob += monthly_stock_RPone[m] <= fixed_variables["Max_Stock_RPone"] prob += monthly_stock_RPone[0] == fixed_variables["Initial_Stock_RPone"] prob += monthly_purchase_RPone[0] == fixed_variables["Initial_Purchase_RPone"] for m in range(nb_periods): prob += monthly_stock_RPone[m] == ( monthly_stock_not_used_RPone[m] + monthly_stock_RPone_for_FPA[m] + monthly_stock_RPone_for_FPB[m] ) # 변수에 대한 제약 조건: 제품 1의 최대값 및 초기값 for m in range(nb_periods): prob += monthly_stock_RPtwo[m] <= fixed_variables["Max_Stock_RPtwo"] prob += monthly_stock_RPtwo[0] == fixed_variables["Initial_Stock_RPtwo"] prob += monthly_purchase_RPtwo[0] == fixed_variables["Initial_Purchase_RPtwo"] # 제품 1의 재고를 정의하는 제약 조건 for m in range(nb_periods): prob += monthly_stock_RPtwo[m] == ( monthly_stock_not_used_RPtwo[m] + monthly_stock_RPtwo_for_FPA[m] + monthly_stock_RPtwo_for_FPB[m] ) # 변수에 대한 제약 조건: 제품 A 및 B의 최대 값(누적) for m in range(nb_periods): prob += ( monthly_production_FPA[m] + monthly_demand_FPB[m] <= fixed_variables["Max_Capacity_of_FPA_and_FPB"] ) # 목적 함수 설정 prob += lpSum( fixed_variables["Weight_of_Back_Order"] / 100 * ( fixed_variables["cost_FPA_Back_Order"] * monthly_back_order_FPA[m] + fixed_variables["cost_FPB_Back_Order"] * monthly_back_order_FPB[m] ) + fixed_variables["Weight_of_Stock"] / 100 * ( fixed_variables["cost_FPA_Stock"] * monthly_stock_FPA[m] + fixed_variables["cost_FPB_Stock"] * monthly_stock_FPB[m] + fixed_variables["cost_RPone_Stock"] * monthly_stock_RPone[m] + fixed_variables["cost_RPtwo_Stock"] * monthly_stock_RPtwo[m] ) for m in range(nb_periods) ) # 필요한 모든 정보를 사전에 담기 model_info = { "model_created": prob, "model_solved": None, "Monthly_Production_FPA": monthly_production_FPA, "Monthly_Stock_FPA": monthly_stock_FPA, "Monthly_Back_Order_FPA": monthly_back_order_FPA, "Monthly_Production_FPB": monthly_production_FPB, "Monthly_Stock_FPB": monthly_stock_FPB, "Monthly_Back_Order_FPB": monthly_back_order_FPB, "Monthly_Stock_RPone": monthly_stock_RPone, "Monthly_Stock_RPtwo": monthly_stock_RPtwo, "Monthly_Purchase_RPone": monthly_purchase_RPone, "Monthly_Purchase_RPtwo": monthly_purchase_RPtwo, } 
print("Model created") return model_info def solve_model(model_info: dict): """이 함수는 모델을 풀고 사전에 있는 모든 솔루션을 반환합니다. Args: model_info (dict): create_model 함수에 의해 전달된 model_info Returns: dict: 해결된 모델 및 솔루션 """ print("모델 풀기...") prob = model_info["model_created"] nb_periods = len(model_info["Monthly_Production_FPA"]) # 모델 풀기 m_solved = prob.solve() # 올바른 변수에서 솔루션 얻기 # 제품 A의 경우 prod_sol_FPA = [ value(model_info["Monthly_Production_FPA"][p]) for p in range(nb_periods) ] stock_sol_FPA = [ value(model_info["Monthly_Stock_FPA"][p]) for p in range(nb_periods) ] bos_sol_FPA = [ value(model_info["Monthly_Back_Order_FPA"][p]) for p in range(nb_periods) ] # 제품 B의 경우 prod_sol_FPB = [ value(model_info["Monthly_Production_FPB"][p]) for p in range(nb_periods) ] stock_sol_FPB = [ value(model_info["Monthly_Stock_FPB"][p]) for p in range(nb_periods) ] bos_sol_FPB = [ value(model_info["Monthly_Back_Order_FPB"][p]) for p in range(nb_periods) ] # 제품 1의 경우 stock_RPone_sol = [ value(model_info["Monthly_Stock_RPone"][p]) for p in range(nb_periods) ] stock_RPtwo_sol = [ value(model_info["Monthly_Stock_RPtwo"][p]) for p in range(nb_periods) ] # 제품 2의 경우 purchase_RPone_sol = [ value(model_info["Monthly_Purchase_RPone"][p]) for p in range(nb_periods) ] purchase_RPtwo_sol = [ value(model_info["Monthly_Purchase_RPtwo"][p]) for p in range(nb_periods) ] # 사전에 넣기 model_info = { "model_created": prob, "model_solved": m_solved, "Monthly_Production_FPA": prod_sol_FPA, "Monthly_Stock_FPA": stock_sol_FPA, "Monthly_Back_Order_FPA": bos_sol_FPA, "Monthly_Production_FPB": prod_sol_FPB, "Monthly_Stock_FPB": stock_sol_FPB, "Monthly_Back_Order_FPB": bos_sol_FPB, "Monthly_Stock_RPone": stock_RPone_sol, "Monthly_Purchase_RPone": purchase_RPone_sol, "Monthly_Stock_RPtwo": stock_RPtwo_sol, "Monthly_Purchase_RPtwo": purchase_RPtwo_sol, } print("Model solved") return model_info def create_results(model_info: dict, fixed_variables: dict, demand: pd.DataFrame): """이 함수는 모델의 결과를 생성합니다. 결과 데이터 프레임은 모든 유용한 정보의 연결입니다. 
Args: model_info (dict): solve_model 함수에 의해 생성된 사전 fixed_variables (dict): 문제의 고정 변수 demand (pd.DataFrame): A와 B에 대한 수요 Returns: pd.DataFrame: 솔루션에 대한 모든 유용한 정보를 수집하는 데이터 프레임 """ print("결과 생성 중...") # A와 B에 대한 수요 얻기 demand_series_FPA = demand["Demand_A"] demand_series_FPB = demand["Demand_B"] nb_periods = len(demand_series_FPA) # 다른 비용을 계산 cost_FPBO_FPA = fixed_variables["cost_FPA_Back_Order"] * np.array( model_info["Monthly_Back_Order_FPA"] ) cost_stock_FPA = fixed_variables["cost_FPA_Stock"] * np.array( model_info["Monthly_Stock_FPA"] ) cost_FPBO_FPB = fixed_variables["cost_FPB_Back_Order"] * np.array( model_info["Monthly_Back_Order_FPB"] ) cost_stock_FPB = fixed_variables["cost_FPB_Stock"] * np.array( model_info["Monthly_Stock_FPB"] ) cost_stock_RPone = fixed_variables["cost_RPone_Stock"] * np.array( model_info["Monthly_Stock_RPone"] ) cost_stock_RPtwo = fixed_variables["cost_RPtwo_Stock"] * np.array( model_info["Monthly_Stock_RPtwo"] ) cost_product_RPone = fixed_variables["cost_RPone_Purchase"] * np.array( model_info["Monthly_Purchase_RPone"] ) cost_product_RPtwo = fixed_variables["cost_RPtwo_Purchase"] * np.array( model_info["Monthly_Purchase_RPtwo"] ) # 총 비용(비용의 합계) total_cost = ( cost_FPBO_FPA + cost_stock_FPA + cost_FPBO_FPB + cost_stock_FPB + cost_stock_RPone + cost_product_RPone + cost_product_RPtwo + cost_stock_RPtwo ) # 데이터 프레임을 생성하는 데 사용할 사전 생성 dict_for_dataframe = { "Monthly Production FPA": model_info["Monthly_Production_FPA"], "Monthly Stock FPA": model_info["Monthly_Stock_FPA"], "Monthly BO FPA": model_info["Monthly_Back_Order_FPA"], "Max Capacity FPA": [fixed_variables["Max_Capacity_FPA"]] * nb_periods, "Monthly Production FPB": model_info["Monthly_Production_FPB"], "Monthly Stock FPB": model_info["Monthly_Stock_FPB"], "Monthly BO FPB": model_info["Monthly_Back_Order_FPB"], "Max Capacity FPB": [fixed_variables["Max_Capacity_FPB"]] * nb_periods, "Monthly Stock RP1": model_info["Monthly_Stock_RPone"], "Monthly Stock RP2": model_info["Monthly_Stock_RPtwo"], "Monthly Purchase RP1": model_info["Monthly_Purchase_RPone"], "Monthly Purchase RP2": model_info["Monthly_Purchase_RPtwo"], "Demand FPA": demand_series_FPA, "Demand FPB": demand_series_FPB, "Stock FPA Cost": cost_stock_FPA, "Stock FPB Cost": cost_stock_FPB, "Stock RP1 Cost": cost_stock_RPone, "Stock RP2 Cost": cost_stock_RPtwo, "Purchase RP1 Cost": cost_product_RPone, "Purchase RP2 Cost": cost_product_RPtwo, "BO FPA Cost": cost_FPBO_FPA, "BO FPB Cost": cost_FPBO_FPB, "Total Cost": total_cost, "index": range(nb_periods), } results = pd.DataFrame(dict_for_dataframe).round() print("Results created") # 모델이 생성되는 방식 때문에 마지막 두 관찰을 지웁니다. # 값에는 의미가 없습니다. return results[:-2]
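# A tiny self-contained PuLP example (illustrative only, not part of the planning model)
# showing the same pattern used by create_model/solve_model above: declare variables,
# add constraints with `+=`, set an lpSum objective, solve, then read values with value().
from pulp import LpProblem, LpMinimize, LpVariable, lpSum, value

toy = LpProblem("Toy_Planning", LpMinimize)
production = [LpVariable(f"prod_{m}", lowBound=0) for m in range(3)]
demand = [10, 20, 15]

for m in range(3):
    toy += production[m] >= demand[m]                 # meet demand in each period
toy += lpSum(2 * production[m] for m in range(3))     # minimize total cost (unit cost = 2)

toy.solve()
print([value(p) for p in production])                 # expected: [10.0, 20.0, 15.0]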
import numpy as np
import pandas as pd

# This module generates the csv file for the demand, which is the source data of the problem.


def create_time_series(nb_months=12, mean_A=840, mean_B=760, std_A=96, std_B=72,
                       amplitude_A=108, amplitude_B=144):
    time_series_A = [mean_A]
    time_series_B = [mean_B]

    for i in range(1, nb_months):
        time_series_A.append(np.random.normal(mean_A + amplitude_A * np.sin(2 * np.pi * i / 12), std_A))
        time_series_B.append(np.random.normal(mean_B + amplitude_B * np.sin((2 * np.pi * (i + 6)) / 12), std_B))

    time_series_A = pd.Series(time_series_A)
    time_series_B = pd.Series(time_series_B)

    month = [i % 12 for i in range(nb_months)]
    year = [i // 12 + 2020 for i in range(nb_months)]

    df_time_series = pd.DataFrame({"Year": year,
                                   "Month": month,
                                   "Demand_A": time_series_A,
                                   "Demand_B": time_series_B})
    return df_time_series


def time_series_to_csv(nb_months=12, mean_A=840, mean_B=760, std_A=96, std_B=72,
                       amplitude_A=108, amplitude_B=144):
    time_serie_data = create_time_series(nb_months, mean_A, mean_B, std_A, std_B,
                                         amplitude_A, amplitude_B)
    time_serie_data.to_csv('data/time_series_demand.csv')
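# Example call (hypothetical parameter values): regenerate two years of synthetic demand.
# The file it writes, data/time_series_demand.csv, is the one the "demand" data node in
# config.py reads.
from data.create_data import time_series_to_csv

time_series_to_csv(nb_months=24)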
da_display_table_md = "<center>\n<|{ch_results.round()}|table|columns={list(chart.columns)}|width=fit-content|height={height_table}|></center>\n" d_chart_csv_path = None def da_create_display_table_md(str_to_select_chart): return "<center>\n<|{" + str_to_select_chart + \ "}|table|width=fit-content|height={height_table}|></center>\n" da_databases_md = """ # 데이터 소스 <|layout|columns=3 2 1|columns[mobile]=1| <layout_scenario| <|layout|columns=1 1 3|columns[mobile]=1| <year| Year <|{sm_selected_year}|selector|lov={sm_year_selector}|dropdown|width=100%|on_change=change_sm_month_selector|> |year> <month| Month <|{sm_selected_month}|selector|lov={sm_month_selector}|dropdown|width=100%|on_change=change_scenario_selector|> |month> <scenario| Scenario <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|value_by_id|width=18rem|> |scenario> |> |layout_scenario> <| **Table** \n \n <|{sm_graph_selected}|selector|lov={sm_graph_selector}|dropdown|> |> <br/> <br/> <|{d_chart_csv_path}|file_download|name=table.csv|label=Download table|> |> <|part|render={len(scenario_selector)>0}|partial={partial_table}|> <|part|render=False| <|{scenario_counter}|> |> """
import pandas as pd import json with open('data/fixed_variables_default.json', "r") as f: fixed_variables_default = json.load(f) # Taipy Core의 코드는 아직 실행되지 않았습니다. csv 파일을 이런 식으로 읽습니다. da_initial_demand = pd.read_csv('data/time_series_demand.csv') da_initial_demand = da_initial_demand[['Year', 'Month', 'Demand_A', 'Demand_B']].astype(int) da_initial_demand.columns = [col.replace('_', ' ') for col in da_initial_demand.columns] da_initial_variables = pd.DataFrame({key: [fixed_variables_default[key]] for key in fixed_variables_default.keys() if 'Initial' in key}) # 아래 코드는 열 이름의 형식을 올바르게 지정하는 것입니다. da_initial_variables.columns = [col.replace('_', ' ').replace('one', '1').replace('two', '2').replace('initial ', '') for col in da_initial_variables.columns] da_initial_variables.columns = [col[0].upper() + col[1:] for col in da_initial_variables.columns] da_data_visualisation_md = """ # 데이터 시각화 <|Expand here|expanded=False|expandable| <|layout|columns=1 1 1|columns[mobile]=1| <| ## 초기 재고 <center> <|{da_initial_variables[[col for col in da_initial_variables.columns if 'Stock' in col]]}|table|show_all|width=445px|> </center> |> <| ## 초기 생산 <center> <|{da_initial_variables[[col for col in da_initial_variables.columns if 'Production' in col]]}|table|show_all|width=445px|> </center> |> <| ## 구매한 자료 <center> <|{da_initial_variables[[col for col in da_initial_variables.columns if 'Purchase' in col]]}|table|show_all|width=445px|> </center> |> |> ## 다가오는 달의 수요 <center> <|{da_initial_demand.round()}|table|width=fit-content|show_all|height=fit-content|> </center> |> ## 수요의 진화 <|{da_initial_demand}|chart|x=Month|y[1]=Demand A|y[2]=Demand B|width=100%|> """
from pages.annex_scenario_manager.chart_md import ch_chart_md, ch_choice_chart, ch_show_pie, ch_layout_dict, ch_results from pages.annex_scenario_manager.parameters_md import pa_parameters_md, pa_param_selector, pa_param_selected, pa_choice_product_param, pa_product_param from taipy.gui.gui_actions import notify from taipy.gui import Icon import taipy as tp import datetime as dt def remove_scenario_from_tree(scenario, sm_tree_dict: dict): """이 함수는 트리에서 시나리오를 찾아 제거합니다. Args: scenario (Scenario): the scenario to be deleted from the tree sm_tree_dict (dict): the tree dict from which the scenario has to be deleted from Returns: tree: the tree without the scenario """ # 시나리오가 포함되지 않은 경우 삭제되는 주기 키입니다. # cycle_keys_to_pop = [] # We explore our 2-level tree for cycle, scenarios_ in sm_tree_dict.items(): for scenario_id, scenario_name in scenarios_: if scenario_id == scenario.id: # 같은 id를 가진 시나리오를 트리에서 제거 sm_tree_dict[cycle].remove((scenario_id, scenario_name)) # 비어 있는 경우 삭제할 주기에 주기를 추가합니다. if len(sm_tree_dict[cycle]) == 0: cycle_keys_to_pop += [cycle] print("------------- Scenario found and deleted -------------") break # 빈 주기 제거 for cycle in cycle_keys_to_pop: sm_tree_dict.pop(cycle) return sm_tree_dict sm_tree_dict = {} def create_sm_tree_dict(scenarios, sm_tree_dict: dict = None): """이 기능은 시나리오 목록에서 트리 사전을 생성합니다. 트리 수준은 다음과 같습니다. 연도/월/시나리오 Args: scenarios (list): 시나리오 목록 sm_tree_dict (dict, optional): 모든 시나리오를 수집하는 트리. 기본값은 없음입니다. Returns: tree: t시나리오를 분류하기 위해 생성된 트리 """ print("트리 딕셔너리 생성 중...") if sm_tree_dict is None: # 아직 초기화되지 않은 경우 트리 딕셔너리를 초기화합니다. sm_tree_dict = {} # 목록에 있는 모든 시나리오 추가 for scenario in scenarios: # 주기의 이름을 만듭니다. date = scenario.creation_date year = f"{date.strftime('%Y')}" period = f"{date.strftime('%b')}" # 아직 추가되지 않은 경우 주기를 추가합니다. if year not in sm_tree_dict: sm_tree_dict[year] = {} if period not in sm_tree_dict[year]: sm_tree_dict[year][period] = [] # 시나리오 ID와 시나리오 이름으로 새 항목 추가 scenario_name = ( Icon( 'images/main.svg', scenario.name) if scenario.is_primary else scenario.name) sm_tree_dict[year][period] += [(scenario.id, scenario_name)] return sm_tree_dict def create_time_selectors(): """이 기능은 GUI에 표시될 시간 선택기를 생성하고 모든 시나리오를 수집하는 트리 딕셔너리도 생성합니다. Returns: dict: 모든 시나리오를 수집하는 트리 딕셔너리 list: 연도 목록 list: 월 목록 """ all_scenarios = tp.get_scenarios() all_scenarios_ordered = sorted( all_scenarios, key=lambda x: x.creation_date.timestamp()) sm_tree_dict = create_sm_tree_dict(all_scenarios_ordered) if sm_current_year not in list(sm_tree_dict.keys()): sm_tree_dict[sm_current_year] = {} if sm_current_month not in sm_tree_dict[sm_current_year]: sm_tree_dict[sm_current_year][sm_current_month] = [] sm_year_selector = list(sm_tree_dict.keys()) sm_month_selector = list(sm_tree_dict[sm_selected_year].keys()) return sm_tree_dict, sm_year_selector, sm_month_selector def change_sm_month_selector(state): """이 함수는 사용자가 연도 선택기를 변경할 때 호출됩니다. 월 선택기에 대해 GUI에 표시된 선택기를 업데이트하고 시나리오 선택기에 대해 동일한 기능을 호출합니다. Args: state (State): 모든 GUI 변수 """ state.sm_month_selector = list( state.sm_tree_dict[state.sm_selected_year].keys()) if state.sm_selected_month not in state.sm_month_selector: state.sm_selected_month = state.sm_month_selector[0] change_scenario_selector(state) def change_scenario_selector(state): """이 함수는 사용자가 월 선택자를 변경할 때 호출됩니다. 시나리오 선택기에 대한 GUI에 표시된 선택기를 업데이트합니다. 
Args: state (State): 모든 GUI 변수 """ state.scenario_selector = list( state.sm_tree_dict[state.sm_selected_year][state.sm_selected_month]) state.scenario_selector_two = state.scenario_selector.copy() if len(state.scenario_selector) > 0: state.selected_scenario = state.scenario_selector[0][0] if (state.sm_selected_month != sm_current_month or state.sm_selected_year != sm_current_year) and state.sm_show_config_scenario: notify(state, "info", "This scenario is historical, you can't modify it") state.sm_show_config_scenario = False sm_scenario_manager_md = """ # 시나리오 매니저 <|layout|columns=8 4 4 3|columns[mobile]=1| <layout_scenario| <|layout|columns=1 1 3|columns[mobile]=1| <| Year <|{sm_selected_year}|selector|lov={sm_year_selector}|dropdown|width=100%|on_change=change_sm_month_selector|> |> <| Month <|{sm_selected_month}|selector|lov={sm_month_selector}|dropdown|width=100%|on_change=change_scenario_selector|> |> <| Scenario <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|value_by_id|width=18rem|> |> |> |layout_scenario> <graph| **Graph** <br/> <|{sm_graph_selected}|selector|lov={sm_graph_selector}|dropdown|> |graph> <toggle_chart| <center> Pie/Line chart <|{ch_show_pie}|toggle|lov={ch_choice_chart}|value_by_id|active={not 'Product ' in sm_graph_selected}|> </center> |toggle_chart> <button_configure_scenario| <br/> <br/> <|{sm_show_config_scenario_name}|button|on_action=show_config_scenario_action|active={sm_selected_month == sm_current_month and sm_selected_year == sm_current_year}|> |button_configure_scenario> |> <|part|render={sm_show_config_scenario}| """ + pa_parameters_md + """ |> <|part|render={not(sm_show_config_scenario)}| """ + ch_chart_md + """ |> """ # 시나리오 구성 버튼 sm_show_config_scenario_name = "Hide configuration" sm_show_config_scenario = True def show_config_scenario_action(state): state.sm_show_config_scenario = not state.sm_show_config_scenario state.sm_show_config_scenario_name = "Hide configuration" if state.sm_show_config_scenario else "Configure scenario" sm_current_month = dt.date.today().strftime('%b') sm_current_year = dt.date.today().strftime('%Y') sm_selected_year = sm_current_year sm_selected_month = sm_current_month sm_tree_dict, sm_year_selector, sm_month_selector = create_time_selectors() # 표시할 그래프 선택 sm_graph_selector = [ 'Costs', 'Purchases', 'Productions', 'Stocks', 'Back Order', 'Product RP1', 'Product RP2', 'Product FPA', 'Product FPB'] sm_graph_selected = sm_graph_selector[0]
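# Illustrative shape (made-up scenario IDs) of the tree dict built by create_sm_tree_dict
# above: year -> month -> list of (scenario_id, scenario_name or Icon) entries, which the
# year/month/scenario selectors walk through.
sm_tree_dict_example = {
    "2023": {
        "Jan": [("SCENARIO_xxxx", "Scenario 05-Jan-2023 Nb : 1")],
        "Feb": [("SCENARIO_yyyy", "Scenario 02-Feb-2023 Nb : 1"),
                ("SCENARIO_zzzz", "Scenario 10-Feb-2023 Nb : 2")],
    }
}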
from data.create_data import time_series_to_csv from config.config import scenario_cfg from taipy.core import taipy as tp import datetime as dt import pandas as pd cc_data = pd.DataFrame( { 'Date': [dt.datetime(2021, 1, 1)], 'Cycle': [dt.date(2021, 1, 1)], 'Cost of Back Order': [0], 'Cost of Stock': [0] }) cc_show_comparison = False cc_layout = {'barmode': 'stack', 'margin': {"t": 20}} cc_creation_finished = False def cc_create_scenarios_for_cycle(): """이 기능은 여러 주기에 대한 시나리오를 생성하고 제출합니다. """ date = dt.datetime(2021, 1, 1) month = date.strftime('%b') year = date.strftime('%Y') current_month = dt.date.today().strftime('%b') current_year = dt.date.today().strftime('%Y') while month != current_month or year != current_year: date += dt.timedelta(days=15) month = date.strftime('%b') year = date.strftime('%Y') if month != current_month or year != current_year: time_series_to_csv( nb_months=12, mean_A=840, mean_B=760, std_A=96, std_B=72, amplitude_A=108, amplitude_B=144) name = f"Scenario {date.strftime('%d-%b-%Y')}" scenario = tp.create_scenario(scenario_cfg, creation_date=date, name=name) tp.submit(scenario) def update_cc_data(state): """이 기능은 모든 주기의 기본 시나리오에 대한 이월 주문 및 재고 비용의 발전을 만듭니다.""" all_scenarios = tp.get_primary_scenarios() dates = [] cycles = [] costs_of_back_orders = [] costs_of_stock = [] all_scenarios_ordered = sorted( all_scenarios, key=lambda x: x.creation_date.timestamp()) # delete? for scenario in all_scenarios_ordered: results = scenario.results.read() if results is not None: date_ = scenario.creation_date dates.append(date_) cycles.append(dt.date(date_.year, date_.month, 1)) # sum_costs_of_stock 메트릭 생성 bool_costs_of_stock = [c for c in results.columns if 'Cost' in c and 'Total' not in c and 'Stock' in c] sum_costs_of_stock = int(results[bool_costs_of_stock].sum(axis=1)\ .sum(axis=0)) # sum_costs_of_BO 지표 생성 bool_costs_of_BO = [c for c in results.columns if 'Cost' in c and 'Total' not in c and 'BO' in c] sum_costs_of_BO = int(results[bool_costs_of_BO].sum(axis=1)\ .sum(axis=0)) costs_of_back_orders.append(sum_costs_of_BO) costs_of_stock.append(sum_costs_of_stock) state.cc_data = pd.DataFrame({'Date': dates, 'Cycle': cycles, 'Cost of Back Order': costs_of_back_orders, 'Cost of Stock': costs_of_stock}) state.cc_show_comparison = True cc_compare_cycles_md = """ # 사이클 비교 <center> <|Compare Cycles|button|on_action={update_cc_data}|> </center> <|part|render={cc_show_comparison}| <|Table|expanded=False|expandable| <center> <|{cc_data}|table|width=fit-content|> </center> |> ## 비용의 진화 <|{cc_data}|chart|type=bar|x=Cycle|y[1]=Cost of Back Order|y[2]=Cost of Stock|layout={cc_layout}|width=100%|height=600|> |> """
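# A small pandas illustration (toy columns and numbers) of the filter-and-sum pattern used
# in update_cc_data above: select the per-item cost columns, drop the totals, and collapse
# them into a single metric.
import pandas as pd

results = pd.DataFrame({
    "Stock FPA Cost": [10, 20],
    "Stock RP1 Cost": [5, 5],
    "BO FPA Cost": [1, 2],
    "Total Cost": [16, 27],
})
stock_cols = [c for c in results.columns if 'Cost' in c and 'Total' not in c and 'Stock' in c]
sum_costs_of_stock = int(results[stock_cols].sum(axis=1).sum(axis=0))
print(sum_costs_of_stock)  # 40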
import taipy as tp import pandas as pd cs_compare_scenario_md = """ # 시나리오 비교 <|layout|columns=3 3 1|columns[mobile]=1| <layout_scenario| **Scenario 1** <|layout|columns=1 1 3|columns[mobile]=1| <| Year <|{sm_selected_year}|selector|lov={sm_year_selector}|dropdown|width=100%|on_change=change_sm_month_selector|> |> <| Month <|{sm_selected_month}|selector|lov={sm_month_selector}|dropdown|width=100%|on_change=change_scenario_selector|> |> <| Scenario <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|value_by_id|width=18rem|> |> |> |layout_scenario> <layout_scenario| **Scenario 2** <|layout|columns=1 1 3|columns[mobile]=1| <| Year <|{sm_selected_year}|selector|lov={sm_year_selector}|dropdown|width=100%|on_change=change_sm_month_selector|> |> <| Month <|{sm_selected_month}|selector|lov={sm_month_selector}|dropdown|width=100%|on_change=change_scenario_selector|> |> <| Scenario <|{selected_scenario_two}|selector|lov={scenario_selector_two}|dropdown|value_by_id|width=18rem|> |> |> |layout_scenario> <br/> <br/> <br/> <center> <|Compare scenario|button|on_action=compare_scenarios|active={len(scenario_selector)>1}|> </center> |> <|part|render={cs_show_comparaison and len(scenario_selector)>=2}| <|layout|columns=1 1 1|columns[mobile]=1| <| **Representation** <|{cs_compar_graph_selected}|selector|lov={cs_compar_graph_selector}|dropdown=|value_by_id|> |> <br/> <br/> <center> **Total cost of scenario 1:** *<|{str(int(sum_costs/1000))+' K'}|>* </center> <br/> <br/> <center> **Total cost of scenario 2:** *<|{str(int(cs_sum_costs_two/1000))+' K'}|>* </center> |> <|part|render={cs_compar_graph_selected=='Metrics'}| <br/> <br/> <|layout|columns=1 1|columns[mobile]=1| <|{cs_comparaison_metrics_df[cs_comparaison_metrics_df['Metrics']=='BO Cost']}|chart|type=bar|x=Metrics|y[1]=Scenario 1: BO Cost|y[2]=Scenario 2: BO Cost|color[2]=#2b93db|width={width_chart}|height={cs_height_bar_chart}|layout={ch_layout_dict}|> <|{cs_comparaison_metrics_df[cs_comparaison_metrics_df['Metrics']=='Stock Cost']}|chart|type=bar|x=Metrics|y[1]=Scenario 1: Stock Cost|y[2]=Scenario 2: Stock Cost|color[1]=#ff7f0e|color[2]=#ff9a41|width={width_chart}|height={cs_height_bar_chart}|layout={ch_layout_dict}|> |> |> <|part|render={cs_compar_graph_selected=='Costs'}| <|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 Cost|y[2]=Scenario 2 Cost|color[2]=#1f77b4|line[2]=dash|width={width_chart}|height={height_chart}|layout={ch_layout_dict}|> |> <|part|render={cs_compar_graph_selected=='Purchases'}| <|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 Purchase|y[2]=Scenario 2 Purchase|color[2]=#1f77b4|line[2]=dash|width={width_chart}|height={height_chart}|layout={ch_layout_dict}|> |> <|part|render={cs_compar_graph_selected=='Productions'}| <|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 Production|y[2]=Scenario 2 Production|color[2]=#1f77b4|line[2]=dash|width={width_chart}|height={height_chart}|layout={ch_layout_dict}|> |> <|part|render={cs_compar_graph_selected=='Stocks'}| <|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 Stock|y[2]=Scenario 2 Stock|color[2]=#1f77b4|line[2]=dash|width={width_chart}|height={height_chart}|layout={ch_layout_dict}|> |> <|part|render={cs_compar_graph_selected=='Back Order'}| <|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 BO|y[2]=Scenario 2 BO|color[2]=#1f77b4|line[2]=dash|width={width_chart}|height={height_chart}|layout={ch_layout_dict}|> |> |> """ def compare_scenarios(state): """이 기능은 사용자가 다른 메트릭에서 선택한 두 가지 시나리오를 비교하고 비교 그래프에 대한 데이터 프레임을 채웁니다. 
Args: state (State): 모든 GUI 변수 """ state.cs_show_comparaison = True # 사용자가 선택한 두 가지 시나리오 가져오기 results_1 = tp.get(state.selected_scenario).pipelines['pipeline'].results.read() results_2 = tp.get(state.selected_scenario_two).pipelines['pipeline'].results.read() state.cs_sum_costs_two = results_2['Total Cost'].sum() # 두 시나리오의 부분 비용 계산 bool_costs_of_stock = [c for c in results_2.columns if 'Cost' in c and 'Total' not in c and 'Stock' in c] state.cs_sum_costs_of_stock_two = int(results_2[bool_costs_of_stock].sum(axis=1)\ .sum(axis=0)) bool_costs_of_BO = [c for c in results_2.columns if 'Cost' in c and 'Total' not in c and 'BO' in c] state.cs_sum_costs_of_BO_two = int(results_2[bool_costs_of_BO].sum(axis=1)\ .sum(axis=0)) # 비교 그래프의 데이터 프레임을 채웁니다. new_result_1 = pd.DataFrame({"index": results_1.index}) new_result_2 = pd.DataFrame({"index": results_2.index}) columns_to_merge = ['Cost', 'Purchase', 'Production', 'Stock', 'BO'] for col in columns_to_merge: if col == 'Cost': bool_col_1 = [c for c in results_1.columns if col in c and 'Total' not in c] bool_col_2 = [c for c in results_2.columns if col in c and 'Total' not in c] else: bool_col_1 = [c for c in results_1.columns if col in c and 'Total' not in c and 'Cost' not in c] bool_col_2 = [c for c in results_2.columns if col in c and 'Total' not in c and 'Cost' not in c] new_result_1[col] = results_1[bool_col_1].sum(axis=1) new_result_2[col] = results_2[bool_col_2].sum(axis=1) new_result_1.columns = ['Scenario 1 ' + column if column != 'index' else 'index' for column in new_result_1.columns] new_result_2.columns = ['Scenario 2 ' + column if column !='index' else 'index' for column in new_result_2.columns] state.cs_comparaison_metrics_df = pd.DataFrame( { "Metrics": [ "Stock Cost", "BO Cost"], "Scenario 1: Stock Cost": [state.sum_costs_of_stock, None], "Scenario 2: Stock Cost": [state.cs_sum_costs_of_stock_two, None], "Scenario 1: BO Cost": [None, state.sum_costs_of_BO], "Scenario 2: BO Cost": [None, state.cs_sum_costs_of_BO_two] }) state.cs_comparaison_df = pd.merge(new_result_1, new_result_2, on="index", how="inner") print("Comparaison done") pass cs_height_bar_chart = "80%" cs_show_comparaison = False cs_compar_graph_selector = [ 'Metrics', 'Costs', 'Purchases', 'Productions', 'Stocks', 'Back Order'] cs_compar_graph_selected = cs_compar_graph_selector[0] cs_comparaison_df = pd.DataFrame({'index': [0], 'Scenario 1 Cost': [0], 'Scenario 1 Purchase': [0], 'Scenario 1 Production': [0], 'Scenario 1 Stock': [0], 'Scenario 1 BO': [0], 'Scenario 2 Cost': [0], 'Scenario 2 Purchase': [0], 'Scenario 2 Production': [0], 'Scenario 2 Stock': [0], 'Scenario 2 BO': [0]}) cs_comparaison_metrics_df = pd.DataFrame({"Metrics": ["Stock Cost", "BO Cost"], "Scenario 1: Stock Cost": [0, 0], "Scenario 2: Stock Cost": [0, 0], "Scenario 1: BO Cost": [0, 0], "Scenario 2: BO Cost": [0, 0]}) cs_sum_costs_of_stock_two = 0 cs_sum_costs_of_BO_two = 0 cs_sum_costs_two = 0
from .chart_md import ch_chart_md, ch_layout_dict, ch_results from config.config import fixed_variables_default from taipy.gui import Icon def create_sliders(fixed_variables): """" 이것은 매개변수에 대한 슬라이더를 자체적으로 생성하는 매우 복잡한 함수입니다. 손으로 할 수도 있었습니다. 그러나 이 방법은 장기적으로 더 유연합니다. """ # 반환될 문자열 slider_md = "" # 매개변수에는 세 가지 유형이 있습니다. param_types = ['Capacity Constraints','Objective Weights','Initial Parameters'] # 다른 제품의 슬라이더는 (토글을 사용하여) 다른 섹션으로 그룹화됩니다. products = ['FPA','FPB','RPone','RPtwo','weight'] for p_type in param_types: # p_type이 선택되면 해당 부분이 표시됩니다. slider_md += "\n<|part|render={pa_param_selected == '" + p_type + "'}|" if p_type != 'Objective Weights': # the part will be shown if 'Objective Weights' is not selected slider_md +=""" <center> <|{pa_product_param}|toggle|lov={pa_choice_product_param}|value_by_id|> </center> <br/> """ if p_type == 'Objective Weights': var_p = [key for key in fixed_variables.keys() if ('produce' not in key and 'Weight' in key)] # 각 변수(var_p)에 대해 슬라이더가 생성되고 있습니다. # 최소값과 최대값도 자동으로 생성됩니다. for var in var_p : min_ = str(int(fixed_variables[var]*0.35)) max_ = str(int(fixed_variables[var]*1.65)) if fixed_variables[var] != 0 else '50' name_of_var = var.replace('cost','Unit Cost -') name_of_var = name_of_var[0].upper() + name_of_var[1:].replace('_',' ').replace('one','1').replace('two','2') slider_md += "\n\n" + name_of_var + " : *<|{fixed_variables."+var+"}|>*" slider_md += "\n<|{fixed_variables."+var+"}|slider|orientation=h|min="+min_+"|max="+max_+"|step=5|>" else : # 제품에 따라 부품이 표시됩니다. for p in products : slider_md += "\n<|part|render={pa_product_param == 'product_"+p+"'}|" if p_type == 'Capacity Constraints': var_p = [key for key in fixed_variables.keys() if (p in key and 'produce' not in key and 'Max' in key)] else : var_p = [key for key in fixed_variables.keys() if (p in key and 'produce' not in key and 'Capacity' not in key and 'Max' not in key)] # 각 변수(var_p)에 대해 슬라이더가 생성되고 있습니다. # 최소값과 최대값도 자동으로 생성됩니다. 
for var in var_p : min_ = str(int(fixed_variables[var]*0.35)) max_ = str(int(fixed_variables[var]*1.65)) if fixed_variables[var] != 0 else '50' name_of_var = var.replace('cost','Unit Cost -') name_of_var = name_of_var[0].upper() + name_of_var[1:].replace('_',' ').replace('one','1').replace('two','2') slider_md += "\n\n" + name_of_var + " : *<|{fixed_variables."+var+"}|>*" slider_md += "\n<|{fixed_variables."+var+"}|slider|orientation=h|min="+min_+"|max="+max_+"|step=5|>" slider_md += "\n|>" slider_md+="\n|>" return slider_md pa_sliders_md = create_sliders(fixed_variables_default) pa_parameters_md = """ <|layout|columns=139 1 45|columns[mobile]=1|gap=1.5rem| """ + ch_chart_md + """ <blank_space| |blank_space> <| <center> <|{pa_param_selected}|selector|lov={pa_param_selector}|> </center> """ + pa_sliders_md + """ <|Delete|button|on_action={delete_scenario_fct}|active={len(scenario_selector)>0}|id=delete_button|> <|Make Primary|button|on_action={make_primary}|active={len(scenario_selector)>0 and not selected_scenario_is_primary}|id=make_primary|> <|Re-optimize|button|on_action=submit_scenario|active={len(scenario_selector)>0}|id=re_optimize|> <|New scenario|button|on_action=create_new_scenario|id=new_scenario|> |> |> """ pa_param_selector = ['Capacity Constraints','Objective Weights','Initial Parameters'] pa_param_selected = pa_param_selector[0] # 슬라이더 선택 토글 pa_choice_product_param = [("product_RPone", Icon("images/P1.png", "product_RPone")), ("product_RPtwo", Icon("images/P2.png", "product_RPtwo")), ("product_FPA", Icon("images/PA.png", "product_FPA")), ("product_FPB", Icon("images/PB.png", "product_FPB"))] pa_product_param = 'Else'
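# Illustrative output of create_sliders above for a single variable: assuming a fixed
# variable named Max_Capacity_FPA with a (made-up) default of 500, the generated Taipy
# markdown fragment looks roughly like this (min/max are 35% and 165% of the default).
example_slider_md = """
Max Capacity FPA : *<|{fixed_variables.Max_Capacity_FPA}|>*
<|{fixed_variables.Max_Capacity_FPA}|slider|orientation=h|min=175|max=825|step=5|>
"""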
from taipy.gui import Icon import pandas as pd ch_layout_dict = {"margin":{"t":20}} # 차트 설정 토글 ch_choice_chart = [("pie", Icon("images/pie.png", "pie")), ("chart", Icon("images/chart.png", "chart"))] ch_show_pie = ch_choice_chart[1][0] ch_results = pd.DataFrame({"Monthly Production FPA":[], "Monthly Stock FPA": [], "Monthly BO FPA": [], "Max Capacity FPA": [], "Monthly Production FPB": [], "Monthly Stock FPB": [], "Monthly BO FPB": [], "Max Capacity FPB": [], "Monthly Stock RP1":[], "Monthly Stock RP2":[], "Monthly Purchase RP1":[], "Monthly Purchase RP2":[], "Demand FPA": [], "Demand FPB": [], 'Stock FPA Cost': [], 'Stock FPB Cost': [], 'Stock RP1 Cost': [], 'Stock RP2 Cost': [], 'Purchase RP1 Cost': [], 'Purchase RP2 Cost': [], "BO FPA Cost":[], "BO FPB Cost":[], "Total Cost": [], "index": []}) def get_col(ch_results:pd.DataFrame,var:str): if var == 'Cost': columns = [col for col in ch_results.columns if var in col] elif var=='Production': columns = [col for col in ch_results.columns if (var in col or 'Capacity' in col) and 'Cost' not in col] else : columns = [col for col in ch_results.columns if var in col and 'Cost' not in col] return columns def get_y_format(columns): md ="" for col_i in range(len(columns)): md+=f"y[{col_i+1}]={columns[col_i]}|" if "Capacity" in columns[col_i] : md+=f"line[{col_i+1}]=dash|" return md[:-1] def create_charts_md(ch_results): """" 이것은 모든 차트를 생성하는 매우 복잡한 함수입니다. 또한 사용자 작업에 따라 시간이 지남에 따라 변경되는 단일 문자열을 갖도록 수동으로 수행하거나 부분적으로 사용할 수 있습니다. """ # 차트용 md를 만드는 매개변수 config_scenario_option = ["sm_show_config_scenario", "not(sm_show_config_scenario)"] # 파이는 이러한 표현에 가능합니다. pie_possible = ['Costs','Purchases','Productions','Stocks','Back Order'] charts_option_for_col = ['Cost','Purchase','Production','Stock','BO','FPA','FPB','RP1','RP2'] charts_option = ['Costs','Purchases','Productions','Stocks','Back Order','Product FPA','Product FPB','Product RP1','Product RP2'] md = "" for config_scenario in config_scenario_option : md += "\n<|part|render={"+config_scenario+"}|" md += "\n<|" for charts_i in range(len(charts_option)): columns = get_col(ch_results,charts_option_for_col[charts_i]) y_format = get_y_format(columns) columns = [c for c in columns if 'Total' not in c] # in the pie, we don't want to show the total if charts_option[charts_i] in pie_possible : md +="\n<|{pie_results.loc[" + str(columns) + "]}|chart|type=pie|x=values|label=labels|width={width_chart}|height={height_chart}|layout={ch_layout_dict}|render={ch_show_pie=='pie' and sm_graph_selected=='"+charts_option[charts_i]+"'}|>" md += "\n<|{ch_results}|chart|x=index|" + y_format + "|width={width_chart}|height={height_chart}|layout={ch_layout_dict}|render={ch_show_pie=='chart' and sm_graph_selected=='"+charts_option[charts_i]+"'}|>" else: md += "\n<|{ch_results}|chart|x=index|" + y_format + "|width={width_chart}|height={height_chart}|layout={ch_layout_dict}|render={sm_graph_selected=='"+charts_option[charts_i]+"'}|>" md += """\n|> |>""" return md ch_chart_md_1 = """ <br/> <|layout|columns=1 1|columns[mobile]=1| <| <center> <|{str(int(sum_costs_of_BO/1000))+' K'}|indicator|value={sum_costs_of_BO}|min=50_000|max=1_000|width=93%|> Back Order Cost </center> |> <| <center> <|{str(int(sum_costs_of_stock/1000))+' K'}|indicator|value={sum_costs_of_stock}|min=100_000|max=25_000|width=93%|> Stock Cost </center> |> |> """ ch_chart_md = """ <| <|part|render={len(scenario_selector)>0}| <|""" + ch_chart_md_1 + """|> """ + create_charts_md(ch_results) + """ |> <no_scenario|part|render={len(scenario_selector)==0}| ## No scenario 
created for the current month |no_scenario> |> """
import taipy as tp from taipy.gui import Icon import datetime as dt import os import hashlib import json login = '' password = '' dialog_login = False dialog_new_account = False new_account = False all_scenarios = tp.get_scenarios() users = {} json.dump(users, open('login/login.json', 'w')) dialog_user = True user_selector = [('Create new user',Icon('/images/new_account.png','Create new user'))] user_in_session = '' selected_user = None salt = os.urandom(32) def open_dialog_user(state): state.login = '' state.dialog_user = True def encode(password): key = str(hashlib.pbkdf2_hmac( 'sha256', # The hash digest algorithm for HMAC password.encode('utf-8'), # Convert the password to bytes salt, # Provide the salt 100000, # It is recommended to use at least 100,000 iterations of SHA-256 dklen=128 # Get a 128 byte key )) return key def test_password(users, login, new_password): old_key = users[login]['password'] new_key = str(hashlib.pbkdf2_hmac( 'sha256', # The hash digest algorithm for HMAC new_password.encode('utf-8'), # Convert the password to bytes salt, # Provide the salt 100000, # It is recommended to use at least 100,000 iterations of SHA-256 dklen=128 # Get a 128 byte key )) return old_key == new_key def detect_inactive_session(state): users[state.login]['last_visit'] = str(dt.datetime.now()) json.dump(users, open('login/login.json', 'w')) for user in users.keys(): if (dt.datetime.now() - dt.datetime.strptime(users[user]['last_visit'], '%Y-%m-%d %H:%M:%S.%f')).seconds >= 6 * 3600: [tp.delete(s.id) for s in tp.get_scenarios() if 'user' in s.properties and users[user] == s.properties['user']] users.pop(user) login_md = """ <|part|id=part_dialog_button| Welcome, <|{login if login != '' else 'login'}|button|on_action={open_dialog_user}|id=dialog_button|>! |> <|{dialog_user}|dialog|title=Set account|id=dialog_user|width=20%| <|{selected_user}|selector|lov={user_selector}|on_change=on_change_user_selector|id=user_selector|width=100%|value_by_id|> |> <|{dialog_login}|dialog|title=Login|on_action=validate_login|labels=Cancel; Login|id=dialog_user|width=20%| <|{selected_user}|selector|lov={[(login, Icon('/images/user.png', login))]}|id=user_selected|width=100%|value_by_id|> Password <|{password}|input|password=True|> |> <|{dialog_new_account}|dialog|title=Register|on_action=validate_login|labels=Cancel; Register|id=dialog_user|width=20%| Username <|{login}|input|> Password <|{password}|input|password=True|> |> <|part|render={1==0}| <|{user_in_session}|> |> """
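# A small standalone sketch of the PBKDF2 pattern used above. Note that login.py draws a
# fresh os.urandom salt at every start, so previously stored hashes stop matching after a
# restart; persisting a per-user salt next to the hash (as below) is the usual variant.
# The helper names (make_record/check_record) are illustrative, not part of the app.
import hashlib
import os

def make_record(password: str) -> dict:
    salt = os.urandom(32)
    key = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt, 100000, dklen=128)
    return {"salt": salt.hex(), "password": key.hex()}

def check_record(record: dict, password: str) -> bool:
    key = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'),
                              bytes.fromhex(record["salt"]), 100000, dklen=128)
    return key.hex() == record["password"]

record = make_record("secret")
assert check_record(record, "secret") and not check_record(record, "wrong")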
from taipy import Gui

page = """
# Hello World 🌍 with *Taipy*

This is my first Taipy test app. And it is running fine!
"""

Gui(page).run(use_reloader=True)  # use_reloader=True if you are in development
from taipy import Gui

from page.dashboard_fossil_fuels_consumption import *

if __name__ == "__main__":
    Gui(page).run(
        use_reloader=True,  # use_reloader=True if you are in development
        title="Test",
        dark_mode=False,
    )
import pandas as pd import taipy as tp from data.data import dataset_fossil_fuels_gdp country = "Spain" region = "Europe" lov_region = list(dataset_fossil_fuels_gdp.Entity.unique()) def load_dataset(_country): """Load dataset for a specific country. Args: _country (str): The name of the country. Returns: pandas.DataFrame: A DataFrame containing the fossil fuels GDP data for the specified country. """ dataset_fossil_fuels_gdp_cp = dataset_fossil_fuels_gdp.reset_index() dataset_fossil_fuels_gdp_cp = dataset_fossil_fuels_gdp_cp[ dataset_fossil_fuels_gdp["Entity"] == _country ] return dataset_fossil_fuels_gdp_cp dataset_fossil_fuels_gdp_cp = load_dataset(country) def on_change_country(state): """Update the dataset based on the selected country. Args: state (object): The "state" of the variables ran by the program (value changes through selectors) Returns: None """ print("country is:", state.country) _country = state.country dataset_fossil_fuels_gdp_cp = load_dataset(_country) state.dataset_fossil_fuels_gdp_cp = dataset_fossil_fuels_gdp_cp layout = {"yaxis": {"range": [0, 100000]}, "xaxis": {"range": [1965, 2021]}} page = """ # Fossil Fuel consumption by per capita by country* Data comes from <a href="https://ourworldindata.org/grapher/per-capita-fossil-energy-vs-gdp" target="_blank">Our World in Data</a> <|{country}|selector|lov={lov_region}|on_change=on_change_country|dropdown|label=Country/Region|> <|{dataset_fossil_fuels_gdp_cp}|chart|type=plot|x=Year|y=Fossil fuels per capita (kWh)|height=200%|layout={layout}|> ## Fossil fuel per capita for <|{country}|>: <|{dataset_fossil_fuels_gdp_cp}|table|height=400px|width=95%|> """
import pandas as pd

dataset_fossil_fuels_gdp = pd.read_csv("data/per-capita-fossil-energy-vs-gdp.csv")
country_codes = pd.read_csv("./data/country_codes.csv")

dataset_fossil_fuels_gdp = dataset_fossil_fuels_gdp.merge(
    country_codes[["alpha-3", "region"]], how="left", left_on="Code", right_on="alpha-3"
)

dataset_fossil_fuels_gdp = dataset_fossil_fuels_gdp[
    ~dataset_fossil_fuels_gdp["Fossil fuels per capita (kWh)"].isnull()
].reset_index()

dataset_fossil_fuels_gdp["Fossil fuels per capita (kWh)"] = (
    dataset_fossil_fuels_gdp["Fossil fuels per capita (kWh)"] * 1000
)
from taipy.gui import Gui as tpGui from taipy.gui import notify as tpNotify import pandas as pd text = "Original text" col1 = "first col" col2 = "second col" col3 = "third col" ballon_img = "./img/Ballon_15_20.png" section_1 = """ <h1 align="center">Getting started with Taipy GUI</h1> <|layout|columns=1 2 2| <| My text: <|{text}|> <|{text}|input|> |> <| <center> <|Press Me|button|on_action=on_button_action|> **Ein Button:** <|{col1}|> </center> |> <| <center> <|{ballon_img}|image|height=30%|width=30%|label=This is one ballon|> </center> |> |> """ section_2 = ''' ##Darstellung Gas-Verbrauch <|{dataset}|chart|mode=line|x=Datum|y[1]=Verbrauch|y[2]=Betriebsstunden|yaxis[2]=y2|layout={layout}|color[1]=green|color[2]=blue|> ''' layout = { "xaxis": { # Force the title of the x axis "title": "Time-Range" }, "yaxis": { # Force the title of the first y axis "title": "Verbrauch", # Place the first axis on the left "side": "left" }, "yaxis2": { # Second axis overlays with the first y axis "overlaying": "y", # Place the second axis on the right "side": "right", # and give it a title "title": "Betriebsstunden" }, "legend": { # Place the legend above chart "yanchor": "middle" } } def on_button_action(state): tpNotify(state, 'info', f'The text is: {state.text}') state.text = "Button Pressed" def on_change(state, var_name, var_value): if var_name == "text" and var_value == "Reset": state.text = "" return def get_data(path: str): dataset = pd.read_csv(path) dataset["Datum"] = pd.to_datetime(dataset["Datum"], dayfirst=True).dt.date return dataset gui = tpGui(page=section_1 + section_2) dataset = get_data("./dataset.csv") if __name__ == '__main__': # Execute by the _Python_ interpretor, for debug only. tpGui.run(gui, title="Taipy Demo", use_reloader=True, dark_mode=True, port=5001, flask_log=False) else: # Execute by _Gunicorn_, for production environment. app = tpGui.run(gui, title="Taipy Demo", run_server=False)
import os import logging from opentelemetry import metrics from opentelemetry import trace from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.resources import Resource, SERVICE_NAME from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( BatchSpanProcessor, ) logging.basicConfig(level=logging.DEBUG) service_name = os.environ.get("OTEL_SERVICE_NAME", __file__) otel_endpoint = os.environ.get("OTEL_EXPORTER_OTLP_ENDPOINT", "localhost:4317") resource = Resource(attributes={SERVICE_NAME: service_name}) ###### Metrics metrics_reader = PeriodicExportingMetricReader( OTLPMetricExporter(endpoint=otel_endpoint) ) metrics_provider = MeterProvider(metric_readers=[metrics_reader], resource=resource) # Set the global default metrics provider metrics.set_meter_provider(metrics_provider) ###### Traces trace_provider = TracerProvider() processor = BatchSpanProcessor(OTLPSpanExporter(endpoint=otel_endpoint)) trace_provider.add_span_processor(processor) # Sets the global default tracer provider trace.set_tracer_provider(trace_provider) meter = metrics.get_meter_provider().get_meter("my.meter.name") tracer = trace.get_tracer("my.tracer.name") def init_metrics() -> dict: """Initializes metrics""" scenario_execution_counter = meter.create_counter( "scenario_execution", unit="counts", description="Counts the total number of times we execute a scenario", ) rec_svc_metrics = {"scenario_execution_counter": scenario_execution_counter} return rec_svc_metrics
import time from pathlib import Path from taipy import Config, Status import taipy as tp from taipy.gui import get_state_id, invoke_callback from metrics import init_metrics, tracer # Telemetry rec_svc_metrics = init_metrics() @tracer.start_as_current_span("function_double") def double(nb): """Double the given number.""" time.sleep(1) return int(nb) * 2 Config.load(Path(__file__).parent / "config.toml") SCENARIO = Config.scenarios["my_scenario"] # pylint: disable=no-member value = 21 # pylint: disable=invalid-name result = double(value) # pylint: disable=invalid-name CONTENT = """ * You can double this number: <|{value}|input|propagate=True|> * by clicking on this button: <|Double it!|button|on_action=on_button_click|> * and here is the result: <|{result}|> """ gui = tp.Gui(page=CONTENT) core = tp.Core() def job_updated(state_id, scenario, job): """Callback called when a job has been updated.""" if job.status == Status.COMPLETED: def _update_result(state, output): state.result = output.read() # invoke_callback allows to run a function with a GUI _state_. invoke_callback(gui, state_id, _update_result, args=[scenario.output]) # Telemetry rec_svc_metrics["scenario_execution_counter"].add( 1, {"execution_type": "manual", "scenario_name": scenario.config_id} ) def on_button_click(state): """callback for button clicked""" state_id = get_state_id(state) my_scenario = tp.create_scenario(SCENARIO) my_scenario.input.write(state.value) tp.subscribe_scenario(scenario=my_scenario, callback=job_updated, params=[state_id]) tp.submit(my_scenario) if __name__ == "__main__": tp.run( gui, core, host="0.0.0.0", title="Basic Taipy App", )
from taipy.gui import Gui from tensorflow.keras import models from PIL import Image import numpy as np class_names = { 0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck', } model = models.load_model("baseline_mariya.keras") def predict_image(model, path_to_img): img = Image.open(path_to_img) img = img.convert("RGB") img = img.resize((32, 32)) data = np.asarray(img) # print("before:",data[0][0]) data = data / 255 # print("after:",data[0][0]) probs = model.predict(np.array([data])[:1]) # print(probs) # print(probs.max()) top_prob = probs.max() # print(np.argmax(probs)) top_pred = class_names[np.argmax(probs)] return top_prob, top_pred content = "" img_path = "placeholder_image.png" prob = 0 pred = "" index = """ <|text-center| <|{"logo.png"}|image|width = 25vw|> <|{content}|file_selector|extensions = .png|> select an image from your file system <|{pred}|> <|{img_path}|image|> <|{prob}|indicator|value = {prob}|min = 0|max = 100|width = 25vw|> |> """ def on_change(state, var_name, var_val): if var_name == "content": top_prob, top_pred = predict_image(model, var_val) state.prob = round(top_prob * 100) state.pred = "This is a " + top_pred state.img_path = var_val # print(var_name,var_val) app = Gui(page=index) if __name__ == "__main__": app.run()
def a():
    # Empty placeholder function.
    pass
import os import threading from flask import Flask from pyngrok import ngrok from hf_hub_ctranslate2 import GeneratorCT2fromHfHub from flask import request, jsonify model_name = "taipy5-ct2" # note this is local folder model, the model uploaded to huggingface did not response correctly #model_name = "michaelfeil/ct2fast-starchat-alpha" #model_name = "michaelfeil/ct2fast-starchat-beta" model = GeneratorCT2fromHfHub( # load in int8 on CUDA model_name_or_path=model_name, device="cuda", compute_type="int8_float16", # tokenizer=AutoTokenizer.from_pretrained("{ORG}/{NAME}") ) def generate_text_batch(prompt_texts, max_length=64): outputs = model.generate(prompt_texts, max_length=max_length, include_prompt_in_result=False) return outputs app = Flask(__name__) port = "5000" # Open a ngrok tunnel to the HTTP server public_url = ngrok.connect(port).public_url print(" * ngrok tunnel \"{}\" -> \"http://127.0.0.1:{}\"".format(public_url, port)) # Update any base URLs to use the public ngrok URL app.config["BASE_URL"] = public_url # ... Update inbound traffic via APIs to use the public-facing ngrok URL # Define Flask routes @app.route("/") def index(): return "Hello from Colab!" @app.route("/api/generate", methods=["POST"]) def generate_code(): try: # Get the JSON data from the request body data = request.get_json() # Extract 'inputs' and 'parameters' from the JSON data inputs = data.get('inputs', "") parameters = data.get('parameters', {}) # Extract the 'max_new_tokens' parameter max_new_tokens = parameters.get('max_new_tokens', 64) # Call the generate_text_batch function with inputs and max_new_tokens generated_text = generate_text_batch([inputs], max_new_tokens)[0] return jsonify({ "generated_text": generated_text, "status": 200 }) except Exception as e: return jsonify({"error": str(e)}) # Start the Flask server in a new thread threading.Thread(target=app.run, kwargs={"use_reloader": False}).start()
#Imports from taipy.gui import Gui from tensorflow.keras import models from PIL import Image import numpy as np #Variables classes = { 0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck' } model = models.load_model("baseline_model.keras") content="" img_path = 'placeholder_image.png' prob = 0 pred = "" #Define model prediction function def predict_image(model, path): img = Image.open(path) img = img.convert("RGB") img = img.resize((32, 32)) data = np.asarray(img) data = data / 255 data = np.array([data]) probs = model.predict(data) top_prob = probs.max() top_pred = classes.get(np.argmax(probs)) return top_prob, top_pred #Define the app as a Gui index = """ <|text-center| <|{"logo.png"}|image|> <|{content}|file_selector|extensions=.png|> Select an image to classify <|{pred}|> <|{img_path}|image|> <|{prob}|indicator|value={prob}|min=0|max=100|width=22vw|> > """ def on_change(state, var, val): if var == "content": top_prob, top_pred = predict_image(model, val) state.img_path = val state.prob = round(top_prob * 100) state.pred = f"This is a {top_pred}" #print(state,var,val) app = Gui(page = index) #Run script if __name__ =="__main__": app.run(use_reloader=True)
from taipy.gui import Gui, notify import pandas as pd import webbrowser import datetime import os DOWNLOAD_PATH = "data/download.csv" upload_file = None section_1 = """ <center> <|navbar|lov={[("page1", "This Page"), ("https://docs.taipy.io/en/latest/manuals/about/", "Taipy Docs"), ("https://docs.taipy.io/en/latest/getting_started/", "Getting Started")]}|> </center> Data Dashboard with Taipy ========================= <|layout|columns=1 3| <| ### Let's create a simple Data Dashboard! <br/> <center> <|{upload_file}|file_selector|label=Upload Dataset|> </center> |> <| <center> <|{logo}|image|height=250px|width=250px|on_action=image_action|> </center> |> |> """ section_2 = """ ## Data Visualization <|{dataset}|chart|mode=lines|x=Date|y[1]=MinTemp|y[2]=MaxTemp|color[1]=blue|color[2]=red|> """ section_3 = """ <|layout|columns= 1 5| <| ## Custom Parameters **Starting Date**\n\n<|{start_date}|date|not with_time|on_change=start_date_onchange|> <br/><br/> **Ending Date**\n\n<|{end_date}|date|not with_time|on_change=end_date_onchange|> <br/> <br/> <|button|label=GO|on_action=button_action|> |> <| <center> <h2>Dataset</h2><|{DOWNLOAD_PATH}|file_download|on_action=download|> <|{dataset}|table|page_size=10|height=500px|width=65%|> </center> |> |> """ def image_action(state): webbrowser.open("https://taipy.io") def get_data(path: str): dataset = pd.read_csv(path) dataset["Date"] = pd.to_datetime(dataset["Date"]).dt.date return dataset def start_date_onchange(state, var_name, value): state.start_date = value.date() def end_date_onchange(state, var_name, value): state.end_date = value.date() def filter_by_date_range(dataset, start_date, end_date): mask = (dataset['Date'] > start_date) & (dataset['Date'] <= end_date) return dataset.loc[mask] def button_action(state): state.dataset = filter_by_date_range(dataset, state.start_date, state.end_date) notify(state, "info", "Updated date range from {} to {}.".format(state.start_date.strftime("%m/%d/%Y"), state.end_date.strftime("%m/%d/%Y"))) def download(state): state.dataset.to_csv(DOWNLOAD_PATH) logo = "images/taipy_logo.jpg" dataset = get_data("data/weather.csv") start_date = datetime.date(2008, 12, 1) end_date = datetime.date(2017, 6, 25) gui = Gui(page=section_1+section_2+section_3) if __name__ == '__main__': # the options in the gui.run() are optional, try without them gui.run(title='Taipy Demo GUI 2', host='0.0.0.0', port=os.environ.get('PORT', '5050'), dark_mode=False) else: app = gui.run(title='Taipy Demo GUI 2', dark_mode=False, run_server=False)
from taipy.gui import Gui from pages.all_regions import * from pages.by_region import * # Toggle theme: switch dark/light mode root_md = """ <|toggle|theme|> <center>\n<|navbar|>\n</center> """ stylekit = { "color-primary": "#CC3333", "color-secondary": "#E0C095", "color-background-light": "#F7E7CE", "color-background-dark": "#E0C095", } pages = {"/": root_md, "all_regions": all_regions_md, "by_region": by_region_md} gui_multi_pages = Gui(pages=pages) if __name__ == "__main__": gui_multi_pages.run( use_reloader=True, title="Wine 🍷 production by Region and Year", dark_mode=False, stylekit=stylekit, )
import geopandas as gpd import pandas as pd def add_basic_stats(df_wine: pd.DataFrame) -> pd.DataFrame: """Add basic statistics to a DataFrame containing wine production data. This function calculates the minimum, maximum, and average wine production values for each row in the input DataFrame based on yearly data. The resulting DataFrame includes three additional columns: 'min', 'max', and 'average'. Args: df_wine (pd.DataFrame): A DataFrame containing wine production data for various French wine regions. Returns: df_wine_with_stats (pd.DataFrame): A new DataFrame with additional columns ('min', 'max', 'average') representing the calculated statistics for each row. """ df_wine_with_stats = df_wine.copy() df_wine_years = df_wine_with_stats[ [ "08/09", "09/10", "10/11", "11/12", "12/13", "13/14", "14/15", "15/16", "16/17", "17/18", "18/19", ] ] df_wine_with_stats["min"] = df_wine_years.min(axis=1) df_wine_with_stats["max"] = df_wine_years.max(axis=1) df_wine_with_stats["average"] = round(df_wine_years.mean(axis=1), 2) # Rename the "wine_basin" column for better readability in the dashboard df_wine_with_stats = df_wine_with_stats.rename(columns={"wine_basin": "Region"}) return df_wine_with_stats def add_geometry( df_wine_with_stats: pd.DataFrame, geometry: pd.DataFrame ) -> pd.DataFrame: """Add geographical information to a DataFrame containing wine production statistics. This function takes a DataFrame with wine production statistics (`df_wine_with_stats`) and a DataFrame with geographical information (`geometry`). It adds geographical data to the wine production DataFrame (including latitude and longitude). Args: df_wine_with_stats (pd.DataFrame): DataFrame containing wine production statistics for various regions and wine types. geometry (pd.DataFrame): DataFrame with geometrical information, including the 'geometry' column containing the geographical shapes. Returns: df_wine_with_geometry (pd.DataFrame): A new DataFrame with additional geographical information, including latitude and longitude, added to the wine production data. """ df_geometry = gpd.GeoDataFrame.from_features(geometry, crs=3857) # Reproject to EPSG:4326 to be able to extract Lon and Lat df_geometry = df_geometry.to_crs(epsg=4326) df_wine_with_geometry = df_wine_with_stats.copy() # Drop the rows that are subsets (so we don't count in aggregation) rows_to_drop = df_wine_with_geometry["AOC"].str.contains("(subset)") df_wine_with_geometry = df_wine_with_geometry.drop( df_wine_with_geometry[rows_to_drop].index ) df_wine_with_geometry = df_wine_with_geometry.reset_index(drop=True) # Drop the "AOC" column, as well as "min" and "max" df_wine_with_geometry = df_wine_with_geometry.drop(["AOC", "min", "max"], axis=1) df_wine_with_geometry = df_wine_with_geometry.groupby(["Region", "wine_type"]).sum() df_wine_with_geometry = df_wine_with_geometry.reset_index() df_wine_with_geometry = df_wine_with_geometry.sort_values( by=["average"], ascending=False ).reset_index(drop=True) # Extract latitude and longitude df_geometry["latitude"] = df_geometry["geometry"].y df_geometry["longitude"] = df_geometry["geometry"].x df_geometry = df_geometry.drop("geometry", axis=1) df_wine_with_geometry = pd.merge( df_wine_with_geometry, df_geometry, left_on="Region", right_on="Bassin" ) df_wine_with_geometry = df_wine_with_geometry.drop("Bassin", axis=1) return df_wine_with_geometry
import taipy.core as tp from taipy import Config # Loading of the TOML Config.load("config/taipy-config.toml") tp.Core().run() # Get the scenario configuration scenario_cfg = Config.scenarios["SC_WINE"] sc_wine = tp.create_scenario(scenario_cfg) sc_wine.submit() df_wine_production = sc_wine.WINE_PRODUCTION_WITH_STATS.read() df_wine_with_geometry = sc_wine.WINE_PRODUCTION_WITH_GEOMETRY.read()
from typing import Any import pandas as pd from config.config import df_wine_production, df_wine_with_geometry selected_year = "average" year_list = [ "average", "08/09", "09/10", "10/11", "11/12", "12/13", "13/14", "14/15", "15/16", "16/17", "17/18", "18/19", ] area_type_list = ["AOC", "Region"] selected_area = area_type_list[0] def get_df_map_color( year: str, color: str, df_wine_with_geometry: pd.DataFrame = df_wine_with_geometry ) -> pd.DataFrame: """Create a DataFrame for map coloring based on wine production data. This function takes a specific year and wine color, and extracts relevant information from the original wine production DataFrame (`df_wine_with_geometry`). It creates a DataFrame suitable for map coloring, including information about regions, latitude, longitude, production, size, and text (a field to display the hover in the map). Args: year (str): The selected year for wine production data. color (str): The selected wine color (e.g., 'RED AND ROSE', 'WHITE'). df_wine_with_geometry (pd.DataFrame, optional): DataFrame containing wine production and geographical information. Defaults to the global variable `df_wine_with_geometry`. Returns: df_map_color (pd.DataFrame): A DataFrame with information for map coloring. """ df_geometry_color = df_wine_with_geometry[ df_wine_with_geometry["wine_type"] == color ].copy() df_map_color = df_geometry_color[["Region", "latitude", "longitude"]] # Production is divided by 10 to show million Liters df_map_color["Production"] = df_geometry_color[year] / 10 # this is only to display and acceptable size of the dots on the map, dividing by 5 is arbitrary: df_map_color["size"] = df_map_color["Production"] / 5 df_map_color["text"] = ( df_map_color["Region"] + ": " + df_map_color["Production"].astype(str) + " Ml" ) print(df_map_color.head()) return df_map_color def get_df_wine_year_and_area( year: str, area_type: str, df_wine_production: pd.DataFrame = df_wine_production ) -> pd.DataFrame: """Create a DataFrame for wine production based on a specific year and area type. This function takes a specific year and area type, and extracts relevant information from the original wine production DataFrame (`df_wine_production`). It creates a DataFrame suitable for displaying wine production data for a specified year and area type. Args: year (str): The selected year for wine production data. area_type (str): The selected area type (e.g., 'AOC', 'Region'). df_wine_production (pd.DataFrame, optional): DataFrame containing wine production data. Defaults to the global variable `df_wine_production`. Returns: df_wine_year (pd.DataFrame): A DataFrame with information for displaying wine production data. 
""" df_wine_year = df_wine_production[[area_type, "wine_type"]].copy() # Production is divided by 10 to show million Liters df_wine_year["Production"] = df_wine_production[year] / 10 df_wine_year = df_wine_year.reset_index(drop=True) df_wine_year = df_wine_year.rename(columns={area_type: "Region"}) df_wine_year = df_wine_year.groupby(["Region", "wine_type"]).sum() df_wine_year = df_wine_year.reset_index() df_wine_year = df_wine_year.sort_values(by=["Production"]) # Add a column to display a cleaner and shorter label in the bar chart df_wine_year["Wine Region"] = ( (df_wine_year["Region"].str.replace(r"[a-zA-Z]/ ", "", regex=True)) .str.replace(r" \(.+", "", regex=True) .str.replace(r" including.+", "", regex=True) ) return df_wine_year df_wine_year = get_df_wine_year_and_area(selected_year, selected_area) df_map_red = get_df_map_color(selected_year, "RED AND ROSE") df_map_white = get_df_map_color(selected_year, "WHITE") # Variables for the labels: total_production = df_wine_year["Production"].sum() red_rose_production = df_wine_year[df_wine_year["wine_type"] == "RED AND ROSE"][ "Production" ].sum() white_production = df_wine_year[df_wine_year["wine_type"] == "WHITE"][ "Production" ].sum() def on_change(state: Any) -> None: """Update state based on a change in selected year and area type. This function takes the current state (`state`) and updates relevant attributes based on a change in the selected year and area type. It calculates total production, production for red and white wines, and updates map DataFrames for red and white wines. Args: state (Any): The current state object. Returns: None """ print("Chosen year: ", state.selected_year) print("Choose region type: ", state.selected_area) state.df_wine_year = get_df_wine_year_and_area( state.selected_year, state.selected_area ) # Update the labels: state.total_production = state.df_wine_year["Production"].sum() state.red_rose_production = state.df_wine_year[ df_wine_year["wine_type"] == "RED AND ROSE" ]["Production"].sum() state.white_production = state.df_wine_year[df_wine_year["wine_type"] == "WHITE"][ "Production" ].sum() # Update map dataframes: state.df_map_red = get_df_map_color(state.selected_year, "RED AND ROSE") state.df_map_white = get_df_map_color(state.selected_year, "WHITE") ############################################################################################################## ## Chart properties: ## ############################################################################################################## bar_chart_layout = { "yaxis": {"range": [0, 600]}, "xaxis": {"automargin": True}, "xlabel": "None", } property_barchart_red_rose = { "type": "bar", "x": "Wine Region", "y[1]": "Production", "color[1]": "#900020", "title": "Production of Red wines by Region (Million Liters)", } property_barchart_white = { "type": "bar", "x": "Wine Region", "y[1]": "Production", "color[1]": "#E0C095", "title": f"Production of White wines by Region (Million Liters)", } ############################################################################################################## ## For the map: ## ############################################################################################################## marker_map_white = { "color": "Production", "size": "size", "showscale": True, "colorscale": "Viridis", # No better colormap found } marker_map_red = { "color": "Production", "size": "size", "showscale": True, "colorscale": "RdBu", } layout_map_red = { "title": "Production of red wines, per Region - Million Liters", "dragmode": 
"zoom", "mapbox": { # "style": "stamen-toner", "style": "open-street-map", "center": {"lat": 46, "lon": 1.9}, "zoom": 5, }, } layout_map_white = { "title": "Production of white wines, per Region - Million Liters", "dragmode": "zoom", "mapbox": { # "style": "stamen-toner", "style": "open-street-map", "center": {"lat": 46, "lon": 1.9}, "zoom": 5, }, } options_map = { "unselected": {"marker": {"opacity": 0.8}}, "hovertemplate": "<b>%{text}</b>" + "<extra></extra>", } ############################################################################################################## ## Taipy Code: ## ############################################################################################################## all_regions_md = """ <|{selected_year}|selector|lov={year_list}|on_change=on_change|dropdown|label=Choose Year|> # AOC Wine production | **<|{selected_year}|text|raw|> Campaign**{: .color-primary} | All Regions <|layout|columns=1 1 1| <|card card-bg| ## **Total:**{: .color-primary} \n ### <|{f'{int(total_production / 10) } Million Liters'}|> |> <|card card-bg| ## **Red / Rosé:**{: .color-primary} \n ###<|{f'{int(red_rose_production / 10) } Million Liters'}|> |> <|card card-bg| ## **White:**{: .color-primary} \n ###<|{f'{int(white_production / 10) } Million Liters'}|> |> |> ## Production **by <|{selected_area}|text|raw|>**{: .color-primary} <|{selected_area}|toggle|lov={area_type_list}|on_change=on_change|> <|layout|columns=1 1| <|{df_wine_year[df_wine_year["wine_type"] == "RED AND ROSE"]}|chart|properties={property_barchart_red_rose}|layout={bar_chart_layout}|height=800px|> <|{df_wine_year[df_wine_year["wine_type"] == "WHITE"]}|chart|properties={property_barchart_white}|layout={bar_chart_layout}|height=800px|> |> ## Production Maps, **<|{selected_year}|text|raw|>**{: .color-primary}: <|layout|columns=1 1| <|{df_map_red}|chart|type=scattermapbox|lat=latitude|lon=longitude|marker={marker_map_red}|layout={layout_map_red}|text=text|mode=markers|height=600px|options={options_map}|> <|{df_map_white}|chart|type=scattermapbox|lat=latitude|lon=longitude|marker={marker_map_white}|layout={layout_map_white}|text=text|mode=markers|height=600px|options={options_map}|> |> ## Data for all the regions: <|{df_wine_production}|table|height=400px|width=100%|filter[AOC]=True|filter[Region]=True|filter[wine_type]=True|> """
from typing import Any, Tuple import pandas as pd from config.config import df_wine_with_geometry list_of_regions = df_wine_with_geometry["Region"].unique().tolist() selected_region = "SUD-OUEST" def clean_df_region_color(df_region_color: pd.DataFrame) -> pd.DataFrame: """Clean and transform a DataFrame containing region color information. This function takes a DataFrame (`df_region_color`) containing color information for a specific wine region. It performs cleaning operations, including dropping unnecessary columns and transposing the DataFrame for better representation. Args: df_region_color (pd.DataFrame): DataFrame containing color information for a specific wine region. Returns: pd.DataFrame: A cleaned and transformed DataFrame with columns 'Harvest' and 'years'. """ df_region_color_clean = df_region_color.drop( ["Region", "wine_type", "average", "latitude", "longitude"], axis=1 ) years = df_region_color_clean.columns df_region_color_clean = df_region_color_clean.transpose().rename( columns={0: "Harvest"} ) df_region_color_clean["Harvest"] = df_region_color_clean["Harvest"] / 10 df_region_color_clean["years"] = years return df_region_color_clean def create_df_region(selected_region: str) -> Tuple[pd.DataFrame, pd.DataFrame]: """Create DataFrames for red and white wine production statistics for a selected region. This function takes a selected region (`selected_region`) and extracts relevant information from the original wine production DataFrame (`df_wine_with_geometry`). It creates separate DataFrames for red and white wine production, applying additional cleaning operations. Args: selected_region (str): The selected wine region. Returns: Tuple[pd.DataFrame, pd.DataFrame]: A tuple containing two DataFrames - one for red wine ('df_region_red') and one for white wine ('df_region_white'). """ df_region = df_wine_with_geometry.copy() df_region = df_region[df_region["Region"] == selected_region] df_region_red = df_region[df_region["wine_type"] == "RED AND ROSE"].reset_index( drop=True ) df_region_white = df_region[df_region["wine_type"] == "WHITE"].reset_index( drop=True ) if selected_region not in ("CHAMPAGNE", "ALSACE ET EST"): df_region_red = clean_df_region_color(df_region_red) else: df_region_red = pd.DataFrame.from_dict( { "Harvest": [0] * 10, "years": [ "08/09", "09/10", "10/11", "11/12", "12/13", "13/14", "14/15", "15/16", "17/18", "18/19", ], } ) df_region_white = clean_df_region_color(df_region_white) return (df_region_red, df_region_white) df_region_red, df_region_white = create_df_region(selected_region) def on_change_region(state: Any) -> None: """Update red and white wine DataFrames based on a change in the selected region. This function takes the current state (`state`) and updates the red and white wine DataFrames (`df_region_red` and `df_region_white`) based on a change in the selected region. Args: state (Any): The current state object. 
Returns: None """ state.df_region_red, state.df_region_white = create_df_region(state.selected_region) ############################################################################################################## ## Chart properties: ## ############################################################################################################## plot_chart_layout = {"yaxis": {"range": [0, 600]}} property_plot_white = { "type": "scatter", "mode": "lines", "x": "years", "y": "Harvest", "color": "#E0C095", "title": f"Production of White wines (Million Liters)", } property_plot_red = { "type": "scatter", "mode": "lines", "x": "years", "y": "Harvest", "color": "#900020", "title": f"Production of Red wines (Million Liters)", } ############################################################################################################## ## Taipy Code: ## ############################################################################################################## by_region_md = """ <|layout|columns= 1 2| <|{selected_region}|selector|lov={list_of_regions}|on_change=on_change_region|dropdown|label=Choose Region|> # Wine production | **by Region**{: .color-primary} |> # Charts: <|layout|columns= 1 1| <|{df_region_red}|chart|properties={property_plot_red}|layout={plot_chart_layout}|> <|{df_region_white}|chart|properties={property_plot_white}|layout={plot_chart_layout}|> |> """
import numpy as np from taipy.gui import Gui from pages.country import country_md, on_change_country,\ selected_representation, data_country_date, pie_chart from pages.world import world_md from pages.map import map_md from pages.predictions import predictions_md, selected_scenario, result, scenario_country, scenario_date from data.data import data selector_country = list(np.sort(data['Country/Region'].astype(str).unique())) selected_country = 'France' pages = { "/":"<center><|navbar|></center>", "Country":country_md, "World":world_md, "Map":map_md, "Predictions":predictions_md } if __name__ == '__main__': gui_multi_pages = Gui(pages=pages) gui_multi_pages.run(title="Covid Dashboard", dark_mode=False, use_reloader=False, port=5039)
from taipy.config import Config, Scope import datetime as dt from algos.algos import add_features, create_train_data, preprocess,\ train_arima, train_linear_regression,\ forecast, forecast_linear_regression,\ result #Config.configure_job_executions(mode="standalone", nb_of_workers=2) path_to_data = "data/covid-19-all.csv" initial_data_cfg = Config.configure_data_node(id="initial_data", storage_type="csv", path=path_to_data, cacheable=True, validity_period=dt.timedelta(days=5), scope=Scope.GLOBAL) country_cfg = Config.configure_data_node(id="country", default_data="France", cacheable=True, validity_period=dt.timedelta(days=5)) date_cfg = Config.configure_data_node(id="date", default_data=dt.datetime(2020,10,1), cacheable=True, validity_period=dt.timedelta(days=5)) final_data_cfg = Config.configure_data_node(id="final_data", cacheable=True, validity_period=dt.timedelta(days=5)) train_data_cfg = Config.configure_data_node(id="train_data", cacheable=True, validity_period=dt.timedelta(days=5)) task_preprocess_cfg = Config.configure_task(id="task_preprocess_data", function=preprocess, input=[initial_data_cfg, country_cfg, date_cfg], output=[final_data_cfg,train_data_cfg]) model_cfg = Config.configure_data_node(id="model", cacheable=True, validity_period=dt.timedelta(days=5), scope=Scope.PIPELINE) task_train_cfg = Config.configure_task(id="task_train", function=train_arima, input=train_data_cfg, output=model_cfg) predictions_cfg = Config.configure_data_node(id="predictions", scope=Scope.PIPELINE) task_forecast_cfg = Config.configure_task(id="task_forecast", function=forecast, input=model_cfg, output=predictions_cfg) result_cfg = Config.configure_data_node(id="result", scope=Scope.PIPELINE) task_result_cfg = Config.configure_task(id="task_result", function=result, input=[final_data_cfg, predictions_cfg, date_cfg], output=result_cfg) pipeline_preprocessing_cfg = Config.configure_pipeline(id="pipeline_preprocessing", task_configs=[task_preprocess_cfg]) pipeline_arima_cfg = Config.configure_pipeline(id="ARIMA", task_configs=[task_train_cfg, task_forecast_cfg, task_result_cfg]) task_train_cfg = Config.configure_task(id="task_train_linear_regression", function=train_linear_regression, input=train_data_cfg, output=model_cfg) task_forecast_cfg = Config.configure_task(id="task_forecast_linear_regression", function=forecast_linear_regression, input=[model_cfg, date_cfg], output=predictions_cfg) pipeline_linear_regression_cfg = Config.configure_pipeline(id="LinearRegression", task_configs=[task_train_cfg, task_forecast_cfg, task_result_cfg]) scenario_cfg = Config.configure_scenario(id='scenario', pipeline_configs=[pipeline_preprocessing_cfg, pipeline_arima_cfg, pipeline_linear_regression_cfg])
import pandas as pd from pmdarima import auto_arima from sklearn.linear_model import LinearRegression import datetime as dt import numpy as np def add_features(data): dates = pd.to_datetime(data["Date"]) data["Months"] = dates.dt.month data["Days"] = dates.dt.isocalendar().day data["Week"] = dates.dt.isocalendar().week data["Day of week"] = dates.dt.dayofweek return data def create_train_data(final_data, date): bool_index = pd.to_datetime(final_data['Date']) <= date train_data = final_data[bool_index] return train_data def preprocess(initial_data, country, date): data = initial_data.groupby(["Country/Region",'Date'])\ .sum()\ .dropna()\ .reset_index() final_data = data.loc[data['Country/Region']==country].reset_index(drop=True) final_data = final_data[['Date','Deaths']] final_data = add_features(final_data) train_data = create_train_data(final_data, date) return final_data, train_data def train_arima(train_data): model = auto_arima(train_data['Deaths'], start_p=1, start_q=1, max_p=5, max_q=5, start_P=0, seasonal=False, d=1, D=1, trace=True, error_action='ignore', suppress_warnings=True) model.fit(train_data['Deaths']) return model def forecast(model): predictions = model.predict(n_periods=60) return np.array(predictions) def result(final_data, predictions, date): dates = pd.to_datetime([date + dt.timedelta(days=i) for i in range(len(predictions))]) final_data['Date'] = pd.to_datetime(final_data['Date']) predictions = pd.concat([pd.Series(dates, name="Date"), pd.Series(predictions, name="Predictions")], axis=1) return final_data.merge(predictions, on="Date", how="outer") def train_linear_regression(train_data): y = train_data['Deaths'] X = train_data.drop(['Deaths','Date'], axis=1) model = LinearRegression() model.fit(X,y) return model def forecast_linear_regression(model, date): dates = pd.to_datetime([date + dt.timedelta(days=i) for i in range(60)]) X = add_features(pd.DataFrame({"Date":dates})) X.drop('Date', axis=1, inplace=True) predictions = model.predict(X) return predictions
import pandas as pd path_to_data = "data/covid-19-all.csv" data = pd.read_csv(path_to_data, low_memory=False)
from taipy.gui import Markdown import numpy as np import json from data.data import data type_selector = ['Absolute', 'Relative'] selected_type = type_selector[0] def initialize_world(data): data_world = data.groupby(["Country/Region", 'Date'])\ .sum()\ .reset_index() with open("data/pop.json","r") as f: pop = json.load(f) data_world['Population'] = [0]*len(data_world) for i in range(len(data_world)): data_world['Population'][i] = pop[data_world.loc[i, "Country/Region"]][1] data_world = data_world.dropna()\ .reset_index() data_world['Deaths/100k'] = data_world.loc[:,'Deaths']/data_world.loc[:,'Population']*100000 data_world_pie_absolute = data_world.groupby(["Country/Region"])\ .max()\ .sort_values(by='Deaths', ascending=False)[:20]\ .reset_index() data_world_pie_relative = data_world.groupby(["Country/Region"])\ .max()\ .sort_values(by='Deaths/100k', ascending=False)[:20]\ .reset_index()\ .drop(columns=['Deaths']) country_absolute = data_world_pie_absolute['Country/Region'].unique().tolist() country_relative = data_world_pie_relative.loc[:,'Country/Region'].unique().tolist() data_world_evolution_absolute = data_world[data_world['Country/Region'].str.contains('|'.join(country_absolute),regex=True)] data_world_evolution_absolute = data_world_evolution_absolute.pivot(index='Date', columns='Country/Region', values='Deaths')\ .reset_index() data_world_evolution_relative = data_world[data_world['Country/Region'].str.contains('|'.join(country_relative),regex=True)] data_world_evolution_relative = data_world_evolution_relative.pivot(index='Date', columns='Country/Region', values='Deaths/100k')\ .reset_index() return data_world, data_world_pie_absolute, data_world_pie_relative, data_world_evolution_absolute, data_world_evolution_relative data_world,\ data_world_pie_absolute, data_world_pie_relative,\ data_world_evolution_absolute, data_world_evolution_relative = initialize_world(data) data_world_evolution_absolute_properties = {"x":"Date"} cols = [col for col in data_world_evolution_absolute.columns if col != "Date"] for i in range(len(cols)): data_world_evolution_absolute_properties[f'y[{i}]'] = cols[i] data_world_evolution_relative_properties = {"x":"Date"} cols = [col for col in data_world_evolution_relative.columns if col != "Date"] for i in range(len(cols)): data_world_evolution_relative_properties[f'y[{i}]'] = cols[i] world_md = Markdown(""" # **World**{: .color-primary} Statistics <|{selected_type}|toggle|lov={type_selector}|> <|layout|columns=1 1 1 1|gap=30px| <|card m1| #### **Deaths**{: .color-primary} <br/> # <|{'{:,}'.format(int(np.sum(data_world_pie_absolute['Deaths']))).replace(',', ' ')}|text|raw|> |> <|card m1| #### **Recovered**{: .color-primary} <br/> # <|{'{:,}'.format(int(np.sum(data_world_pie_absolute['Recovered']))).replace(',', ' ')}|text|raw|> |> <|card m1| #### **Confirmed**{: .color-primary} <br/> # <|{'{:,}'.format(int(np.sum(data_world_pie_absolute['Confirmed']))).replace(',', ' ')}|text|raw|> |> |> <br/> <|part|render={selected_type=='Absolute'}| <|layout|columns=1 2|gap=30px| <|{data_world_pie_absolute}|chart|type=pie|label=Country/Region|x=Deaths|> <|{data_world_evolution_absolute}|chart|properties={data_world_evolution_absolute_properties}|> |> |> <|part|render={selected_type=='Relative'}| <|layout|columns=1 2|gap=30px| <|{data_world_pie_relative}|chart|type=pie|label=Country/Region|x=Deaths/100k|> <|{data_world_evolution_relative}|chart|properties={data_world_evolution_relative_properties}|> |> |> """)
from taipy.gui import Markdown, invoke_long_callback
import taipy as tp
import pandas as pd
import datetime as dt

from config.config import scenario_cfg

scenario_selector = [(s.id, s.name) for s in tp.get_scenarios()]
selected_scenario = None

selected_date = dt.datetime(2020,10,1)

scenario_country = "No selected scenario"
scenario_date = "No selected scenario"

scenario_name = ""

result = pd.DataFrame({"Date":[dt.datetime(2020,1,1)],
                       "Deaths_x":[0], "Deaths_y":[0],
                       "Predictions_x":[0], "Predictions_y":[0]})


def create_new_scenario(state):
    # Only create a scenario when a non-empty name has been provided
    if state.scenario_name is not None and state.scenario_name != "":
        scenario = tp.create_scenario(scenario_cfg, name=state.scenario_name)
        state.scenario_selector += [(scenario.id, scenario.name)]
        state.selected_scenario = scenario.id
        actualize_graph(state)

def submit_heavy(scenario):
    tp.submit(scenario)

def submit_status(state, status):
    actualize_graph(state)

def submit_scenario(state):
    # 1) get the selected scenario
    # 2) write the selected country in the country Data Node
    # 3) submit the scenario
    # 4) update the graph with actualize_graph
    if state.selected_scenario is not None:
        scenario = tp.get(state.selected_scenario)
        scenario.country.write(state.selected_country)
        scenario.date.write(state.selected_date.replace(tzinfo=None))
        invoke_long_callback(state, submit_heavy, [scenario], submit_status)

def actualize_graph(state):
    # 1) update the result dataframe
    # 2) change selected_country with the predicted country of the scenario
    if state.selected_scenario is not None:
        scenario = tp.get(state.selected_scenario)
        result_arima = scenario.pipelines['ARIMA'].result.read()
        result_rd = scenario.pipelines['LinearRegression'].result.read()
        if result_arima is not None and result_rd is not None:
            state.result = result_rd.merge(result_arima, on="Date", how="outer").sort_values(by='Date')
        else:
            state.result = result

        state.scenario_country = scenario.country.read()
        state.scenario_date = scenario.date.read().strftime("%d %B %Y")


predictions_md = Markdown("""
# **Prediction**{: .color-primary} page

## Scenario Creation

<|layout|columns=5 5 5 5|gap=30px|

**Scenario Name** <br/>
<|{scenario_name}|input|label=Name|> <br/>
<|Create|button|on_action=create_new_scenario|>

**Prediction Date** <br/>
<|{selected_date}|date|label=Prediction date|>

<|Submit|button|on_action=submit_scenario|>

**Country** <br/>
<|{selected_country}|selector|lov={selector_country}|dropdown|on_change=on_change_country|label=Country|>
|>

---------------------------------------

## Result

<|layout|columns=2 3 3|gap=30px|
<|{selected_scenario}|selector|lov={scenario_selector}|on_change=actualize_graph|dropdown|value_by_id|label=Scenario|>

<|card m1|
#### Country of prediction:
<|{scenario_country}|>
|>

<|card m1|
#### Date of prediction:
<|{scenario_date}|>
|>
|>

<br/>

<|{result}|chart|x=Date|y[1]=Deaths_x|type[1]=bar|y[2]=Predictions_x|y[3]=Predictions_y|>
""")
import numpy as np from taipy.gui import Markdown from data.data import data marker_map = {"color":"Deaths", "size": "Size", "showscale":True, "colorscale":"Viridis"} layout_map = { "dragmode": "zoom", "mapbox": { "style": "open-street-map", "center": { "lat": 38, "lon": -90 }, "zoom": 3} } options = {"unselected":{"marker":{"opacity":0.5}}} def initialize_map(data): data['Province/State'] = data['Province/State'].fillna(data["Country/Region"]) data_province = data.groupby(["Country/Region", 'Province/State', 'Longitude', 'Latitude'])\ .max() data_province_displayed = data_province[data_province['Deaths']>10].reset_index() data_province_displayed['Size'] = np.sqrt(data_province_displayed.loc[:,'Deaths']/data_province_displayed.loc[:,'Deaths'].max())*80 + 3 data_province_displayed['Text'] = data_province_displayed.loc[:,'Deaths'].astype(str) + ' deaths </br> ' + data_province_displayed.loc[:,'Province/State'] return data_province_displayed data_province_displayed = initialize_map(data) map_md = Markdown(""" # **Map**{: .color-primary} Statistics <|{data_province_displayed}|chart|type=scattermapbox|lat=Latitude|lon=Longitude|marker={marker_map}|layout={layout_map}|text=Text|mode=markers|height=800px|options={options}|> """)
import numpy as np import pandas as pd from taipy.gui import Markdown from data.data import data selected_country = 'France' data_country_date = None representation_selector = ['Cumulative', 'Density'] selected_representation = representation_selector[0] layout = {'barmode':'stack', "hovermode":"x"} options = {"unselected":{"marker":{"opacity":0.5}}} country_md = "<|{data_country_date}|chart|type=bar|x=Date|y[1]=Deaths|y[2]=Recovered|y[3]=Confirmed|layout={layout}|options={options}|>" def initialize_case_evolution(data, selected_country='France'): # Aggregation of the dataframe to erase the regions that will not be used here data_country_date = data.groupby(["Country/Region",'Date'])\ .sum()\ .reset_index() # a country is selected, here France by default data_country_date = data_country_date.loc[data_country_date['Country/Region']==selected_country] return data_country_date data_country_date = initialize_case_evolution(data) pie_chart = pd.DataFrame({"labels": ["Deaths", "Recovered", "Confirmed"],"values": [data_country_date.iloc[-1, 6], data_country_date.iloc[-1, 5], data_country_date.iloc[-1, 4]]}) def convert_density(state): if state.selected_representation == 'Density': df_temp = state.data_country_date.copy() df_temp['Deaths'] = df_temp['Deaths'].diff().fillna(0) df_temp['Recovered'] = df_temp['Recovered'].diff().fillna(0) df_temp['Confirmed'] = df_temp['Confirmed'].diff().fillna(0) state.data_country_date = df_temp else: state.data_country_date = initialize_case_evolution(data, state.selected_country) def on_change_country(state): # state contains all the Gui variables and this is through this state variable that we can update the Gui # state.selected_country, state.data_country_date, ... # update data_country_date with the right country (use initialize_case_evolution) print("Chosen country: ", state.selected_country) state.data_country_date = initialize_case_evolution(data, state.selected_country) state.pie_chart = pd.DataFrame({"labels": ["Deaths", "Recovered", "Confirmed"], "values": [state.data_country_date.iloc[-1, 6], state.data_country_date.iloc[-1, 5], state.data_country_date.iloc[-1, 4]]}) if state.selected_representation == 'Density': convert_density(state) country_md = Markdown("pages/country.md")
from taipy.gui import Gui, notify

text = ""
revealed = False

page = """
<|{text if text else 'Write something in the input'}|>

<|{text}|input|on_change={lambda state: notify(state, 'i', state.text)}|>

<|part|render={len(text)>0}|
This part is hidden until something is typed in the input
|>

-----

<|Push|button|on_action={lambda state: state.assign("revealed", True)}|>

<|part|render={revealed}|
This part is hidden until the button is pushed
|>
"""

Gui(page).run()
# Write an app that calls a function every 5 seconds
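# A minimal sketch answering the prompt above, assuming Taipy's
# invoke_long_callback periodic-status pattern; the function and variable
# names, the dummy background job and the 5000 ms period are illustrative
# assumptions, not an original solution.
import time

from taipy.gui import Gui, Markdown, invoke_long_callback

tick_count = 0  # how many times the periodic function has been called


def background_job():
    # Keep a task running so the periodic status callback keeps firing.
    time.sleep(60)


def every_five_seconds(state, status, *_):
    # With period=5000, Taipy calls this roughly every 5 seconds while
    # background_job is running; status becomes a bool once the job is done.
    if isinstance(status, bool):
        return
    state.tick_count += 1


def on_start(state):
    invoke_long_callback(state, background_job, [], every_five_seconds, [], 5000)


page = Markdown("""
<|Start|button|on_action=on_start|>

Function calls so far: <|{tick_count}|>
""")

Gui(page).run()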
from taipy.gui import Gui, Markdown, invoke_long_callback, notify import numpy as np status = 0 num_iterations = 10_000_000 pi_list = [] def pi_approx(num_iterations): k, s = 3.0, 1.0 pi_list = [] for i in range(num_iterations): s = s-((1/k) * (-1)**i) k += 2 if (i+1)%(int(num_iterations/1_000)+1) == 0: pi_list += [np.abs(4*s-np.pi)] return pi_list def heavy_status(state, status, pi_list): notify(state, 'i', f"Status parameter: {status}") if isinstance(status, bool): if status: notify(state, 'success', "Finished") state.pi_list = pi_list else: notify(state, 'error', f"An error was raised") else: state.status += 1 def on_action(state): invoke_long_callback(state, pi_approx, [int(state.num_iterations)], heavy_status, [], 2000) page = Markdown(""" How many times was the status function called? <|{status}|> ## Number of approximation <|{num_iterations}|number|label=# of approximation|> <|Approximate pie|button|on_action=on_action|> ## Evolution of approximation <|{pi_list}|chart|layout={layout}|> """) layout = { "yaxis": { "type": 'log', "autorange": True } } Gui(page).run()
from taipy.gui import Gui, notify text = "" page_1 = """ # Page 1 <|{text}|> """ page_2 = """ # Page 2 <|Raise error|button|on_action=raise_error|> """ hidden_page_3 = """ # Hidden Page """ def raise_error(state): raise(ValueError("This is an error")) def on_init(state): print("When a new client connects, this function is called") state.text = "Hello, world!" def on_change(state, var_name, var_value): notify(state, 'i', f'{var_name} changed') def on_navigate(state, page_name: str): if page_name == "hidden_page_3": return "page_1" notify(state, 'i', f'{page_name} navigated') return page_name def on_exception(state, function_name: str, ex: Exception): err = f"A problem occured in {function_name}" print(err) notify(state, 'e', err) pages = { "/":"<|navbar|>", "page_1": page_1, "page_2": page_2, "hidden_page_3": hidden_page_3} Gui(pages=pages).run(port=5003)
from dataclasses import dataclass

from taipy.gui import Gui


@dataclass
class Item:
    # Lov elements need a "name" attribute for the adapter used below
    name: str
    values: list


selected = []
a = Item("a", [1, 2])
b = Item("b", [2, 3])
selector_lov = [a, b]

page = """
<|{selected}|selector|lov={selector_lov}|adapter={lambda s: s.name}|>
"""

gui = Gui(page)
gui.run()
from taipy.gui import Gui
from math import cos, exp

value = 10

page = """
# Taipy *Demo*

Value: <|{value}|text|>

<|{value}|slider|on_change=on_slider|>

<|{data}|chart|>
"""

def on_slider(state):
    state.data = compute_data(state.value)

def compute_data(decay:int)->list:
    return [cos(i/6) * exp(-i*decay/600) for i in range(100)]

data = compute_data(value)

Gui(page).run(use_reloader=True, port=5002)
from taipy.gui import Gui
from math import cos, exp

value = 10

page = """
# Taipy *Demo*
"""

Gui(page).run(use_reloader=True, port=5002)
# Write an app that creates simple charts from provided inputs and displays them
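# A minimal sketch answering the prompt above, assuming a plain Taipy GUI app;
# the sine-wave data, the "n_points" input and the on_change hook are
# illustrative assumptions, not an original solution.
from math import sin

from taipy.gui import Gui

n_points = 50


def compute_points(count) -> list:
    # Build a simple series to plot from the user-provided point count.
    return [sin(i / 5) for i in range(int(count))]


data = compute_points(n_points)


def on_change(state, var_name, var_value):
    # Rebuild the chart data whenever the input value changes.
    if var_name == "n_points":
        state.data = compute_points(var_value)


page = """
# Simple chart from user input

Number of points: <|{n_points}|number|>

<|{data}|chart|>
"""

Gui(page).run()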
from taipy.gui import Gui title = 1 md = """ <|{title}|number|on_change=change_partial|> <|part|partial={p}|> """ def change_partial(state): title_int = int(state.title) new_html = f'<h{title_int}>test{title_int}</h{title_int}>' print(new_html) state.p.update_content(state, new_html) gui = Gui(md) p = gui.add_partial("<h1>test</h1>") gui.run(port=5001)
import yfinance as yf from taipy.gui import Gui from taipy.gui.data.decimator import MinMaxDecimator, RDP, LTTB df_AAPL = yf.Ticker("AAPL").history(interval="1d", period = "100Y") df_AAPL["DATE"] = df_AAPL.index.astype(int).astype(float) n_out = 500 decimator_instance = MinMaxDecimator(n_out=n_out) decimate_data_count = len(df_AAPL) page = """ # Decimator From a data length of <|{len(df_AAPL)}|> to <|{n_out}|> ## Without decimator <|{df_AAPL}|chart|x=DATE|y=Open|> ## With decimator <|{df_AAPL}|chart|x=DATE|y=Open|decimator=decimator_instance|> """ gui = Gui(page) gui.run(port=5025)
from taipy import Config import taipy as tp def read_text(path: str) -> str: with open(path, 'r') as text_reader: data = text_reader.read() return data def write_text(data: str, path: str) -> None: with open(path, 'w') as text_writer: text_writer.write(data) historical_data_cfg = Config.configure_generic_data_node( id="historical_data", read_fct=read_text, write_fct=write_text, read_fct_args=["../data.txt"], write_fct_args=["../data.txt"]) scenario_cfg = Config.configure_scenario(id="my_scenario", additional_data_node_configs=[historical_data_cfg]) scenario = tp.create_scenario(scenario_cfg)
import taipy as tp from config.config_toml import scenario_cfg if __name__=="__main__": tp.Core().run() scenario_1 = tp.create_scenario(scenario_cfg) scenario_2 = tp.create_scenario(scenario_cfg) scenario_1.submit() scenario_2.submit() scenario_1 = tp.create_scenario(scenario_cfg) scenario_1.submit(wait=True) scenario_1.submit(wait=True, timeout=5)
import taipy as tp from config.config import scenario_cfg if __name__=="__main__": tp.Core().run() scenario_1 = tp.create_scenario(scenario_cfg) scenario_2 = tp.create_scenario(scenario_cfg) scenario_1.submit() scenario_2.submit() scenario_1 = tp.create_scenario(scenario_cfg) scenario_1.submit(wait=True) scenario_1.submit(wait=True, timeout=5)
from taipy import Config from algos.algos import * Config.configure_job_executions(mode="standalone", max_nb_of_workers=2) # Configuration of Data Nodes input_cfg = Config.configure_data_node("input", default_data=21) intermediate_cfg = Config.configure_data_node("intermediate", default_data=21) output_cfg = Config.configure_data_node("output") # Configuration of tasks first_task_cfg = Config.configure_task("double", double, input_cfg, intermediate_cfg) second_task_cfg = Config.configure_task("add", add, intermediate_cfg, output_cfg) # Configuration of the pipeline and scenario scenario_cfg = Config.configure_scenario(id="my_scenario", task_configs=[first_task_cfg, second_task_cfg]) Config.export("config_07.toml")
from taipy import Config from algos.algos import * Config.load('config/config_07.toml') Config.configure_job_executions(mode="standalone", max_nb_of_workers=2) scenario_cfg = Config.scenarios['my_scenario']
import time # Normal function used by Taipy def double(nb): return nb * 2 def add(nb): print("Wait 5 seconds in add function") time.sleep(5) return nb + 10
import taipy as tp from taipy.gui import Markdown from config.config import * scenario = None df_metrics = None data_node = None pages = {'/':'<|navbar|> <|toggle|theme|> <br/>', 'Scenario': Markdown('pages/scenario.md'), 'Data-Node': Markdown('pages/data_node.md')} if __name__ == "__main__": tp.Core().run() tp.Gui(pages=pages).run(port=4999)
import taipy as tp import datetime as dt from config.config import * def create_and_run_scenario(date: dt.datetime): scenario = tp.create_scenario(config=scenario_cfg, name=f"scenario_{date.date()}", creation_date=date) scenario.day.write(date) tp.submit(scenario) return scenario if __name__ == "__main__": tp.Core().run() my_first_scenario = create_and_run_scenario(dt.datetime(2021, 1, 25)) predictions = my_first_scenario.predictions.read() print("Predictions\n", predictions) [s for s in tp.get_scenarios()] [d for d in tp.get_data_nodes()]
from taipy import Config, Scope, Frequency from taipy.core import Scenario import datetime as dt from algos.algos import clean_data, predict, evaluate ## Input Data Nodes initial_dataset_cfg = Config.configure_data_node(id="initial_dataset", storage_type="csv", path="data/dataset.csv", scope=Scope.GLOBAL) # We assume the current day is the 26th of July 2021. # This day can be changed to simulate multiple executions of scenarios on different days day_cfg = Config.configure_data_node(id="day", default_data=dt.datetime(2021, 7, 26)) ## Remaining Data Node cleaned_dataset_cfg = Config.configure_data_node(id="cleaned_dataset", storage_type="parquet", scope=Scope.GLOBAL) predictions_cfg = Config.configure_data_node(id="predictions") # Task config objects clean_data_task_cfg = Config.configure_task(id="clean_data", function=clean_data, input=initial_dataset_cfg, output=cleaned_dataset_cfg, skippable=True) predict_task_cfg = Config.configure_task(id="predict", function=predict, input=[cleaned_dataset_cfg, day_cfg], output=predictions_cfg) evaluation_cfg = Config.configure_data_node(id="evaluation") evaluate_task_cfg = Config.configure_task(id="evaluate", function=evaluate, input=[predictions_cfg, cleaned_dataset_cfg, day_cfg], output=evaluation_cfg) # # Configure our scenario config. scenario_cfg = Config.configure_scenario(id="scenario", task_configs=[clean_data_task_cfg, predict_task_cfg,evaluate_task_cfg], frequency=Frequency.MONTHLY) Config.export('config/config.toml')
import datetime as dt
import pandas as pd


def clean_data(initial_dataset: pd.DataFrame):
    print("     Cleaning data")
    initial_dataset['Date'] = pd.to_datetime(initial_dataset['Date'])
    cleaned_dataset = initial_dataset[['Date', 'Value']]
    return cleaned_dataset


def predict(cleaned_dataset: pd.DataFrame, day: dt.datetime):
    print("     Predicting")
    train_dataset = cleaned_dataset[cleaned_dataset['Date'] < pd.Timestamp(day.date())]
    predictions = train_dataset['Value'][-30:].reset_index(drop=True)
    return pd.DataFrame({"Prediction": predictions})


def evaluate(predictions, cleaned_dataset, day):
    print("     Evaluating")
    expected = cleaned_dataset.loc[cleaned_dataset['Date'] >= pd.Timestamp(day.date()), 'Value'][:30].reset_index(drop=True)
    # Mean squared error between the predictions and the observed values
    mse = ((predictions['Prediction'] - expected) ** 2).mean()
    return int(mse)
from taipy.gui import Gui, notify
import datetime as dt
import yfinance as yf


def get_stock_data(ticker):
    now = dt.date.today()
    past = now - dt.timedelta(days=365*2)
    return yf.download(ticker, past, now).reset_index()

def update_ticker(state):
    # TODO: reload the data for the selected ticker, e.g.
    # state.data = ...
    pass

ticker = 'AAPL'
data = get_stock_data(ticker)

historical = """

"""

Gui(historical).run()
from taipy.gui import Gui, notify import datetime as dt import yfinance as yf def get_stock_data(ticker): now = dt.date.today() past = now - dt.timedelta(days=365*2) return yf.download(ticker, past, now).reset_index() def update_ticker(state): state.data = get_stock_data(state.ticker) notify(state, "success", "Ticker updated!") ticker = 'AAPL' data = get_stock_data(ticker) historical = """ #### Stock Price **Analysis**{: .color-primary} <|{ticker}|toggle|lov=MSFT;GOOG;AAPL|on_change=update_ticker|> Mean Volume: <|{int(data['Volume'].mean())}|> <|{data}|chart|x=Date|y=Volume|> """ Gui(historical).run()
from taipy.gui import Gui import taipy as tp from pages.data_viz import historical from pages.predictions_sol import predictions, scenario ticker = 'AAPL' pages = {"/":"<|navbar|>", "Historical":historical, "Prediction":predictions} tp.Core().run() Gui(pages=pages).run(port=5001)
from datetime import timedelta import yfinance as yf from prophet import Prophet def get_data(ticker, date): past = date - timedelta(days=365*7) return yf.download(ticker, past, date).reset_index() def predict(preprocessed_dataset): df_train = preprocessed_dataset[['Date', 'Close']] df_train = df_train.rename(columns={"Date": "ds", "Close": "y"}) # This is the format that Prophet accepts m = Prophet() m.fit(df_train) future = m.make_future_dataframe(periods=365) predictions = m.predict(future)[['ds', 'yhat']].rename(columns={'ds': 'Date', 'yhat': 'Predictions'})[-365:] return predictions.reset_index()
import taipy as tp from taipy.gui import notify, Markdown import datetime as dt tp.Config.load("config/config.toml") ticker = 'AAPL' scenario = None data_node = None jobs = [] default_data = {"Date":[0], "Predictions":[0]} def on_submission_status_change(state, submittable, details): submission_status = details.get('submission_status') if submission_status == 'COMPLETED': print(f"{submittable.name} has completed.") notify(state, 'success', 'Completed!') state.refresh('scenario') elif submission_status == 'FAILED': print(f"{submittable.name} has failed.") notify(state, 'error', 'Completed!') predictions = Markdown(""" Put a scenario selector ### **Daily**{: .color-primary} Predictions <|{scenario.ticker.read() if scenario else ticker}|toggle|lov=MSFT;GOOG;AAPL|active={scenario}|on_change=save|> Put a scenario viewer <|{show_preditions(scenario)}|chart|x=Date|y=Predictions|> <|1 5|layout| Put a Data Node selector Put a Data Node |> Put a job selector """) def show_preditions(scenario): if scenario and scenario.predictions.read() is not None: print(scenario) return scenario.predictions.read() else: return default_data def save(state, var_name, var_value): state.scenario.ticker.write(var_value) state.scenario.date.write(dt.date.today()) notify(state, "success", 'Parameters saved: ready for submission!') state.scenario.predictions.write(None) state.scenario = state.scenario
import taipy as tp
from taipy.gui import notify, Markdown
import datetime as dt

tp.Config.load("config/config.toml")

ticker = 'AAPL'
scenario = None
data_node = None
jobs = []

default_data = {"Date":[0], "Predictions":[0]}

def on_submission_status_change(state, submittable, details):
    submission_status = details.get('submission_status')

    if submission_status == 'COMPLETED':
        print(f"{submittable.name} has completed.")
        notify(state, 'success', 'Completed!')
        state.refresh('scenario')

    elif submission_status == 'FAILED':
        print(f"{submittable.name} has failed.")
        notify(state, 'error', 'Failed!')


predictions = Markdown("""
<|{scenario}|scenario_selector|>

### **Daily**{: .color-primary} Predictions

<|{scenario.ticker.read() if scenario else ticker}|toggle|lov=MSFT;GOOG;AAPL|active={scenario}|on_change=save|>

<|{scenario}|scenario|on_submission_change=on_submission_status_change|>

<|{show_predictions(scenario)}|chart|x=Date|y=Predictions|>

<|1 5|layout|
<|{data_node}|data_node_selector|>

<|{data_node}|data_node|>
|>

<|{jobs}|job_selector|>
""")

def show_predictions(scenario):
    if scenario and scenario.predictions.read() is not None:
        print(scenario)
        return scenario.predictions.read()
    else:
        return default_data

def save(state, var_name, var_value):
    state.scenario.ticker.write(var_value)
    state.scenario.date.write(dt.date.today())
    notify(state, "success", 'Parameters saved: ready for submission!')
    state.scenario.predictions.write(None)
    state.scenario = state.scenario
from taipy.gui import notify, Markdown
import datetime as dt
import yfinance as yf


def get_stock_data(ticker):
    # Download the last 2 years of daily prices for the ticker
    now = dt.date.today()
    past = now - dt.timedelta(days=365*2)
    return yf.download(ticker, past, now).reset_index()


def update_ticker(state):
    state.data = get_stock_data(state.ticker)
    notify(state, "success", "Ticker updated!")


ticker = 'AAPL'
data = get_stock_data(ticker)

historical = Markdown("""
## Stock Price **Analysis**{: .color-primary}

<|{ticker}|toggle|lov=MSFT;GOOG;AAPL|on_change=update_ticker|>

<|{data}|chart|x=Date|y=Volume|>
""")
from taipy import Gui
import pandas as pd
import requests

API_URL = "https://api-inference.huggingface.co/models/bigcode/starcoder"
headers = {"Authorization": "Bearer [ENTER-YOUR-API-KEY-HERE]"}

DATA_PATH = "data.csv"

df = pd.read_csv(DATA_PATH, sep=";")

data = pd.DataFrame(
    {
        "Date": pd.to_datetime(
            [
                "2020-01-01",
                "2020-01-02",
                "2020-01-03",
                "2020-01-04",
                "2020-01-05",
                "2020-01-06",
                "2020-01-07",
            ]
        ),
        "Sales": [100, 250, 500, 400, 450, 600, 650],
        "Revenue": [150, 200, 600, 800, 850, 900, 950],
        "Energy": ["Oil", "Coal", "Gas", "Nuclear", "Hydro", "Solar", "Wind"],
        "Usage": [0.33, 0.27, 0.21, 0.06, 0.05, 0.05, 0.03],
    }
)

# Build the few-shot context from the instruction/code pairs in data.csv
context = ""
for instruction, code in zip(df["instruction"], df["code"]):
    context += f"{instruction}\n{code}\n"


def query(payload: dict) -> dict:
    """
    Queries the StarCoder API

    Args:
        payload (dict): Payload for the StarCoder API

    Returns:
        dict: StarCoder API response
    """
    response = requests.post(API_URL, headers=headers, json=payload, timeout=20)
    return response.json()


def prompt(input_instruction: str) -> str:
    """
    Prompts StarCoder to generate Taipy GUI code

    Args:
        input_instruction (str): Instruction for StarCoder

    Returns:
        str: Taipy GUI code
    """
    current_prompt = f"{context}\n{input_instruction}\n"
    output = ""
    final_result = ""

    # Re-query until the output contains the closing tag (give up after 10 tries)
    timeout = 0
    while ">" not in output and timeout < 10:
        output = query(
            {
                "inputs": current_prompt + output,
                "parameters": {
                    "return_full_text": False,
                },
            }
        )[0]["generated_text"]
        timeout += 1
    final_result += output

    # Keep only the first complete Taipy visual element: <...>
    output_code = f"""<{final_result.split("<")[1].split(">")[0]}>"""
    return output_code


def on_enter_press(state) -> None:
    """
    Prompts StarCoder to generate Taipy GUI code when the user presses Enter

    Args:
        state (State): Taipy GUI state
    """
    state.result = prompt(state.instruction)
    state.p.update_content(state, state.result)
    print(state.result)


instruction = ""
result = ""

page = """
# Taipy**Copilot**{: .color-primary}

Enter your instruction here:

<|{instruction}|input|on_action=on_enter_press|class_name=fullwidth|>

<|Data|expandable|
<|{data[:5]}|table|width=100%|show_all=True|>
|>

<|part|partial={p}|>
"""

gui = Gui(page)
p = gui.add_partial("""<|{data}|chart|mode=lines|x=Date|y=Sales|>""")
gui.run()
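The script above expects a semicolon-separated `data.csv` with `instruction` and `code` columns, which are concatenated into the few-shot context sent to StarCoder. The real file is not included here; the rows below are made up purely to illustrate the expected shape.

# Illustrative only: made-up rows showing the expected shape of data.csv
# (semicolon-separated, one Taipy GUI visual element per instruction).
import pandas as pd

examples = pd.DataFrame({
    "instruction": [
        "Create a line chart of Sales over Date",
        "Create a table showing the whole dataset",
    ],
    "code": [
        "<|{data}|chart|mode=lines|x=Date|y=Sales|>",
        "<|{data}|table|>",
    ],
})
examples.to_csv("data.csv", sep=";", index=False)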
import json


def add_line(source, line, step):
    # Rewrite relative links and titles so they work from a standalone notebook
    line = line.replace('Getting Started with Taipy Core', 'Getting Started with Taipy Core on Notebooks')
    line = line.replace('(../src/', '(https://docs.taipy.io/en/latest/getting_started/src/')
    line = line.replace('(time_series.csv)', '(https://docs.taipy.io/en/latest/getting_started/src/time_series.csv)')
    line = line.replace('(time_series_2.csv)', '(https://docs.taipy.io/en/latest/getting_started/src/time_series_2.csv)')
    line = line.replace('!!! example "Configuration"', '')
    line = line.replace('=== "Python configuration"', '')

    if line.startswith('!['):
        # Point images at the GitHub repository of the Getting Started
        if step != 'index':
            line = line.replace('(', '(https://raw.githubusercontent.com/Avaiga/taipy-getting-started-core/develop/' + step + '/')
        else:
            line = line.replace('(', '(https://raw.githubusercontent.com/Avaiga/taipy-getting-started-core/develop/')
        # Conversion of the Markdown image to HTML
        img_src = line.split('](')[1].split(')')[0]
        width = line.split('](')[1].split(')')[1].split(' ')[1]
        source.append('<div align="center">\n')
        source.append(f'  <img src="{img_src}" {width}>\n')
        source.append('</div>\n')
    elif step == 'step_00' and line.startswith('from taipy'):
        source.append("from taipy.gui import Gui, Markdown\n")
    elif 'Notebook' in line and 'step' in step:
        pass
    else:
        source.append(line + '\n')
    return source


def detect_new_cell(notebook, source, cell, line, execution_count, force_creation=False):
    # A code fence (or a forced flush) closes the current cell and opens the next one
    if line.startswith('```python') or (line.startswith('```') and cell == 'code') or force_creation:
        source = source[:-1]
        if cell == 'code':
            notebook['cells'].append({
                "cell_type": "code",
                "metadata": {},
                "outputs": [],
                "execution_count": execution_count,
                "source": source
            })
            cell = 'markdown'
            execution_count += 1
        else:
            notebook['cells'].append({
                "cell_type": "markdown",
                "metadata": {},
                "source": source
            })
            cell = 'code'
        source = []
    return cell, source, notebook, execution_count


def create_introduction(notebook, execution_count):
    with open('index.md', 'r') as f:
        text = f.read()

    split_text = text.split('\n')
    source = []
    for line in split_text:
        if not line.startswith('``` console'):
            add_line(source, line, 'index')
        else:
            break

    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": source
    })
    notebook['cells'].append({
        "cell_type": "code",
        "metadata": {},
        "outputs": [],
        "execution_count": execution_count,
        "source": ['# !pip install taipy\n']
    })
    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": ['## Using Notebooks\n',
                   'Using Notebooks, you **may want to restart the kernel** after a run of Taipy Core\n']
    })
    execution_count += 1
    return notebook, execution_count


def create_steps(notebook, execution_count):
    steps = ['step_0' + str(i) for i in range(1, 9)]
    source = []

    for step in steps:
        # Flush whatever is left over from the previous step into its own cell
        if source != []:
            cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line,
                                                                      execution_count, force_creation=True)

        with open(step + '/ReadMe.md', 'r') as f:
            text = f.read()

        split_text = text.split('\n')
        cell = "markdown"
        for_studio = 0

        for line in split_text:
            # Strip the indentation added by the Markdown tabs/admonitions
            if cell == "markdown":
                line = line.replace("    ", "")
            elif cell == "code" and (line[:8] == "        " or len(line) <= 1) and for_studio == 2:
                line = line[8:]
            elif cell == "code" and (line[:4] == "    " or len(line) <= 1) and for_studio == 2:
                line = line[4:]
            else:
                for_studio = 0

            if '=== "Taipy Studio' in line:
                for_studio = 1
            if '=== "Python configuration"' in line:
                for_studio = 2

            # Skip the "Taipy Studio" tab; keep the Python configuration tab
            if for_studio != 1:
                add_line(source, line, step)
                cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line,
                                                                          execution_count)
    return notebook, execution_count


if __name__ == '__main__':
    notebook = {
        "cells": [],
        "metadata": {
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": 3
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython3"
            },
            "orig_nbformat": 4
        },
        "nbformat": 4,
        "nbformat_minor": 2
    }

    execution_count = 0
    notebook, execution_count = create_introduction(notebook, execution_count)
    notebook, execution_count = create_steps(notebook, execution_count)

    with open('getting_started.ipynb', 'w', encoding='utf-8') as f:
        json.dump(notebook, f, indent=2)
from taipy.core.config import Config
import taipy as tp
import time


# Normal functions used by Taipy tasks
def double(nb):
    return nb * 2


def add(nb):
    print("Wait 10 seconds in add function")
    time.sleep(10)
    return nb + 10


# Configuration of Data Nodes
input_cfg = Config.configure_data_node("input", default_data=21)
intermediate_cfg = Config.configure_data_node("intermediate")
output_cfg = Config.configure_data_node("output")

# Configuration of tasks
first_task_cfg = Config.configure_task("double", double, input_cfg, intermediate_cfg)
second_task_cfg = Config.configure_task("add", add, intermediate_cfg, output_cfg)


def callback_scenario_state(scenario, job):
    """Called for every job status change of a subscribed scenario.
    When a job completes, the outputs of its task are printed.

    Args:
        scenario (Scenario): the scenario whose job changed
        job (Job): the job whose status changed
    """
    print(scenario.name)
    if job.status == tp.core.Status.COMPLETED:
        for data_node in job.task.output.values():
            print(data_node.read())


# Configuration of the scenario
scenario_cfg = Config.configure_scenario_from_tasks(id="my_scenario",
                                                    task_configs=[first_task_cfg, second_task_cfg],
                                                    name="my_scenario")

Config.export("config_09.toml")

if __name__ == "__main__":
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg)
    # Subscribe the scenario so the callback is called on every job status change
    scenario_1.subscribe(callback_scenario_state)

    scenario_1.submit(wait=True)

    tp.Rest().run()
from taipy.core.config import Config, Scope, Frequency
import taipy as tp
import datetime as dt
import pandas as pd


def filter_by_month(df, month):
    df['Date'] = pd.to_datetime(df['Date'])
    df = df[df['Date'].dt.month == month]
    return df


def count_values(df):
    return len(df)


Config.load('config_06.toml')

# 'my_scenario' is the id of the scenario configured in config_06.toml
scenario_cfg = Config.scenarios['my_scenario']

if __name__ == '__main__':
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg,
                                    creation_date=dt.datetime(2022, 10, 7),
                                    name="Scenario 2022/10/7")
    scenario_2 = tp.create_scenario(scenario_cfg,
                                    creation_date=dt.datetime(2022, 10, 5),
                                    name="Scenario 2022/10/5")
    scenario_3 = tp.create_scenario(scenario_cfg,
                                    creation_date=dt.datetime(2021, 9, 1),
                                    name="Scenario 2021/9/1")

    # Scenarios 1 and 2 belong to the same cycle
    scenario_1.month.write(10)
    scenario_1.submit()

    # The first task has already been executed by scenario 1 and scenario 2
    # shares the same data node for this task, so it is skipped
    scenario_2.submit()
    # Every task has already been executed, so everything is skipped
    scenario_2.submit()

    # Scenario 3 shares nothing with the other scenarios, so everything is executed
    scenario_3.month.write(9)
    scenario_3.submit()

    # Changing an input data node makes the downstream tasks re-execute
    print("Scenario 3: change in historical data")
    scenario_3.historical_data.write(pd.read_csv('time_series_2.csv'))
    scenario_3.submit()
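`config_06.toml` is loaded rather than shown. The behaviour above (tasks shared and skipped between scenarios 1 and 2, scenario 3 isolated in its own cycle) implies cycle-scoped data nodes and a monthly frequency. The sketch below is only a guess at an equivalent Python configuration: the ids come from the attribute names used above, while the default CSV path and the exact scopes are assumptions.

# Hypothetical Python equivalent of config_06.toml, inferred from the script
# above; ids, scopes and the CSV path are assumptions. Cycle-scoped data nodes
# let scenarios of the same month share (and skip) the filtering task.
from taipy.core.config import Config, Scope, Frequency
import pandas as pd


def filter_by_month(df, month):
    # Mirrors the function defined in the script above
    df['Date'] = pd.to_datetime(df['Date'])
    return df[df['Date'].dt.month == month]


def count_values(df):
    return len(df)


historical_data_cfg = Config.configure_csv_data_node(id="historical_data",
                                                     default_path="time_series.csv",
                                                     scope=Scope.GLOBAL)
month_cfg = Config.configure_data_node(id="month", scope=Scope.CYCLE)
month_data_cfg = Config.configure_data_node(id="month_data", scope=Scope.CYCLE)
nb_of_values_cfg = Config.configure_data_node(id="nb_of_values")

task_filter_cfg = Config.configure_task(id="filter_by_month",
                                        function=filter_by_month,
                                        input=[historical_data_cfg, month_cfg],
                                        output=month_data_cfg)
task_count_cfg = Config.configure_task(id="count_values",
                                       function=count_values,
                                       input=month_data_cfg,
                                       output=nb_of_values_cfg)

scenario_cfg = Config.configure_scenario_from_tasks(id="my_scenario",
                                                    task_configs=[task_filter_cfg, task_count_cfg],
                                                    frequency=Frequency.MONTHLY)
Config.export("config_06.toml")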
from taipy.core.config import Config
import taipy as tp


# Normal functions used by Taipy tasks
def double(nb):
    return nb * 2


def add(nb):
    return nb + 10


# Configuration of Data Nodes
input_cfg = Config.configure_data_node("input", default_data=21)
intermediate_cfg = Config.configure_data_node("intermediate")
output_cfg = Config.configure_data_node("output")

# Configuration of tasks
first_task_cfg = Config.configure_task("double", double, input_cfg, intermediate_cfg)
second_task_cfg = Config.configure_task("add", add, intermediate_cfg, output_cfg)


def compare_function(*data_node_results):
    # Example of a comparator: builds a matrix of pairwise differences
    # between the 'output' data nodes of the compared scenarios
    compare_result = {}
    current_res_i = 0
    for current_res in data_node_results:
        compare_result[current_res_i] = {}
        next_res_i = 0
        for next_res in data_node_results:
            print(f"comparing result {current_res_i} with result {next_res_i}")
            compare_result[current_res_i][next_res_i] = next_res - current_res
            next_res_i += 1
        current_res_i += 1
    return compare_result


scenario_cfg = Config.configure_scenario_from_tasks(id="multiply_scenario",
                                                    name="my_scenario",
                                                    task_configs=[first_task_cfg, second_task_cfg],
                                                    comparators={output_cfg.id: compare_function})

Config.export("config_08.toml")

if __name__ == "__main__":
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_2 = tp.create_scenario(scenario_cfg)

    scenario_1.input.write(10)
    scenario_2.input.write(8)

    scenario_1.submit()
    scenario_2.submit()

    print(tp.compare_scenarios(scenario_1, scenario_2))

    tp.Rest().run()
from taipy.core.config import Config
import taipy as tp
import time


# Normal functions used by Taipy tasks
def double(nb):
    return nb * 2


def add(nb):
    print("Wait 10 seconds in add function")
    time.sleep(10)
    return nb + 10


Config.load('config_07.toml')
# Run jobs in separate processes, with at most 2 workers in parallel
Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)

if __name__ == "__main__":
    scenario_cfg = Config.scenarios['my_scenario']

    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_2 = tp.create_scenario(scenario_cfg)

    # Both submissions are asynchronous: their jobs run on parallel workers
    scenario_1.submit()
    scenario_2.submit()

    scenario_1 = tp.create_scenario(scenario_cfg)
    # Block until every job of the submission is finished
    scenario_1.submit(wait=True)
    # Block for at most 5 seconds
    scenario_1.submit(wait=True, timeout=5)
from taipy.core.config import Config
import taipy as tp
import datetime as dt
import pandas as pd


def filter_current(df):
    # Keep only the rows of the current month
    current_month = dt.datetime.now().month
    df['Date'] = pd.to_datetime(df['Date'])
    df = df[df['Date'].dt.month == current_month]
    return df


def count_values(df):
    return len(df)


# Configuration of Data Nodes
historical_data_cfg = Config.configure_csv_data_node(id="historical_data",
                                                     default_path="src/time_series.csv")
month_values_cfg = Config.configure_data_node(id="month_data")
nb_of_values_cfg = Config.configure_data_node(id="nb_of_values")

# Configuration of tasks
task_filter_cfg = Config.configure_task(id="filter_current",
                                        function=filter_current,
                                        input=historical_data_cfg,
                                        output=month_values_cfg)
task_count_values_cfg = Config.configure_task(id="count_values",
                                              function=count_values,
                                              input=month_values_cfg,
                                              output=nb_of_values_cfg)

# Configuration of the scenario
scenario_cfg = Config.configure_scenario_from_tasks(id="my_scenario",
                                                    task_configs=[task_filter_cfg, task_count_values_cfg])

Config.export('config_03.toml')

if __name__ == '__main__':
    tp.Core().run()

    scenario = tp.create_scenario(scenario_cfg,
                                  creation_date=dt.datetime(2022, 10, 7),
                                  name="Scenario 2022/10/7")
    scenario.submit()
    print("Nb of values of scenario:", scenario.nb_of_values.read())

    data_node = None
    tp.Gui("""<|{scenario}|scenario_selector|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|{data_node}|data_node_selector|>""").run()
from taipy.core.config import Config
import taipy as tp
import time


# Normal functions used by Taipy tasks
def double(nb):
    return nb * 2


def add(nb):
    print("Wait 10 seconds in add function")
    time.sleep(10)
    return nb + 10


# Run jobs in separate processes, with at most 2 workers in parallel
Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)

# Configuration of Data Nodes
input_cfg = Config.configure_data_node("input", default_data=21)
intermediate_cfg = Config.configure_data_node("intermediate", default_data=21)
output_cfg = Config.configure_data_node("output")

# Configuration of tasks
first_task_cfg = Config.configure_task("double", double, input_cfg, intermediate_cfg)
second_task_cfg = Config.configure_task("add", add, intermediate_cfg, output_cfg)

# Configuration of the scenario
scenario_cfg = Config.configure_scenario_from_tasks(id="my_scenario",
                                                    task_configs=[first_task_cfg, second_task_cfg])

Config.export("config_07.toml")

if __name__ == "__main__":
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_2 = tp.create_scenario(scenario_cfg)

    # Both submissions are asynchronous: their jobs run on parallel workers
    scenario_1.submit()
    scenario_2.submit()

    scenario_1 = tp.create_scenario(scenario_cfg)
    # Block until every job of the submission is finished
    scenario_1.submit(wait=True)
    # Block for at most 5 seconds
    scenario_1.submit(wait=True, timeout=5)