Update app.py
app.py CHANGED
@@ -1,60 +1,60 @@
 import streamlit as st
 import tempfile
 import wntr
-import
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 
-from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
-
-# Load model once
 @st.cache_resource
 def load_llm():
-    model_name = "
+    model_name = "deepseek-ai/deepseek-coder-6.7b-instruct"  # You can swap this with another DeepSeek model
     tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
+    model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", trust_remote_code=True)
     return pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512)
 
-# Load LLM
 llm = load_llm()
 
-st.title("π§ EPANET +
+st.title("π§ EPANET + WNTR + DeepSeek LLM Assistant")
 
-
-uploaded_file = st.file_uploader("Upload EPANET .inp file", type=["inp"])
+uploaded_file = st.file_uploader("Upload your EPANET .inp file", type=["inp"])
 
-# Initialize the WNTR model
 wn = None
 if uploaded_file:
     with tempfile.NamedTemporaryFile(delete=False, suffix=".inp") as tmp_file:
         tmp_file.write(uploaded_file.read())
         inp_path = tmp_file.name
     wn = wntr.network.WaterNetworkModel(inp_path)
+    st.success("Water network model loaded successfully.")
+
+question = st.text_input("Ask a question about your water network model")
+
+if st.button("Generate Python Code") and wn and question:
+    prompt = f"""
+You are a Python expert using the WNTR library for water network simulations.
+Given a WNTR water network model `wn`, generate a Python function called `answer()` that answers this question:
 
-
-user_question = st.text_input("Ask a question about your water network model")
+Question: {question}
 
-
-
-
-Generate a Python function called `answer()` that computes and returns the result as a variable named `result`.
-Only include valid Python code. Do not include markdown, explanations, or text outside the function."""
+The function must use the `wn` model, store the final answer in a variable called `result`, and return it.
+Only output valid Python code. Do not include markdown or explanations.
+"""
 
     try:
         response = llm(prompt)[0]["generated_text"]
-
-        if
-
-
+        code_start = response.find("def answer")
+        if code_start != -1:
+            code = response[code_start:]
+            st.subheader("π§ Generated Code")
+            st.code(code, language="python")
+
+            local_vars = {"wn": wn}
+            try:
+                exec(code, local_vars)
+                result = local_vars["answer"]()
+                st.subheader("π€ Output")
+                st.success(result)
+            except Exception as e:
+                st.subheader("π€ Output")
+                st.error(f"Error executing function: {e}")
         else:
-
-
-            st.markdown("### π§ Generated Code")
-            st.code(generated_code, language="python")
-
-            local_env = {"wn": wn}
-            exec(generated_code, local_env)
-            result = local_env["answer"]()
-            st.markdown("### π€ Output")
-            st.success(result)
+            st.error("Could not extract Python function from LLM response.")
    except Exception as e:
-        st.
-        st.error(f"Error running generated code: {e}")
+        st.error(f"Error querying DeepSeek model: {e}")