razaali10 committed
Commit 59222ae · verified · 1 Parent(s): dfcfe24

Update app.py

Files changed (1)
app.py +34 -34
app.py CHANGED
@@ -1,60 +1,60 @@
 import streamlit as st
 import tempfile
 import wntr
-import os
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

-from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
-
-# Load model once
 @st.cache_resource
 def load_llm():
-    model_name = "Qwen/Qwen2.5-7B-Instruct-1M"
+    model_name = "deepseek-ai/deepseek-coder-6.7b-instruct"  # You can swap this with another DeepSeek model
     tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
+    model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", trust_remote_code=True)
     return pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512)

-# Load LLM
 llm = load_llm()

-st.title("💧 EPANET + LLM (via WNTR + Qwen LLM)")
+st.title("💧 EPANET + WNTR + DeepSeek LLM Assistant")

-# Upload .inp file
-uploaded_file = st.file_uploader("Upload EPANET .inp file", type=["inp"])
+uploaded_file = st.file_uploader("Upload your EPANET .inp file", type=["inp"])

-# Initialize the WNTR model
 wn = None
 if uploaded_file:
     with tempfile.NamedTemporaryFile(delete=False, suffix=".inp") as tmp_file:
         tmp_file.write(uploaded_file.read())
         inp_path = tmp_file.name
     wn = wntr.network.WaterNetworkModel(inp_path)
+    st.success("Water network model loaded successfully.")

-# Ask user question
-user_question = st.text_input("Ask a question about your water network model")
+question = st.text_input("Ask a question about your water network model")

-if st.button("Generate Code") and wn and user_question:
-    prompt = f"""You are a Python expert using the WNTR library. Given a water network model `wn`, answer this question:
-    Question: {user_question}
-    Generate a Python function called `answer()` that computes and returns the result as a variable named `result`.
-    Only include valid Python code. Do not include markdown, explanations, or text outside the function."""
+if st.button("Generate Python Code") and wn and question:
+    prompt = f"""
+You are a Python expert using the WNTR library for water network simulations.
+Given a WNTR water network model `wn`, generate a Python function called `answer()` that answers this question:
+
+Question: {question}
+
+The function must use the `wn` model, store the final answer in a variable called `result`, and return it.
+Only output valid Python code. Do not include markdown or explanations.
+"""

     try:
         response = llm(prompt)[0]["generated_text"]
-        # Optional: Clean hallucinated prompt part
-        if "def answer" in response:
-            response = response.split("def answer", 1)[-1]
-            generated_code = "def answer" + response
+        code_start = response.find("def answer")
+        if code_start != -1:
+            code = response[code_start:]
+            st.subheader("🧠 Generated Code")
+            st.code(code, language="python")
+
+            local_vars = {"wn": wn}
+            try:
+                exec(code, local_vars)
+                result = local_vars["answer"]()
+                st.subheader("📤 Output")
+                st.success(result)
+            except Exception as e:
+                st.subheader("📤 Output")
+                st.error(f"Error executing function: {e}")
         else:
-            raise ValueError("Function definition not found in LLM output.")
-
-        st.markdown("### 🧠 Generated Code")
-        st.code(generated_code, language="python")
-
-        local_env = {"wn": wn}
-        exec(generated_code, local_env)
-        result = local_env["answer"]()
-        st.markdown("### 📤 Output")
-        st.success(result)
+            st.error("Could not extract Python function from LLM response.")
     except Exception as e:
-        st.markdown("### 📤 Output")
-        st.error(f"Error running generated code: {e}")
+        st.error(f"Error querying DeepSeek model: {e}")