Elprofessore committed
Commit 7d04911 · verified · 1 Parent(s): 93bc47b

Create app.py

Files changed (1): app.py (+133, -0)
app.py ADDED
@@ -0,0 +1,133 @@
import streamlit as st
import asyncio
import re
import os
from llama_cpp import Llama
import requests
from bs4 import BeautifulSoup

# Set page configuration (must be the first Streamlit call in the script)
st.set_page_config(page_title="Security Assistant", page_icon="🔒", layout="wide")

# Custom CSS for styling
st.markdown(
    """
    <style>
    .user-message { background-color: #DCF8C6; padding: 10px; border-radius: 10px; margin: 5px 0; }
    .assistant-message { background-color: #E9ECEF; padding: 10px; border-radius: 10px; margin: 5px 0; }
    .tool-output { background-color: #F8F9FA; padding: 10px; border-radius: 10px; border: 1px solid #DEE2E6; }
    </style>
    """,
    unsafe_allow_html=True,
)

# Cache the model so it is loaded once, not on every Streamlit rerun
@st.cache_resource
def load_model():
    # Model path consistent across environments
    model_path = os.path.join("models", "pentest_ai.Q4_0.gguf")
    if not os.path.exists(model_path):
        st.error(f"Model file not found at {model_path}. Please ensure it's placed correctly.")
        return None
    try:
        model = Llama(model_path=model_path, n_ctx=2048, n_threads=4, verbose=False)
        return model
    except Exception as e:
        st.error(f"Failed to load model: {e}")
        return None
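
# NOTE: the GGUF file is expected to already sit at models/pentest_ai.Q4_0.gguf.
# On Hugging Face Spaces, one option (an assumption, not wired up here) would be
# to fetch it at startup with huggingface_hub.hf_hub_download and point
# model_path at the downloaded file.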

# Execute tools asynchronously
async def run_tool(command: str) -> str:
    try:
        process = await asyncio.create_subprocess_shell(
            command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )
        stdout, stderr = await process.communicate()
        return stdout.decode() if stdout else stderr.decode()
    except Exception as e:
        return f"Error executing tool: {str(e)}"

# Fetch vulnerability info via web scraping (no API keys)
def get_vulnerability_info(query: str) -> str:
    try:
        # Let requests URL-encode the keyword instead of interpolating it raw
        url = "https://cve.mitre.org/cgi-bin/cvekey.cgi"
        response = requests.get(url, params={"keyword": query}, timeout=10)
        soup = BeautifulSoup(response.text, "html.parser")
        vulns = []
        for row in soup.find_all("tr")[1:6]:  # Top 5 result rows
            cells = row.find_all("td")
            if len(cells) >= 2:  # Skip rows without both id and description cells
                vulns.append(f"{cells[0].text}: {cells[1].text}")
        return "\n".join(vulns) if vulns else "No vulnerabilities found."
    except Exception as e:
        return f"Error fetching vulnerability data: {str(e)}"

# Session state management
if "messages" not in st.session_state:
    st.session_state.messages = []

# Add message to chat history
def add_message(content: str, is_user: bool):
    st.session_state.messages.append({"content": content, "is_user": is_user})

# Render chat history
def render_chat():
    for msg in st.session_state.messages:
        bubble_class = "user-message" if msg["is_user"] else "assistant-message"
        st.markdown(f'<div class="{bubble_class}">{msg["content"]}</div>', unsafe_allow_html=True)

# Main application
def main():
    st.title("🔒 Open-Source Security Assistant")
    st.markdown("Powered by pentest_ai.Q4_0.gguf. Runs locally or on Hugging Face Spaces.")

    # Sidebar for settings
    with st.sidebar:
        max_tokens = st.slider("Max Tokens", 128, 1024, 256)
        if st.button("Clear Chat"):
            st.session_state.messages = []

    # Load model
    model = load_model()
    if not model:
        st.warning("Model loading failed. Check logs or ensure the model file is available.")
        return

    render_chat()

    # Chat input form
    with st.form("chat_form", clear_on_submit=True):
        user_input = st.text_area("Ask a security question...", height=100)
        submit = st.form_submit_button("Send")

    if submit and user_input:
        add_message(user_input, True)
        with st.spinner("Processing..."):
            # Prepare prompt
            system_prompt = """
            You are a cybersecurity assistant with expertise in penetration testing.
            Provide concise, actionable insights. Use [TOOL: tool_name ARGS: "args"] for tool suggestions.
            """
            full_prompt = f"{system_prompt}\nUser: {user_input}\nAssistant:"

            # Generate response
            response = model.create_completion(
                full_prompt, max_tokens=max_tokens, temperature=0.7, stop=["User:"]
            )
            generated_text = response["choices"][0]["text"].strip()

            # Parse for a tool tag such as [TOOL: nmap ARGS: "-sV 10.0.0.5"]
            tool_pattern = r"\[TOOL: (\w+) ARGS: \"(.*?)\"\]"
            match = re.search(tool_pattern, generated_text)
            if match:
                tool_name, args = match.groups()
                tool_output = asyncio.run(run_tool(f"{tool_name} {args}"))
                generated_text += f"\n\n<div class='tool-output'>Tool Output:\n{tool_output}</div>"

            # Handle vulnerability lookups
            if "vulnerability" in user_input.lower():
                query = user_input.split()[-1]  # Simplified query extraction
                vulns = get_vulnerability_info(query)
                generated_text += f"\n\nVulnerability Data:\n{vulns}"

        add_message(generated_text, False)
        # Rerun so the new exchange is displayed immediately (render_chat already ran above)
        st.rerun()

if __name__ == "__main__":
    main()
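
To try this locally (package names inferred from the imports above): pip install streamlit llama-cpp-python requests beautifulsoup4, place the GGUF file at models/pentest_ai.Q4_0.gguf, then start the app with streamlit run app.py.

A quick, self-contained sanity check of the [TOOL: ...] tag format parsed in main(); the nmap invocation is purely illustrative:

import re

tool_pattern = r"\[TOOL: (\w+) ARGS: \"(.*?)\"\]"
sample = 'Scan the host first. [TOOL: nmap ARGS: "-sV 10.0.0.5"]'
match = re.search(tool_pattern, sample)
print(match.groups())  # ('nmap', '-sV 10.0.0.5')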