aravindsamala05 commited on
Commit
c3b7e05
·
verified ·
1 Parent(s): ca99b17

Upload 7 files

Browse files
Files changed (7) hide show
  1. README.md +16 -11
  2. REPORT.md +0 -0
  3. app.py +37 -37
  4. detect.py +17 -17
  5. parse_logs.py +6 -0
  6. requirements.txt +4 -0
  7. sample_logs/example_log.txt +5 -0
README.md CHANGED
@@ -1,11 +1,16 @@
1
- ---
2
- title: Logshield
3
- emoji: 🐒
4
- colorFrom: purple
5
- colorTo: purple
6
- sdk: static
7
- pinned: false
8
- license: mit
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
1
+ LogShield – AI-Powered Log Anomaly Detector
2
+ 🔒 Automatically detect anomalies in log files using AI (NLP).
3
+ 🚀 Features
4
+ Upload a .txt log file
5
+
6
+ Automatically classify log lines as:
7
+
8
+ ✅ Normal
9
+
10
+ 🚨 Anomaly
11
+
12
+ Visualize error counts in a chart
13
+
14
+ Easy-to-use Streamlit web app
15
+
16
+ Powered by Hugging Face Transformers (distilbert-base-uncased-finetuned-sst-2-english)
REPORT.md ADDED
File without changes
app.py CHANGED
@@ -1,37 +1,37 @@
1
- import streamlit as st
2
- from parse_logs import parse_log_file
3
- from detect import load_model, detect_anomaly
4
- import matplotlib.pyplot as plt
5
-
6
- st.set_page_config(page_title="LogShield - AI-Powered Log Anomaly Detector")
7
-
8
- st.title("πŸ”’ LogShield - AI-Powered Log Anomaly Detector")
9
-
10
- uploaded_file = st.file_uploader("πŸ“ Upload your log file (.txt):", type="txt")
11
-
12
- if uploaded_file:
13
- st.success("βœ… File Uploaded Successfully!")
14
-
15
- logs = parse_log_file(uploaded_file.read())
16
- st.write(f"Total Log Lines: {len(logs)}")
17
-
18
- with st.spinner("Analyzing Logs..."):
19
- model = load_model()
20
- results = detect_anomaly(model, logs)
21
-
22
- # Display results
23
- anomaly_count = results.count("Anomaly")
24
- normal_count = results.count("Normal")
25
-
26
- st.subheader("🚨 Anomaly Detection Results:")
27
- for log, result in zip(logs, results):
28
- color = "red" if result == "Anomaly" else "green"
29
- st.markdown(f"<span style='color:{color}'><strong>{result}</strong>: {log}</span>", unsafe_allow_html=True)
30
-
31
- # Display Summary
32
- st.subheader("πŸ” Summary Chart")
33
- fig, ax = plt.subplots()
34
- ax.bar(["Normal", "Anomaly"], [normal_count, anomaly_count], color=["green", "red"])
35
- ax.set_ylabel("Count")
36
- ax.set_title("Log Summary")
37
- st.pyplot(fig)
 
1
+ import streamlit as st
2
+ from parse_logs import parse_log_file
3
+ from detect import load_model, detect_anomaly
4
+ import matplotlib.pyplot as plt
5
+
6
+ st.set_page_config(page_title="LogShield - AI-Powered Log Anomaly Detector")
7
+
8
+ st.title("πŸ”’ LogShield - AI-Powered Log Anomaly Detector")
9
+
10
+ uploaded_file = st.file_uploader("πŸ“ Upload your log file (.txt):", type="txt")
11
+
12
+ if uploaded_file:
13
+ st.success("βœ… File Uploaded Successfully!")
14
+
15
+ logs = parse_log_file(uploaded_file.read())
16
+ st.write(f"Total Log Lines: {len(logs)}")
17
+
18
+ with st.spinner("Analyzing Logs..."):
19
+ model = load_model()
20
+ results = detect_anomaly(model, logs)
21
+
22
+ # Display results
23
+ anomaly_count = results.count("Anomaly")
24
+ normal_count = results.count("Normal")
25
+
26
+ st.subheader("🚨 Anomaly Detection Results:")
27
+ for log, result in zip(logs, results):
28
+ color = "red" if result == "Anomaly" else "green"
29
+ st.markdown(f"<span style='color:{color}'><strong>{result}</strong>: {log}</span>", unsafe_allow_html=True)
30
+
31
+ # Display Summary
32
+ st.subheader("πŸ” Summary Chart")
33
+ fig, ax = plt.subplots()
34
+ ax.bar(["Normal", "Anomaly"], [normal_count, anomaly_count], color=["green", "red"])
35
+ ax.set_ylabel("Count")
36
+ ax.set_title("Log Summary")
37
+ st.pyplot(fig)
detect.py CHANGED
@@ -1,17 +1,17 @@
1
- from transformers import pipeline
2
-
3
- # Load sentiment-analysis model
4
- def load_model():
5
- return pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
6
-
7
- # Predict anomaly for each log line
8
- def detect_anomaly(model, logs):
9
- results = model(logs)
10
- predictions = []
11
- for result in results:
12
- # Treat negative sentiment as Anomaly, positive/neutral as Normal
13
- if result['label'] == 'NEGATIVE':
14
- predictions.append("Anomaly")
15
- else:
16
- predictions.append("Normal")
17
- return predictions
 
from transformers import pipeline

# Hugging Face checkpoint used to score log lines (NEGATIVE => anomaly).
_MODEL_NAME = "distilbert-base-uncased-finetuned-sst-2-english"


def load_model():
    """Create and return the sentiment-analysis pipeline used by detect_anomaly."""
    return pipeline("sentiment-analysis", model=_MODEL_NAME)
+
7
+ # Predict anomaly for each log line
8
+ def detect_anomaly(model, logs):
9
+ results = model(logs)
10
+ predictions = []
11
+ for result in results:
12
+ # Treat negative sentiment as Anomaly, positive/neutral as Normal
13
+ if result['label'] == 'NEGATIVE':
14
+ predictions.append("Anomaly")
15
+ else:
16
+ predictions.append("Normal")
17
+ return predictions
parse_logs.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
def parse_log_file(file_content):
    """Split raw log-file content into a list of non-empty, stripped lines.

    Parameters
    ----------
    file_content : bytes | str
        Raw upload contents (bytes, as returned by ``uploaded_file.read()``)
        or an already-decoded string.

    Returns
    -------
    list[str]
        Each non-blank line, stripped of surrounding whitespace, in order.
    """
    if isinstance(file_content, bytes):
        # errors="replace": one stray non-UTF-8 byte in a log file should not
        # abort the whole analysis with a UnicodeDecodeError.
        file_content = file_content.decode("utf-8", errors="replace")
    # splitlines() handles \n, \r\n and \r uniformly.
    return [line.strip() for line in file_content.splitlines() if line.strip()]
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ streamlit
2
+ transformers
3
+ torch
4
+ matplotlib
sample_logs/example_log.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ INFO 2025-07-10 10:00:00 - Server started successfully
2
+ WARNING 2025-07-10 10:01:00 - High memory usage detected
3
+ ERROR 2025-07-10 10:02:00 - Database connection failed
4
+ INFO 2025-07-10 10:03:00 - Background job completed
5
+ CRITICAL 2025-07-10 10:04:00 - Unauthorized access detected