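"""E.S.T.E.R Clinical Workstation: a Streamlit chat app for clinical support.

It pairs Qwen2.5-72B-Instruct (via the Hugging Face Inference API) with a small
retrieval layer over bundled CSV datasets and user-uploaded PDF/CSV records,
and adds SOAP-note drafting, a BMI calculator, and source citations.
"""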
import streamlit as st
import pandas as pd
import numpy as np
from huggingface_hub import InferenceClient
from pypdf import PdfReader
# --- 1. PROFESSIONAL CONFIGURATION ---
st.set_page_config(
    page_title="E.S.T.E.R: A Clinical Workstation",
    page_icon="🏥",
    layout="wide",
    initial_sidebar_state="expanded"
)
# --- 2. SIDEBAR (TOOLS & UPLOAD) ---
with st.sidebar:
    st.title("🏥 Workstation")
    # --- SECURITY: SILENT AUTHENTICATION ---
    if "HF_TOKEN" in st.secrets:
        hf_token = st.secrets["HF_TOKEN"]
    else:
        st.error("🚨 System Error: API Token is missing. Please add HF_TOKEN in Space Settings.")
        st.stop()
    client = InferenceClient(token=hf_token)
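    # NOTE: this single client is reused for every model call below: chat
    # completions (assistant replies and the SOAP scribe) and feature
    # extraction (retrieval embeddings).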
    # B. UPLOAD PATIENT FILES
    st.markdown("---")
    st.subheader("📁 Patient Records")
    uploaded_files = st.file_uploader("Upload Labs/History (PDF/CSV)", type=["pdf", "csv"], accept_multiple_files=True)

    # C. QUICK CALCULATOR
    st.markdown("---")
    with st.expander("🧮 Quick BMI Calc"):
        weight = st.number_input("Weight (kg)", 0.0, 300.0, 70.0)
        height = st.number_input("Height (m)", 0.0, 2.5, 1.75)
        if st.button("Calc BMI"):
            if height > 0:
                bmi = weight / (height ** 2)  # BMI = weight (kg) / height (m) squared
                st.info(f"BMI: {bmi:.1f}")
            else:
                st.warning("Enter a height greater than zero.")
    # D. EXPORT
    st.markdown("---")
    st.subheader("📋 Documentation")
    if st.button("📝 Generate SOAP Note", use_container_width=True):
        if "messages" in st.session_state and len(st.session_state.messages) > 1:
            chat_history = "\n".join([f"{m['role']}: {m['content']}" for m in st.session_state.messages])
            prompt = f"""
Act as a Medical Scribe. Convert the following conversation into a professional SOAP Note.
Format:
- SUBJECTIVE: (Symptoms, History)
- OBJECTIVE: (Vitals, Labs - if mentioned)
- ASSESSMENT: (Potential Diagnosis)
- PLAN: (Treatment, Next Steps)
CONVERSATION:
{chat_history}
"""
            try:
                response = client.chat_completion(
                    model="Qwen/Qwen2.5-72B-Instruct",
                    messages=[{"role": "user", "content": prompt}],
                    max_tokens=600, temperature=0.1
                )
                st.session_state.soap_note = response.choices[0].message.content
            except Exception as e:
                st.error(f"Error generating note: {e}")
        else:
            st.warning("Nothing to summarize yet: start a consultation first.")

    # Show the generated note
    if "soap_note" in st.session_state:
        st.text_area("SOAP Draft", st.session_state.soap_note, height=300)
        st.download_button("💾 Save to EMR", st.session_state.soap_note, "soap_note.txt")

    # E. Reset
    st.markdown("---")
    if st.button("🗑️ Clear Patient Data", use_container_width=True):
        st.session_state.messages = []
        if "soap_note" in st.session_state:
            del st.session_state.soap_note
        st.rerun()
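# How retrieval works below: every knowledge-base row, PDF chunk, and uploaded
# CSV row is embedded once with sentence-transformers/all-MiniLM-L6-v2; at query
# time the question is embedded the same way, and the best dot-product match
# (if it scores above 0.25) is injected into the system prompt as CONTEXT.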
# --- 3. DATA PROCESSOR ---
@st.cache_resource(show_spinner=False)
def process_uploaded_files(uploaded_files):
    all_texts = []
    source_map = []
    # Internal knowledge base: the eleven CSV datasets bundled with the app
    base_files = [
        "Cleaned_Data_Lite.csv", "Medical_Terms_Wiki.csv", "Sample_Clinical_Dialogues.csv",
        "doctor_patient_1.csv", "symptoms_diagnosis.csv", "conversation_status.csv",
        "doctor_result.csv", "nurse_data.csv", "medical_intelligence.csv",
        "indian_medicines.csv", "internal_med_qa.csv"
    ]
    for fname in base_files:
        try:
            # Load only the first 100 rows of each file to keep startup fast
            df = pd.read_csv(fname, nrows=100)
            # Combine all text columns into one string per row for the AI
            texts = df.astype(str).agg(' '.join, axis=1).tolist()
            all_texts.extend(texts)
            source_map.extend([f"📚 {fname}"] * len(texts))
        except Exception:
            # Skip silently if a file is missing so the app doesn't crash
            pass
    # Load user-uploaded files (PDF and CSV)
    if uploaded_files:
        for file in uploaded_files:
            if file.name.endswith('.pdf'):
                try:
                    pdf_reader = PdfReader(file)
                    text = ""
                    for page in pdf_reader.pages:
                        # extract_text() can return None on image-only pages
                        text += (page.extract_text() or "") + "\n"
                    # Split into 1000-character chunks for retrieval
                    chunks = [text[i:i + 1000] for i in range(0, len(text), 1000)]
                    all_texts.extend(chunks)
                    source_map.extend([f"📄 {file.name}"] * len(chunks))
                except Exception:
                    pass
            elif file.name.endswith('.csv'):
                try:
                    df = pd.read_csv(file)
                    texts = df.astype(str).agg(' '.join, axis=1).tolist()
                    all_texts.extend(texts)
                    source_map.extend([f"📄 {file.name}"] * len(texts))
                except Exception:
                    pass

    if not all_texts:
        return None, None, None
    try:
        # Vectorize (capped at 1,500 chunks to limit memory use)
        embeddings = client.feature_extraction(all_texts[:1500], model="sentence-transformers/all-MiniLM-L6-v2")
        return all_texts[:1500], np.array(embeddings), source_map[:1500]
    except Exception:
        return None, None, None

docs, doc_embeddings, doc_sources = process_uploaded_files(uploaded_files)
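# The cache means embeddings are computed once per unique upload set, not on
# every Streamlit rerun (each chat message triggers a full script rerun).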
# --- 4. MAIN INTERFACE ---
st.title("🩺 E.S.T.E.R for Clinical Support")

# Initialize Chat
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "Medical Systems Online. Accessing Global Database... Ready."}]

# QUICK ACTION BUTTONS
col1, col2, col3, col4 = st.columns(4)
action = None
if col1.button("💊 Dosage"): action = "What is the standard dosage for this condition?"
if col2.button("⚠️ Side Effects"): action = "List common side effects and contraindications."
if col3.button("🔄 Interactions"): action = "Are there any drug interactions I should know about?"
if col4.button("📋 Protocol"): action = "Outline the standard treatment protocol."
# Display History
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

# INPUT LOGIC
prompt = st.chat_input("Enter clinical query...")
if action:
    prompt = action

if prompt:
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Search for the best-matching context chunk
    context_text = "General medical guidelines."
    source_file = "AI Knowledge Base"
    if doc_embeddings is not None:
        try:
            query_vec = np.array(client.feature_extraction([prompt], model="sentence-transformers/all-MiniLM-L6-v2"))[0]
            scores = np.dot(doc_embeddings, query_vec)
            best_idx = np.argmax(scores)
            if scores[best_idx] > 0.25:
                context_text = docs[best_idx]
                source_file = doc_sources[best_idx]
        except Exception:
            pass
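    # NOTE: scores are raw dot products; the embeddings are not guaranteed to
    # be unit-normalized here, so normalizing both sides would turn the 0.25
    # threshold into a true cosine-similarity cutoff.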
    # Answer
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        system_prompt = f"""
TASK: You are a strict Medical Assistant.
RULES:
1. ONLY answer clinical, pharmaceutical, or biological questions.
2. If the user asks for essays, poems, code, or general chat, REFUSE politely.
3. Say: "I am a Clinical AI. I cannot help with non-medical tasks."
4. Your name is E.S.T.E.R: Electronic Specialist for Technical Examining & Reporting.
CONTEXT ({source_file}): {context_text}
"""
        try:
            stream = client.chat_completion(
                model="Qwen/Qwen2.5-72B-Instruct",
                messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": prompt}],
                max_tokens=400, stream=True, temperature=0.1
            )
            for chunk in stream:
                if chunk.choices and chunk.choices[0].delta.content:
                    full_response += chunk.choices[0].delta.content
                    message_placeholder.markdown(full_response + "▌")
            message_placeholder.markdown(full_response)
            # Show citation if available
            if source_file != "AI Knowledge Base":
                with st.expander(f"📄 Source: {source_file}"):
                    st.info(context_text[:500] + "...")
        except Exception as e:
            st.error(f"Error: {e}")
    st.session_state.messages.append({"role": "assistant", "content": full_response})
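# Deployment note: the base_files CSVs are loaded by relative path, so they
# must sit next to this script in the Space repository; missing ones are
# skipped silently inside process_uploaded_files.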