|
|
import streamlit as st |
|
|
import pandas as pd |
|
|
import numpy as np |
|
|
from huggingface_hub import InferenceClient |
|
|
from pypdf import PdfReader |
|
|
import io |
|
|
import time |
|
|
|
|
|
|
|
|
# Global page chrome: wide layout with the sidebar open by default.
PAGE_CONFIG = dict(
    page_title="E.S.T.E.R A Clinical Workstation",
    page_icon="๐ฅ",
    layout="wide",
    initial_sidebar_state="expanded",
)
st.set_page_config(**PAGE_CONFIG)
|
|
|
|
|
|
|
|
with st.sidebar:
    st.title("๐ฅ Workstation")

    # --- API credentials -------------------------------------------------
    # The Hugging Face token must be configured as a secret; without it the
    # inference client cannot be created, so halt the whole app.
    if "HF_TOKEN" in st.secrets:
        hf_token = st.secrets["HF_TOKEN"]
    else:
        st.error("๐จ System Error: API Token is missing. Please add HF_TOKEN in Space Settings.")
        st.stop()

    # Shared inference client, used by the RAG pipeline and the chat below.
    client = InferenceClient(token=hf_token)

    # --- Patient record uploads ------------------------------------------
    st.markdown("---")
    st.subheader("๐ Patient Records")
    uploaded_files = st.file_uploader("Upload Labs/History (PDF/CSV)", type=["pdf", "csv"], accept_multiple_files=True)

    # --- Quick BMI calculator --------------------------------------------
    st.markdown("---")
    with st.expander("๐งฎ Quick BMI Calc"):
        weight = st.number_input("Weight (kg)", 0.0, 300.0, 70.0)
        height = st.number_input("Height (m)", 0.0, 2.5, 1.75)
        if st.button("Calc BMI"):
            # The widget allows height == 0, so guard against division by zero.
            if height > 0:
                bmi = weight / (height ** 2)
                st.info(f"BMI: {bmi:.1f}")
            else:
                st.warning("Height must be greater than zero.")

    # --- SOAP note generation --------------------------------------------
    st.markdown("---")
    st.subheader("๐ Documentation")
    if st.button("๐ Generate SOAP Note", use_container_width=True):
        # Require at least one exchange beyond the initial greeting message.
        if "messages" in st.session_state and len(st.session_state.messages) > 1:
            chat_history = "\n".join(f"{m['role']}: {m['content']}" for m in st.session_state.messages)

            prompt = f"""
Act as a Medical Scribe. Convert the following conversation into a professional SOAP Note.
Format:
- SUBJECTIVE: (Symptoms, History)
- OBJECTIVE: (Vitals, Labs - if mentioned)
- ASSESSMENT: (Potential Diagnosis)
- PLAN: (Treatment, Next Steps)

CONVERSATION:
{chat_history}
"""

            try:
                response = client.chat_completion(
                    model="Qwen/Qwen2.5-72B-Instruct",
                    messages=[{"role": "user", "content": prompt}],
                    max_tokens=600, temperature=0.1
                )
                st.session_state.soap_note = response.choices[0].message.content
            except Exception as e:
                st.error(f"Error generating note: {e}")

    # Show the draft plus a download button once a note exists.
    if "soap_note" in st.session_state:
        st.text_area("SOAP Draft", st.session_state.soap_note, height=300)
        st.download_button("๐พ Save to EMR", st.session_state.soap_note, "soap_note.txt")

    # --- Session reset ----------------------------------------------------
    st.markdown("---")
    if st.button("๐๏ธ Clear Patient Data", use_container_width=True):
        st.session_state.messages = []
        # pop() is safe whether or not a draft note exists.
        st.session_state.pop("soap_note", None)
        st.rerun()
|
|
|
|
|
|
|
|
@st.cache_resource(show_spinner=False)
def process_uploaded_files(uploaded_files):
    """Build the retrieval corpus and its embeddings.

    Combines rows from a fixed set of bundled knowledge-base CSVs with the
    user's uploaded PDF/CSV records, then embeds up to the first
    ``MAX_SNIPPETS`` text snippets via the Inference API.

    Args:
        uploaded_files: list of Streamlit UploadedFile objects, or None.

    Returns:
        (texts, embeddings, sources) where ``texts`` is a list of snippet
        strings, ``embeddings`` is a numpy array aligned with ``texts``,
        and ``sources`` is a list of human-readable source labels; returns
        (None, None, None) when nothing could be loaded or embedding failed.
    """
    MAX_SNIPPETS = 1500  # hard cap to keep the embedding request bounded
    all_texts = []
    source_map = []

    # Bundled knowledge-base CSVs shipped alongside the app. Loading is
    # best-effort: any missing or unreadable file is simply skipped.
    base_files = [
        "Cleaned_Data_Lite.csv", "Medical_Terms_Wiki.csv", "Sample_Clinical_Dialogues.csv",
        "doctor_patient_1.csv", "symptoms_diagnosis.csv", "conversation_status.csv",
        "doctor_result.csv", "nurse_data.csv", "medical_intelligence.csv",
        "indian_medicines.csv", "internal_med_qa.csv"
    ]

    for fname in base_files:
        try:
            # Cap rows per file to keep startup fast.
            df = pd.read_csv(fname, nrows=100)
            # One snippet per row: all columns stringified and joined.
            texts = df.astype(str).agg(' '.join, axis=1).tolist()
            all_texts.extend(texts)
            source_map.extend([f"๐ {fname}"] * len(texts))
        except Exception:
            continue  # missing/corrupt bundled file: skip it

    # User uploads: PDFs are chunked by character count, CSVs by row.
    if uploaded_files:
        for file in uploaded_files:
            if file.name.endswith('.pdf'):
                try:
                    pdf_reader = PdfReader(file)
                    text = ""
                    for page in pdf_reader.pages:
                        # extract_text() may return None for image-only pages.
                        text += (page.extract_text() or "") + "\n"
                    chunks = [text[i:i + 1000] for i in range(0, len(text), 1000)]
                    all_texts.extend(chunks)
                    source_map.extend([f"๐ {file.name}"] * len(chunks))
                except Exception:
                    pass  # unreadable PDF: ignore, keep other sources
            elif file.name.endswith('.csv'):
                try:
                    df = pd.read_csv(file)
                    texts = df.astype(str).agg(' '.join, axis=1).tolist()
                    all_texts.extend(texts)
                    source_map.extend([f"๐ {file.name}"] * len(texts))
                except Exception:
                    pass  # unreadable CSV: ignore, keep other sources

    if not all_texts:
        return None, None, None
    try:
        embeddings = client.feature_extraction(
            all_texts[:MAX_SNIPPETS], model="sentence-transformers/all-MiniLM-L6-v2"
        )
        return all_texts[:MAX_SNIPPETS], np.array(embeddings), source_map[:MAX_SNIPPETS]
    except Exception:
        # Embedding API failure: disable retrieval rather than crash the app.
        return None, None, None
|
|
|
|
|
# Build (or fetch from cache) the retrieval corpus for this session.
docs, doc_embeddings, doc_sources = process_uploaded_files(uploaded_files)

st.title("๐ฉบ E.S.T.E.R for Clinical Support")

# Seed the chat history with a greeting on the very first load only;
# Streamlit reruns the script on every interaction, so guard the init.
if "messages" not in st.session_state:
    greeting = {"role": "assistant", "content": "Medical Systems Online. Accessing Global Database... Ready."}
    st.session_state.messages = [greeting]
|
|
|
|
|
|
|
|
# Quick-action shortcuts: pressing one injects a canned clinical query
# as if the user had typed it.
quick_actions = [
    ("๐ Dosage", "What is the standard dosage for this condition?"),
    ("โ ๏ธ Side Effects", "List common side effects and contraindications."),
    ("๐ Interactions", "Are there any drug interactions I should know about?"),
    ("๐ Protocol", "Outline the standard treatment protocol."),
]
action = None
for col, (label, canned_query) in zip(st.columns(4), quick_actions):
    if col.button(label):
        action = canned_query

# Replay the stored conversation so it survives Streamlit reruns.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

# A pressed quick-action button overrides any typed input for this run.
prompt = st.chat_input("Enter clinical query...")
if action:
    prompt = action
|
|
|
|
|
if prompt:
    # Record and echo the user's turn.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # --- Retrieval: pick the single most relevant snippet, if any ---------
    context_text = "General medical guidelines."
    source_file = "AI Knowledge Base"

    if doc_embeddings is not None:
        try:
            query_vec = np.array(
                client.feature_extraction([prompt], model="sentence-transformers/all-MiniLM-L6-v2")
            )[0]
            # Dot-product similarity against every stored snippet embedding.
            scores = np.dot(doc_embeddings, query_vec)
            best_idx = int(np.argmax(scores))
            if scores[best_idx] > 0.25:  # minimum relevance threshold
                context_text = docs[best_idx]
                source_file = doc_sources[best_idx]
        except Exception:
            # Retrieval is best-effort; fall back to the generic context.
            pass

    # --- Generation: stream the assistant reply ---------------------------
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        system_prompt = f"""
TASK: You are a strict Medical Assistant.
RULES:
1. ONLY answer clinical, pharmaceutical, or biological questions.
2. If the user asks for essays, poems, code, or general chat, REFUSE politely.
3. Say: "I am a Clinical AI. I cannot help with non-medical tasks."
4. your name is E.S.T.E.R : Electronic Specialist for Technical Examining & Reporting.
CONTEXT ({source_file}): {context_text}
"""

        try:
            stream = client.chat_completion(
                model="Qwen/Qwen2.5-72B-Instruct",
                messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": prompt}],
                max_tokens=400, stream=True, temperature=0.1
            )
            # Stream tokens into the placeholder with a cursor glyph,
            # then render the final text without it.
            for chunk in stream:
                if chunk.choices and chunk.choices[0].delta.content:
                    full_response += chunk.choices[0].delta.content
                    message_placeholder.markdown(full_response + "โ")
            message_placeholder.markdown(full_response)

            # Surface the retrieved evidence only when RAG actually matched.
            if source_file != "AI Knowledge Base":
                with st.expander(f"๐ Source: {source_file}"):
                    st.info(context_text[:500] + "...")

        except Exception as e:
            st.error(f"Error: {e}")

    # Persist only non-empty replies so a failed API call does not leave
    # an empty assistant turn in the history (which would, among other
    # things, wrongly enable SOAP-note generation).
    if full_response:
        st.session_state.messages.append({"role": "assistant", "content": full_response})
|
|
|