Update app.py
app.py
CHANGED
@@ -7,22 +7,24 @@ import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 from peft import PeftModel
 
-
 # =========================
 # 1) Set up paths
 # =========================
 
-
-
-
+# To switch to the 3B model:
+# BASE_MODEL = "meta-llama/Llama-3.2-3B-Instruct"
+# ADAPTER_REPO = "Turkiii0/UT-AI-3B-LoRA"
+# To stay on 1B, leave this as it is:
+BASE_MODEL = "meta-llama/Llama-3.2-3B-Instruct"
+ADAPTER_REPO = "Turkiii0/UT-AI-model"  # edit the LoRA repo name here if you changed it
+EXCEL_FILE = "1000 Q.xlsx"  # the Excel file name
 
-SIM_THRESHOLD
-MAX_RAG_ANSWER_LEN = 220
+SIM_THRESHOLD = 0.60  # similarity threshold for using the Excel answer
+MAX_RAG_ANSWER_LEN = 220  # maximum length we allow for an Excel answer
 
 
 HF_TOKEN = os.getenv("HF_TOKEN")
-device
-
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # =========================
 # 2) Load the model + LoRA
@@ -57,7 +59,7 @@ model = PeftModel.from_pretrained(
 
 model.to(device)
 model.eval()
-print("✅ Model ready
+print("✅ Model ready on:", device)
 
 
 # =========================
@@ -68,8 +70,10 @@ df = pd.read_excel(EXCEL_FILE)
 
 print("🧾 Columns:", list(df.columns))
 
-q_candidates = [c for c in df.columns
-
+q_candidates = [c for c in df.columns
+                if "سؤال" in str(c).lower() or "question" in str(c).lower()]
+a_candidates = [c for c in df.columns
+                if "جواب" in str(c).lower() or "answer" in str(c).lower()]
 
 if q_candidates and a_candidates:
     QCOL = q_candidates[0]
@@ -82,7 +86,7 @@ df = df[[QCOL, ACOL]]
 df.columns = ["question", "answer"]
 
 df["question"] = df["question"].astype(str).str.strip()
-df["answer"]
+df["answer"] = df["answer"].astype(str).str.strip()
 
 qa_data = df.to_dict(orient="records")
 print("📚 Loaded RAG entries:", len(qa_data))
@@ -122,7 +126,7 @@ def normalize_question(q: str) -> str:
     if "اسم" in lower_q or "name" in lower_q:
         return f"ما اسم مقرر {course}؟"
 
-    # hours
+    # number of hours
     if "ساع" in lower_q or "hour" in lower_q:
         return f"كم عدد ساعات مقرر {course}؟"
 
@@ -137,7 +141,7 @@ def normalize_question(q: str) -> str:
 AR_STOPWORDS = {
     "ما", "هو", "هي", "هل", "عن", "في", "من", "الى", "إلى",
     "مادة", "مقرر", "المقرر", "المادة", "ماهي", "ماهو",
-
+    "كم", "متطلبات", "متطلب", "متى"
 }
 
 def tokenize(text: str):
@@ -200,15 +204,27 @@ def best_match(user_q: str, records):
 
 
 # =========================
-# 6) Generate the answer from the model
+# 6) Generate the answer from the model + clean up repetition
 # =========================
 
 SYSTEM_PROMPT = (
     "أنت مساعد أكاديمي متخصص في جامعة تبوك. "
-    "أجب فقط بالمعلومة
+    "أجب فقط بالمعلومة المطلوبة (اسم مقرر، متطلب سابق، عدد ساعات، أو ضابط أكاديمي محدد) "
     "بدون شرح إضافي وبدون كلام زائد."
 )
 
+def clean_repetition(text: str) -> str:
+    """Tries to strip repeated segments separated by Arabic commas."""
+    parts = [p.strip() for p in text.split("،") if p.strip()]
+    seen = set()
+    out = []
+    for p in parts:
+        if p not in seen:
+            out.append(p)
+            seen.add(p)
+    return "، ".join(out) if out else text
+
+
 def generate_from_model(q: str) -> str:
     msgs = [
         {"role": "system", "content": SYSTEM_PROMPT},
@@ -232,10 +248,12 @@ def generate_from_model(q: str) -> str:
     with torch.no_grad():
        outputs = model.generate(
             **inputs,
-            max_new_tokens=
-            do_sample=
-            temperature=0.
-
+            max_new_tokens=80,
+            do_sample=True,
+            temperature=0.3,
+            top_p=0.9,
+            repetition_penalty=1.35,
+            no_repeat_ngram_size=4,
             eos_token_id=tokenizer.eos_token_id,
             pad_token_id=tokenizer.pad_token_id
         )
@@ -243,7 +261,9 @@ def generate_from_model(q: str) -> str:
     prompt_len = inputs["input_ids"].shape[-1]
     out_ids = outputs[0][prompt_len:]
     ans = tokenizer.decode(out_ids, skip_special_tokens=True).strip()
-
+    ans = " ".join(ans.split())
+    ans = clean_repetition(ans)
+    return ans if ans else "لم أجد إجابة واضحة لهذا السؤال."
 
 
 # =========================
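The column auto-detection added in the Excel-loading hunk can be sanity-checked without running the app. A minimal sketch, assuming a toy DataFrame; the bilingual headers below are hypothetical stand-ins for whatever "1000 Q.xlsx" actually contains:

import pandas as pd

# Hypothetical headers standing in for the real sheet's columns.
df = pd.DataFrame({
    "السؤال (Question)": ["ما اسم مقرر CS101؟"],
    "الجواب (Answer)": ["مقدمة في البرمجة"],
})

# Same matching logic as the diff: a column qualifies if its header
# contains the Arabic or English keyword (case-insensitive).
q_candidates = [c for c in df.columns
                if "سؤال" in str(c).lower() or "question" in str(c).lower()]
a_candidates = [c for c in df.columns
                if "جواب" in str(c).lower() or "answer" in str(c).lower()]

print(q_candidates)  # ['السؤال (Question)']
print(a_candidates)  # ['الجواب (Answer)']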
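The new clean_repetition helper is pure string handling, so it can be exercised on its own without loading the model. A standalone sketch; the function body is copied from the diff above, and the sample answer is made up for illustration:

# clean_repetition as added in the diff: split on the Arabic comma,
# keep the first occurrence of each segment, and rejoin.
def clean_repetition(text: str) -> str:
    """Tries to strip repeated segments separated by Arabic commas."""
    parts = [p.strip() for p in text.split("،") if p.strip()]
    seen = set()
    out = []
    for p in parts:
        if p not in seen:
            out.append(p)
            seen.add(p)
    return "، ".join(out) if out else text

print(clean_repetition("3 ساعات، 3 ساعات، متطلب سابق: CS101"))
# -> 3 ساعات، متطلب سابق: CS101
# Input without an Arabic comma passes through (only whitespace-stripped).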
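One detail in the final hunk worth making explicit: the decode step slices the prompt tokens off the generated sequence, so only newly generated tokens are decoded and returned. The slicing logic in isolation, with toy tensors standing in for real model output:

import torch

inputs = {"input_ids": torch.tensor([[101, 102, 103]])}  # 3 prompt tokens
outputs = torch.tensor([[101, 102, 103, 7, 8, 9]])       # prompt + 3 new tokens

prompt_len = inputs["input_ids"].shape[-1]  # 3
out_ids = outputs[0][prompt_len:]           # drop the echoed prompt
print(out_ids.tolist())                     # [7, 8, 9]: only these reach tokenizer.decode()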