"""
Additional interview modes (HR, Behavioral, Coding) on top of main.py.

We reuse:
- FastAPI app from main.py
- HF tiered generation (finetuned -> base -> error)
- Resume parsing + RAG helpers
- Question / response Pydantic models
"""
| |
|
| | from typing import List |
| |
|
| | from fastapi import HTTPException |
| |
|
| | from main import ( |
| | app as base_app, |
| | RESUME_STORE, |
| | hf_generate_tiered, |
| | parse_questions_from_model_output, |
| | extract_candidate_name, |
| | extract_project_names, |
| | build_rag_context, |
| | GenerateQuestionsRequest, |
| | GenerateQuestionsResponse, |
| | Question, |
| | ) |
| |
|
# Re-export the FastAPI app imported from main.py so the routes declared
# below attach to the same application instance.
app = base_app
| |
|
| |
|
| | |
| | |
| | |
| |
|
def _rag_fallback_hr(
    candidate_name: str,
    domain: str,
    role: str,
    projects: List[str],
    n_questions: int,
) -> List[Question]:
    """Build HR / culture-fit questions locally when the model returns nothing.

    Seeds a small pool of questions (two project-specific ones when a project
    name is available, plus generic culture-fit prompts), normalizes each to
    end with "?", de-duplicates, and returns at most ``n_questions`` of them
    as ``Question`` objects with ``type="hr"`` and 1-based sequential ids.
    """
    pool: List[str] = []

    first_project = projects[0] if projects else None
    if first_project:
        pool.extend(
            [
                f"From your project \"{first_project}\", what did you learn about collaborating with cross-functional teams (e.g., product, design, business)?",
                f"In \"{first_project}\", describe a situation where priorities or requirements suddenly changed. How did you adapt and help the team move forward?",
            ]
        )

    pool.extend(
        [
            "What kind of work environment helps you perform at your best, and how do you contribute to that culture in your team?",
            f"In this {role} role, how would you handle a situation where you strongly disagree with your manager on a key decision?",
            "How do you typically respond to critical feedback from peers or managers? Share a recent example.",
            "What motivates you the most at work, and how do you stay engaged during long or difficult projects?",
            "Describe how you build trust with new teammates when joining a project or company.",
        ]
    )

    selected: List[Question] = []
    used = set()
    for raw in pool:
        question_text = raw.strip()
        if not question_text:
            continue
        # Normalize: every question must end with a question mark.
        if not question_text.endswith("?"):
            question_text = question_text.rstrip(". ") + "?"
        if question_text in used:
            continue
        used.add(question_text)
        selected.append(Question(id=len(selected) + 1, type="hr", question=question_text))
        if len(selected) >= n_questions:
            break

    return selected
| |
|
| |
|
def _rag_fallback_behavioral(
    candidate_name: str,
    domain: str,
    role: str,
    projects: List[str],
    n_questions: int,
) -> List[Question]:
    """Behavioral / STAR-style fallback questions.

    Builds a seed pool (project-specific first, then generic), normalizes each
    question to open with a STAR-style stem and end with "?", de-duplicates,
    and returns at most ``n_questions`` ``Question`` objects with
    ``type="behavioral"`` and 1-based sequential ids.
    """
    base: List[str] = []

    proj = projects[0] if projects else None

    if proj:
        base.append(
            f"Tell me about a time during \"{proj}\" when things were going badly or off-track. What did you do to turn it around?"
        )
        base.append(
            f"Describe a time in \"{proj}\" where you had to make a difficult trade-off under time pressure. How did you decide and what was the outcome?"
        )

    base.extend(
        [
            "Tell me about a time you had a serious conflict with a teammate or stakeholder. How did you handle it and what did you learn?",
            f"Describe a time you took ownership beyond your formal responsibilities in a {domain} / {role} context.",
            "Tell me about a failure or mistake you made at work or in a project. How did you respond, and what changed afterwards?",
            "Describe a situation where you had to influence people who did not report to you (e.g., other teams) to get something done.",
            "Tell me about a time you had to deliver under very ambiguous or changing requirements. How did you bring clarity?",
        ]
    )

    # Openers we accept as already STAR-style. The previous version prepended
    # "Tell me about a time " to ANY question that did not start with that
    # exact phrase, producing broken prompts such as
    # "Tell me about a time describe a situation where ..." for its own
    # "Describe ..." seeds.
    star_openers = ("tell me about", "describe", "give me an example")

    questions: List[Question] = []
    seen = set()
    for text in base:
        text = text.strip()
        if not text:
            # Guard against empty entries; also prevents IndexError on
            # text[0] below (the HR fallback has the same guard).
            continue
        if not text.lower().startswith(star_openers):
            text = "Tell me about a time " + text[0].lower() + text[1:]
        if not text.endswith("?"):
            text = text.rstrip(". ") + "?"
        if text in seen:
            continue
        seen.add(text)
        questions.append(Question(id=len(questions) + 1, type="behavioral", question=text))
        if len(questions) >= n_questions:
            break

    return questions
| |
|
| |
|
def _rag_fallback_coding(
    candidate_name: str,
    domain: str,
    role: str,
    projects: List[str],
    n_questions: int,
) -> List[Question]:
    """Produce coding / algorithms questions when the model yields nothing.

    Uses a fixed pool of classic coding prompts (the last one is role-aware),
    normalizes each to end with "?", de-duplicates, and returns at most
    ``n_questions`` ``Question`` objects with ``type="coding"`` and 1-based
    sequential ids.
    """
    pool: List[str] = [
        "Given an array of integers, write code to return the length of the longest increasing subsequence. Explain the time and space complexity.",
        "Design a function that checks if a string containing '(', ')', '{', '}', '[' and ']' has valid and balanced brackets. What data structure would you use and why?",
        "Given a large log file of user events, how would you efficiently find the top K most frequent user IDs? Discuss both the algorithm and complexity.",
        "Write a function to perform a BFS (breadth-first search) on a graph represented as an adjacency list. How would you modify it to find the shortest path in an unweighted graph?",
        f"In the context of a real {role} system, how would you design a solution to throttle API calls from a single user (rate limiter)? What data structures would you use in code?",
    ]

    picked: List[Question] = []
    already = set()
    for raw in pool:
        q = raw.strip()
        # Every question must end with a question mark.
        if not q.endswith("?"):
            q = q.rstrip(". ") + "?"
        if q in already:
            continue
        already.add(q)
        picked.append(Question(id=len(picked) + 1, type="coding", question=q))
        if len(picked) >= n_questions:
            break

    return picked
| |
|
| |
|
| | |
| | |
| | |
| |
|
def _build_prompt_for_mode(
    mode: str,
    req: GenerateQuestionsRequest,
    resume_text: str,
    candidate_name: str,
) -> str:
    """Build a strong HF prompt for HR / Behavioral / Coding.

    Args:
        mode: One of "hr", "behavioral", "coding" (case-insensitive).
        req: The incoming request (role, domain, n_questions, job description).
        resume_text: Raw resume text for RAG context (may be empty).
        candidate_name: Extracted candidate name (may be empty).

    Returns:
        The full prompt string: header + mode-specific requirements + output
        format instructions.

    Raises:
        HTTPException: 400 if ``mode`` is not a supported interview mode.
    """
    # Normalize once, up front, and fail fast on an unsupported mode so we
    # don't waste time building RAG context for a request we will reject.
    # (Previously ``mode`` was lowered only after it had been interpolated
    # into the header, and validation happened last.)
    mode = mode.lower()
    if mode not in ("hr", "behavioral", "coding"):
        raise HTTPException(status_code=400, detail=f"Unknown mode '{mode}'")

    rag_context = build_rag_context(
        candidate_name=candidate_name,
        resume_text=resume_text,
        job_description=req.job_description,
        domain=req.domain,
        role=req.role,
    )

    base_header = f"""
You are a seasoned hiring manager designing a **{mode.capitalize()} interview**.

Candidate: {candidate_name or req.name}
Role: {req.role}
Domain: {req.domain}
Number of questions needed: {req.n_questions}

Below is RAG context from the candidate's resume + job description:
[CONTEXT START]
{rag_context}
[CONTEXT END]
"""

    if mode == "hr":
        specifics = f"""
Your task is to create EXACTLY {req.n_questions} **HR / culture-fit questions**.

Requirements:
- ALL questions must be HR-style (communication, teamwork, conflict, motivation, values, growth).
- Do **NOT** ask detailed algorithms, coding, or system-design questions.
- Use open-ended questions that cannot be answered with just one sentence.
- Reference the candidate's projects or experience from the context when it makes sense.
- Avoid yes/no questions.
- Each question must end with a question mark (?).
"""
    elif mode == "behavioral":
        specifics = f"""
Your task is to create EXACTLY {req.n_questions} **behavioral interview questions**.

Requirements:
- EVERY question must be STAR-style (Situation, Task, Action, Result).
- Start most questions with phrases like:
  * "Tell me about a time when..."
  * "Describe a situation where..."
  * "Give me an example of..."
- Cover a mix of themes: ownership, failure, conflict, leadership, ambiguity, learning.
- Use the candidate's projects and domain when helpful, but keep questions reusable.
- No pure theory or trivia questions.
- Each question must end with a question mark (?).
"""
    else:  # mode == "coding" (guaranteed by the validation above)
        specifics = f"""
Your task is to create EXACTLY {req.n_questions} **coding / algorithmic questions**.

Requirements:
- Focus on practical coding problems: data structures, algorithms, debugging, complexity.
- For each question:
  * Clearly describe the problem in words only (no full solutions).
  * Mention what the candidate should implement or explain in code (e.g., in Python).
  * Optionally ask them to discuss time/space complexity or edge cases.
- At least 3 questions should involve writing or designing code.
- You may also include 1 system-design-ish coding question (e.g., rate limiter, caching).
- Each question must end with a question mark (?).
"""

    format_instructions = f"""
Output format (IMPORTANT):
- Return ONLY a numbered list of questions.
- No explanations, no JSON, no markdown headings.
Example:
1. First question that ends with a question mark?
2. Second question that ends with a question mark?

Now write the {req.n_questions} best {mode} questions in that format.
"""

    return (base_header + specifics + format_instructions).strip()
| |
|
| |
|
async def _generate_for_mode(req: GenerateQuestionsRequest, mode: str) -> GenerateQuestionsResponse:
    """Shared implementation for the 3 new endpoints.

    Pipeline: resolve resume text and candidate name, try the HF tiered
    models, fall back to the mode-specific RAG question bank, and raise a
    500 if both paths produce nothing.
    """
    resume_text = RESUME_STORE.get(req.resume_token, "") if req.resume_token else ""

    candidate_name = (req.name or "").strip()
    if not candidate_name and resume_text:
        candidate_name = extract_candidate_name(resume_text)

    prompt = _build_prompt_for_mode(mode, req, resume_text, candidate_name)

    questions: List[Question] = []

    # Primary path: tiered HF generation (finetuned -> base). Any failure is
    # logged and we fall through to the RAG fallback below.
    try:
        raw = hf_generate_tiered(prompt, max_tokens=768)
        questions = parse_questions_from_model_output(raw, req.n_questions)
        print(f"[{mode}] Parsed {len(questions)} questions from HF output.")
    except Exception as e:
        print(f"[{mode}] Error calling HF tiered models:", e)

    if not questions:
        print(f"[{mode}] No questions from HF. Using RAG fallback.")
        projects = extract_project_names(resume_text)
        fallback_builders = {
            "hr": _rag_fallback_hr,
            "behavioral": _rag_fallback_behavioral,
            "coding": _rag_fallback_coding,
        }
        builder = fallback_builders.get(mode)
        if builder is not None:
            questions = builder(candidate_name, req.domain, req.role, projects, req.n_questions)

    if not questions:
        raise HTTPException(status_code=500, detail=f"Could not generate {mode} questions.")

    return GenerateQuestionsResponse(questions=questions)
| |
|
| |
|
| | |
| | |
| | |
| |
|
@app.post("/generate_questions_hr", response_model=GenerateQuestionsResponse)
async def generate_questions_hr(req: GenerateQuestionsRequest):
    """Generate HR / culture-fit interview questions for the candidate."""
    return await _generate_for_mode(req, "hr")
| |
|
| |
|
@app.post("/generate_questions_behavioral", response_model=GenerateQuestionsResponse)
async def generate_questions_behavioral(req: GenerateQuestionsRequest):
    """Generate behavioral (STAR-style) interview questions for the candidate."""
    return await _generate_for_mode(req, "behavioral")
| |
|
| |
|
@app.post("/generate_questions_coding", response_model=GenerateQuestionsResponse)
async def generate_questions_coding(req: GenerateQuestionsRequest):
    """Generate coding / algorithmic interview questions for the candidate."""
    return await _generate_for_mode(req, "coding")
| |
|