CreamyClouds Claude committed on
Commit
af4a2b1
·
1 Parent(s): 9e799ac

fix: Update Engineer to use dynamic prompt getter

Browse files

- Remove ENGINEER_SYSTEM_PROMPT constant import
- Use get_engineer_system_prompt() function for per-request evaluation
- Ensures correct sandbox environment detection on HuggingFace Spaces

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

Files changed (1) hide show
  1. app/agents/engineer.py +17 -13
app/agents/engineer.py CHANGED
@@ -16,7 +16,7 @@ from app.core.llm_provider import LLMProviderFactory
16
  from app.core.logging import get_logger
17
  from app.core.exceptions import EngineerAgentError
18
  from app.agents.prompts import (
19
- ENGINEER_SYSTEM_PROMPT,
20
  ENGINEER_USER_PROMPT_TEMPLATE,
21
  ENGINEER_RETRY_PROMPT_TEMPLATE
22
  )
@@ -57,19 +57,21 @@ class EngineerAgent:
57
  # Get LLM provider
58
  self.llm = LLMProviderFactory.create_from_settings()
59
 
60
- # Create prompt templates (we'll choose at runtime)
61
- self.normal_prompt = ChatPromptTemplate.from_messages([
62
- ("system", ENGINEER_SYSTEM_PROMPT),
63
- ("human", ENGINEER_USER_PROMPT_TEMPLATE)
64
- ])
65
-
66
- self.retry_prompt = ChatPromptTemplate.from_messages([
67
- ("system", ENGINEER_SYSTEM_PROMPT),
68
- ("human", ENGINEER_RETRY_PROMPT_TEMPLATE)
69
- ])
70
 
71
  logger.info("Engineer Agent initialized")
72
 
 
 
 
 
 
 
 
 
73
  def refresh_llm(self):
74
  """Refresh LLM provider with latest settings from Redis."""
75
  self.llm = LLMProviderFactory.create_from_settings()
@@ -154,7 +156,8 @@ class EngineerAgent:
154
  }
155
  )
156
 
157
- chain = self.retry_prompt | self.llm.generate | StrOutputParser()
 
158
 
159
  code = await chain.ainvoke({
160
  "function_name": function_name,
@@ -170,7 +173,8 @@ class EngineerAgent:
170
  extra={"function_name": function_name}
171
  )
172
 
173
- chain = self.normal_prompt | self.llm.generate | StrOutputParser()
 
174
 
175
  code = await chain.ainvoke({
176
  "function_name": function_name,
 
16
  from app.core.logging import get_logger
17
  from app.core.exceptions import EngineerAgentError
18
  from app.agents.prompts import (
19
+ get_engineer_system_prompt,
20
  ENGINEER_USER_PROMPT_TEMPLATE,
21
  ENGINEER_RETRY_PROMPT_TEMPLATE
22
  )
 
57
  # Get LLM provider
58
  self.llm = LLMProviderFactory.create_from_settings()
59
 
60
+ # Store prompt templates - system prompt is generated dynamically per-request
61
+ # to ensure correct sandbox environment detection
62
+ self._user_prompt_template = ENGINEER_USER_PROMPT_TEMPLATE
63
+ self._retry_prompt_template = ENGINEER_RETRY_PROMPT_TEMPLATE
 
 
 
 
 
 
64
 
65
  logger.info("Engineer Agent initialized")
66
 
67
+ def _get_prompt(self, is_retry: bool = False) -> ChatPromptTemplate:
68
+ """Get prompt template with dynamic system prompt."""
69
+ human_template = self._retry_prompt_template if is_retry else self._user_prompt_template
70
+ return ChatPromptTemplate.from_messages([
71
+ ("system", get_engineer_system_prompt()), # Dynamic per-request
72
+ ("human", human_template)
73
+ ])
74
+
75
  def refresh_llm(self):
76
  """Refresh LLM provider with latest settings from Redis."""
77
  self.llm = LLMProviderFactory.create_from_settings()
 
156
  }
157
  )
158
 
159
+ prompt = self._get_prompt(is_retry=True)
160
+ chain = prompt | self.llm.generate | StrOutputParser()
161
 
162
  code = await chain.ainvoke({
163
  "function_name": function_name,
 
173
  extra={"function_name": function_name}
174
  )
175
 
176
+ prompt = self._get_prompt(is_retry=False)
177
+ chain = prompt | self.llm.generate | StrOutputParser()
178
 
179
  code = await chain.ainvoke({
180
  "function_name": function_name,