EnzGamers committed on
Commit
07257e8
·
verified ·
1 Parent(s): 9c8879a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +91 -86
app.py CHANGED
@@ -6,44 +6,35 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
6
  import time, uuid, json, asyncio, requests
7
  from bs4 import BeautifulSoup
8
  from typing import Optional, List, Union
 
9
 
10
  # --- AGENT BRAIN (SYSTEM PROMPT) ---
11
  SYSTEM_PROMPT = """
12
- You are a senior expert WordPress and WooCommerce developer, now enhanced with agent-like capabilities. Your primary goal is to provide accurate, well-researched, and secure code solutions.
13
 
14
  ### AGENT WORKFLOW ###
15
- 1. **Think:** First, analyze the user's request and think step-by-step.
16
- 2. **Decide:** Decide if you have enough information to answer directly or if you need to use a tool.
17
- 3. **Act:** If you need a tool, respond ONLY with a single JSON object specifying the tool and its parameters. Do not add any other text.
18
- 4. **Answer:** If you don't need a tool, or after you have received information from a tool, provide a complete answer to the user.
19
-
20
- ### AVAILABLE TOOLS ###
21
- You can request to use one of the following tools by outputting a JSON object:
22
-
23
- **1. Web Browser / Page Reader**
24
- - **Description:** Use this tool to access a specific URL and read its content. This is useful for checking documentation, reading articles, or understanding the context of a webpage.
25
- - **JSON Format:**
26
- ```json
27
- {
28
- "tool": "browse",
29
- "url": "https://www.the-url-to-visit.com"
30
- }
31
- ```
32
-
33
- ### CODING RULES (Apply when providing the final answer) ###
34
- 1. **Never Modify Core Files.**
35
- 2. **Respect Hooks (actions and filters).**
36
- 3. **Security First (Escape, Sanitize, Use Nonces).**
37
- 4. **Prioritize Performance.**
38
- 5. **Follow WordPress Coding Standards.**
39
-
40
- ### RESPONSE FORMAT (For the final answer) ###
41
- 1. A brief explanation of the solution.
42
- 2. The complete and functional PHP code block.
43
- 3. A clear instruction on where to place the code.
44
  """
45
 
46
- # --- Configuration & Model Loading ---
47
  MODEL_ID = "deepseek-ai/deepseek-coder-1.3b-instruct"
48
  DEVICE = "cpu"
49
  print(f"Loading model: {MODEL_ID}")
@@ -54,21 +45,20 @@ print("Model and tokenizer loaded successfully.")
54
 
55
  app = FastAPI()
56
 
57
- # --- Tool Execution Functions ---
58
  def execute_browse_tool(url: str) -> str:
59
  try:
60
  headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'}
61
  response = requests.get(url, headers=headers, timeout=10)
62
  response.raise_for_status()
63
  soup = BeautifulSoup(response.content, 'html.parser')
64
- for script in soup(["script", "style"]):
65
- script.decompose()
66
  text = soup.get_text(separator='\n', strip=True)
67
  return f"Content from {url}:\n\n{text[:4000]}"
68
  except Exception as e:
69
  return f"Error browsing {url}: {str(e)}"
70
 
71
- # --- Pydantic Models ---
72
  class ContentPart(BaseModel): type: str; text: str
73
  class ChatMessage(BaseModel): role: str; content: Union[str, List[ContentPart]]
74
  class ChatCompletionRequest(BaseModel):
@@ -79,6 +69,11 @@ class ChatCompletionRequest(BaseModel):
79
  class ModelData(BaseModel): id: str; object: str = "model"; owned_by: str = "user"
80
  class ModelList(BaseModel): object: str = "list"; data: List[ModelData]
81
 
 
 
 
 
 
82
  # --- API Endpoints ---
83
  @app.get("/models", response_model=ModelList)
84
  async def list_models():
@@ -95,63 +90,73 @@ async def create_chat_completion(request: ChatCompletionRequest):
95
 
96
  if not user_prompt: return {"error": "Prompt not found."}
97
 
98
- initial_messages = [{'role': 'system', 'content': SYSTEM_PROMPT}, {'role': 'user', 'content': user_prompt}]
99
-
100
- # --- CORRECTION ICI : On garantit que 'inputs' est un dictionnaire ---
101
- # Étape 1: Formatter le texte
102
- formatted_prompt = tokenizer.apply_chat_template(initial_messages, tokenize=False, add_generation_prompt=True)
103
- # Étape 2: Tokenizer le texte formaté pour obtenir un dictionnaire
104
- inputs = tokenizer(formatted_prompt, return_tensors="pt", padding=True).to(DEVICE)
105
-
106
- outputs = model.generate(**inputs, max_new_tokens=150, eos_token_id=tokenizer.eos_token_id)
107
- thought_process = tokenizer.decode(outputs[0][len(inputs['input_ids'][0]):], skip_special_tokens=True)
108
-
109
- tool_call = None
110
- try:
111
- json_part = thought_process[thought_process.find('{'):thought_process.rfind('}')+1]
112
- if json_part:
113
- tool_call = json.loads(json_part)
114
- except json.JSONDecodeError:
115
- tool_call = None
116
 
117
- if tool_call and 'tool' in tool_call:
118
- tool_context = ""
119
- if tool_call['tool'] == 'browse' and 'url' in tool_call:
120
- print(f"--- AGENT: Browsing URL: {tool_call['url']} ---")
121
- tool_context = execute_browse_tool(tool_call['url'])
 
 
 
 
 
 
 
 
 
 
 
122
 
123
- final_messages = [
124
- {'role': 'system', 'content': SYSTEM_PROMPT},
125
- {'role': 'user', 'content': user_prompt},
126
- {'role': 'assistant', 'content': f"I have used the browse tool and retrieved the following information:\n{tool_context}"},
127
- {'role': 'system', 'content': "Now, provide the final, complete answer to the user based on this information."}
128
- ]
129
- else:
130
- final_messages = [
131
- {'role': 'system', 'content': SYSTEM_PROMPT},
132
- {'role': 'user', 'content': user_prompt},
133
- {'role': 'assistant', 'content': thought_process}
134
- ]
135
-
136
- # --- DEUXIÈME CORRECTION ICI : On applique la même logique ---
137
- final_formatted_prompt = tokenizer.apply_chat_template(final_messages, tokenize=False, add_generation_prompt=True)
138
- final_inputs = tokenizer(final_formatted_prompt, return_tensors="pt", padding=True).to(DEVICE)
139
-
140
- final_outputs = model.generate(**final_inputs, max_new_tokens=1024, do_sample=True, temperature=0.1, top_k=50, top_p=0.95, eos_token_id=tokenizer.eos_token_id)
141
- response_text = tokenizer.decode(final_outputs[0][len(final_inputs['input_ids'][0]):], skip_special_tokens=True)
142
-
143
- async def stream_generator():
144
- response_id = f"chatcmpl-{uuid.uuid4()}"
145
- for char in response_text:
146
- chunk = {"id": response_id, "object": "chat.completion.chunk", "created": int(time.time()), "model": MODEL_ID, "choices": [{"index": 0, "delta": {"content": char}, "finish_reason": None}]}
147
- yield f"data: {json.dumps(chunk)}\n\n"
148
- await asyncio.sleep(0.005)
 
 
 
 
149
  final_chunk = {"id": response_id, "object": "chat.completion.chunk", "created": int(time.time()), "model": MODEL_ID, "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}]}
150
  yield f"data: {json.dumps(final_chunk)}\n\n"
151
  yield "data: [DONE]\n\n"
152
 
153
- return StreamingResponse(stream_generator(), media_type="text/event-stream")
154
 
155
  @app.get("/")
156
  def root():
157
- return {"status": "Advanced Agent for WordPress/WooCommerce is online", "model_id": MODEL_ID}
 
6
  import time, uuid, json, asyncio, requests
7
  from bs4 import BeautifulSoup
8
  from typing import Optional, List, Union
9
+ import re # On importe le module pour les expressions régulières
10
 
11
  # --- AGENT BRAIN (SYSTEM PROMPT) ---
12
  SYSTEM_PROMPT = """
13
+ You are a highly advanced AI agent specializing in WordPress & WooCommerce development. You must follow a strict "Think, Act, Answer" workflow for every user request. Your primary directive is to be transparent, showing your thought process before taking any action.
14
 
15
  ### AGENT WORKFLOW ###
16
+ You MUST structure your response within the following XML tags. This is not optional.
17
+
18
+ 1. **<thinking>**
19
+ - First, think step-by-step. Analyze the user's request.
20
+ - Break down the problem. Formulate a plan.
21
+ - Decide if you need to use a tool to gather more information (like checking official documentation for the latest best practices).
22
+ - Your entire thought process goes here.
23
+ </thinking>
24
+
25
+ 2. **<tool_code>**
26
+ - If you decide to use a tool, place the single JSON object for that tool here.
27
+ - If you do not need a tool, this tag MUST be empty.
28
+ - Example: `{"tool": "browse", "url": "https://developer.wordpress.org/reference/functions/add_action/"}`
29
+ </tool_code>
30
+
31
+ 3. **<final_answer>**
32
+ - If you can answer the user's request WITHOUT using a tool, formulate the complete and final answer here.
33
+ - If you used a tool, leave this tag empty in your first response. You will be given the tool's output and asked to generate the final answer in a second step.
34
+ </final_answer>
 
 
 
 
 
 
 
 
 
 
35
  """
36
 
37
+ # --- Configuration & Model Loading --- (Identique)
38
  MODEL_ID = "deepseek-ai/deepseek-coder-1.3b-instruct"
39
  DEVICE = "cpu"
40
  print(f"Loading model: {MODEL_ID}")
 
45
 
46
  app = FastAPI()
47
 
48
# --- Tool Execution Functions ---
def execute_browse_tool(url: str) -> str:
    """Fetch *url* and return its visible text content (first 4000 chars).

    Script and style elements are removed before text extraction.  Any
    failure (network error, non-2xx status, parse error) is returned as an
    error string instead of being raised, so the agent loop never crashes
    on a bad URL.
    """
    ua = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
          'AppleWebKit/537.36 (KHTML, like Gecko) '
          'Chrome/91.0.4472.124 Safari/537.36')
    try:
        resp = requests.get(url, headers={'User-Agent': ua}, timeout=10)
        resp.raise_for_status()
        soup = BeautifulSoup(resp.content, 'html.parser')
        # Drop non-visible markup so only readable text remains.
        for node in soup(["script", "style"]):
            node.decompose()
        text = soup.get_text(separator='\n', strip=True)
        # Cap at 4000 chars to keep the tool result within the model context.
        return f"Content from {url}:\n\n{text[:4000]}"
    except Exception as e:
        return f"Error browsing {url}: {str(e)}"
60
 
61
# --- Pydantic Models ---
class ContentPart(BaseModel):
    # One part of a structured message: a part "type" plus its text payload.
    type: str
    text: str


class ChatMessage(BaseModel):
    # A single chat turn; content is either a plain string or structured parts.
    role: str
    content: Union[str, List[ContentPart]]
64
  class ChatCompletionRequest(BaseModel):
 
69
class ModelData(BaseModel):
    # Minimal OpenAI-compatible model descriptor.
    id: str
    object: str = "model"
    owned_by: str = "user"


class ModelList(BaseModel):
    # OpenAI-compatible envelope for the /models listing.
    object: str = "list"
    data: List[ModelData]
71
 
72
+ # --- Helper function to parse XML-like tags ---
73
+ def parse_tag(tag: str, text: str) -> str:
74
+ match = re.search(f'<{tag}>(.*?)</{tag}>', text, re.DOTALL)
75
+ return match.group(1).strip() if match else ""
76
+
77
  # --- API Endpoints ---
78
  @app.get("/models", response_model=ModelList)
79
  async def list_models():
 
90
 
91
  if not user_prompt: return {"error": "Prompt not found."}
92
 
93
async def stream_agent_process():
    """SSE generator implementing the Think -> Act -> Answer agent loop."""
    response_id = f"chatcmpl-{uuid.uuid4()}"

    def sse(content: str):
        # Wrap one content delta in an OpenAI-compatible SSE chunk.
        payload = {"id": response_id, "object": "chat.completion.chunk", "created": int(time.time()), "model": MODEL_ID, "choices": [{"index": 0, "delta": {"content": content}, "finish_reason": None}]}
        return f"data: {json.dumps(payload)}\n\n"

    def run_model(messages, **gen_kwargs):
        # Template + tokenize + generate, returning only the newly generated text.
        # NOTE(review): model.generate blocks the event loop; consider
        # run_in_executor if concurrent requests matter — confirm with owner.
        prompt_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        enc = tokenizer(prompt_text, return_tensors="pt", padding=True).to(DEVICE)
        out = model.generate(**enc, eos_token_id=tokenizer.eos_token_id, **gen_kwargs)
        return tokenizer.decode(out[0][len(enc['input_ids'][0]):], skip_special_tokens=True)

    # --- STEP 1: planning pass -------------------------------------------
    plan_text = run_model(
        [{'role': 'system', 'content': SYSTEM_PROMPT}, {'role': 'user', 'content': user_prompt}],
        max_new_tokens=1024,
    )

    thinking_text = parse_tag("thinking", plan_text)
    tool_code_text = parse_tag("tool_code", plan_text)
    final_answer_text = parse_tag("final_answer", plan_text)

    # --- STEP 2: stream the agent's reasoning ----------------------------
    if thinking_text:
        yield sse(f"🤔 **Thinking...**\n```thought\n{thinking_text}\n```\n\n")
        await asyncio.sleep(0.1)

    # --- STEP 3: optional tool call + synthesis pass ---------------------
    tool_call = None
    if tool_code_text:
        try:
            tool_call = json.loads(tool_code_text)
        except json.JSONDecodeError:
            pass  # malformed tool JSON: fall back to the planning answer

    if tool_call and 'tool' in tool_call:
        if tool_call['tool'] == 'browse' and 'url' in tool_call:
            url = tool_call['url']
            yield sse(f"🔎 **Action:** Browsing `{url}`...\n\n")
            await asyncio.sleep(0.1)
            tool_context = execute_browse_tool(url)
        else:
            tool_context = "Unknown tool requested."

        # Second model call: synthesize the final answer from the tool result.
        synthesis_messages = [
            {'role': 'system', 'content': SYSTEM_PROMPT},
            {'role': 'user', 'content': user_prompt},
            {'role': 'assistant', 'content': f"<thinking>{thinking_text}</thinking><tool_code>{tool_code_text}</tool_code>"},
            {'role': 'system', 'content': f"Here is the result from your tool use:\n\n<tool_result>\n{tool_context}\n</tool_result>\n\nNow, generate the final, complete answer inside the <final_answer> tag."}
        ]
        final_response = run_model(synthesis_messages, max_new_tokens=1024, do_sample=True, temperature=0.1, top_k=50, top_p=0.95)
        final_answer_text = parse_tag("final_answer", final_response)

    # --- STEP 4: stream the final answer ---------------------------------
    if final_answer_text:
        yield sse(f"✅ **Final Answer:**\n{final_answer_text}")
    else:
        yield sse("Agent could not generate a final answer.")

    # --- End of stream ----------------------------------------------------
    final_chunk = {"id": response_id, "object": "chat.completion.chunk", "created": int(time.time()), "model": MODEL_ID, "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}]}
    yield f"data: {json.dumps(final_chunk)}\n\n"
    yield "data: [DONE]\n\n"
157
 
158
+ return StreamingResponse(stream_agent_process(), media_type="text/event-stream")
159
 
160
@app.get("/")
def root():
    """Health-check endpoint: report agent status and the loaded model id."""
    status_message = "Transparent Reasoning Agent for WordPress/WooCommerce is online"
    return {"status": status_message, "model_id": MODEL_ID}