Ritvik committed
Commit dbee35a · 1 Parent(s): 3028ab2

Updated app 4

Files changed (1)
  1. app.py +347 -68
app.py CHANGED
@@ -1,12 +1,12 @@
 import gradio as gr
 from groq import Groq
 from dotenv import load_dotenv
-from duckduckgo_search import DDGS
 import os
 import traceback
 import json
 import time
 from collections import defaultdict
+from duckduckgo_search import DDGS
 import requests
 
 # Load .env environment variables
@@ -78,8 +78,22 @@ def web_search_duckduckgo(query: str, max_results: int = 5, max_retries: int = 2
             time.sleep(1)
 
 # ReAct agent response with thought process
-def respond(message, history, system_message, max_tokens, temperature, top_p, vehicle_profile):
+def respond(message, history, vehicle_profile):
     try:
+        # Default values (hidden from UI)
+        system_message = (
+            "You are CarMaa, a highly intelligent and trusted AI Car Doctor trained on comprehensive automobile data, diagnostics, "
+            "and service records with specialized knowledge of Indian vehicles, road conditions, and market pricing. Your role is to "
+            "guide car owners with accurate insights, including service intervals, symptoms, estimated repair costs, garage locations, "
+            "climate effects, and fuel-efficiency tips. Personalize answers by vehicle details and city. Engage users as a community by "
+            "allowing Q&A posts and sharing maintenance tips. ALWAYS respond with a valid JSON object containing 'thought', 'observation', 'action', "
+            "and optionally 'search_query', 'response', or 'qa_content'. Do NOT include any text outside the JSON object. Example: "
+            "{\"thought\": \"User asks for garages, need to search.\", \"observation\": \"Location: Delhi\", \"action\": \"search\", \"search_query\": \"car garages Delhi\"}"
+        )
+        max_tokens = 1024
+        temperature = 0.7
+        top_p = 0.95
+
         # Initialize messages with ReAct system prompt
         react_prompt = (
             f"{system_message}\n\n"
@@ -87,24 +101,18 @@ def respond(message, history, system_message, max_tokens, temperature, top_p, vehicle_profile):
             "1. **Thought**: Reason about the query and decide the next step. Check the diagnostics database first for known issues. For location-specific queries (e.g., garages, repair shops) or real-time data (e.g., pricing, availability), prioritize web search. For community questions, check the Q&A store.\n"
             "2. **Observation**: Note relevant information (e.g., user input, vehicle profile, tool results, or context).\n"
             "3. **Action**: Choose an action: 'search' (web search), 'respond' (final answer), 'clarify' (ask for details), 'add_qa' (add to Q&A store), or 'get_qa' (retrieve Q&A).\n"
-            "Format your response as a valid JSON object with 'thought', 'observation', 'action', and optionally 'search_query', 'response', or 'qa_content'. Example:\n"
-            "{\n"
-            " \"thought\": \"User asks for garages in Dehradun, need to search.\",\n"
-            " \"observation\": \"Location: Dehradun\",\n"
-            " \"action\": \"search\",\n"
-            " \"search_query\": \"car repair shops Dehradun\"\n"
-            "}\n"
+            "Format your response as a valid JSON object with 'thought', 'observation', 'action', and optionally 'search_query', 'response', or 'qa_content'.\n"
             f"User vehicle profile: {json.dumps(vehicle_profile)}\n"
             "Use the search tool for locations, prices, or real-time data. Ensure valid JSON."
         )
         messages = [{"role": "system", "content": react_prompt}]
 
-        # Add history
+        # Convert Gradio chat history to OpenAI-style format
         for msg in history:
-            role = msg.get("role")
-            content = msg.get("content")
-            if role in ["user", "assistant"] and content:
-                messages.append({"role": role, "content": content})
+            if msg["role"] == "user":
+                messages.append({"role": "user", "content": msg["content"]})
+            elif msg["role"] == "assistant":
+                messages.append({"role": "assistant", "content": msg["content"]})
         messages.append({"role": "user", "content": message})
 
         # Trigger keywords for garage search
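The history loop above assumes Gradio's `type="messages"` history, i.e. a list of `{"role": ..., "content": ...}` dicts rather than `[user, bot]` pairs. A minimal sketch of that normalization, with a hypothetical fallback for the older pair format (the fallback is not in this commit):

```python
def history_to_messages(history):
    """Sketch: normalize Gradio chat history into OpenAI-style messages.

    Assumes the ChatInterface uses type="messages"; the tuple branch is a
    hypothetical fallback for the older [user, bot] pair format.
    """
    messages = []
    for msg in history:
        if isinstance(msg, dict) and msg.get("role") in ("user", "assistant") and msg.get("content"):
            messages.append({"role": msg["role"], "content": msg["content"]})
        elif isinstance(msg, (list, tuple)) and len(msg) == 2:
            user_turn, bot_turn = msg
            if user_turn:
                messages.append({"role": "user", "content": user_turn})
            if bot_turn:
                messages.append({"role": "assistant", "content": bot_turn})
    return messages
```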
@@ -121,7 +129,7 @@ def respond(message, history, system_message, max_tokens, temperature, top_p, vehicle_profile):
             "car noise issue", "check engine light", "dashboard warning light", "local garage",
             "trusted mechanic", "authorized service center", "car towing service near me",
             "car not starting", "flat battery", "jump start service", "roadside assistance",
-            "ac not cooling", "car breakdown", "pickup and drop car service"
+            "ac not cooling", "car breakdown", "pickup and drop car service", "service centers for car repair"
         ]
 
         # Check diagnostics database
@@ -135,14 +143,17 @@ def respond(message, history, system_message, max_tokens, temperature, top_p, vehicle_profile):
                     f"- **Severity**: {details['severity']}\n"
                     f"Would you like to search for garages to address this issue or learn more?"
                 )
-                yield response
+                for i in range(0, len(response), 10):
+                    yield response[:i + 10]
                 return
 
         # Check for community Q&A keywords
        if any(kw in message.lower() for kw in ["community", "forum", "discussion", "share advice", "ask community"]):
             if "post" in message.lower() or "share" in message.lower():
                 community_qa.append({"question": message, "answers": []})
-                yield "Your question has been posted to the community! Check back for answers."
+                response = "Your question has been posted to the community! Check back for answers."
+                for i in range(0, len(response), 10):
+                    yield response[:i + 10]
                 return
             elif "view" in message.lower() or "see" in message.lower():
                 if community_qa:
@@ -152,15 +163,23 @@ def respond(message, history, system_message, max_tokens, temperature, top_p, vehicle_profile):
                     )
                 else:
                     response = "No community questions yet. Post one with 'share' or 'post'!"
-                yield response
+                for i in range(0, len(response), 10):
+                    yield response[:i + 10]
                 return
 
         # Check for trigger keywords to directly perform search
         if any(keyword in message.lower() for keyword in trigger_keywords):
             print(f"Trigger keyword detected in query: {message}")
-            search_results = web_search_duckduckgo(message)
+            # Enhance search query with "car" context and city from vehicle profile if available
+            search_query = message.replace("reapir", "repair") # Correct typo
+            if "car" not in search_query.lower():
+                search_query = f"car {search_query}"
+            if vehicle_profile.get("city"):
+                search_query = f"{search_query} {vehicle_profile['city']}"
+            search_results = web_search_duckduckgo(search_query)
             print(f"Search Results:\n{search_results}")
-            final_response = f"🔍 Here are some results I found:\n\n{search_results}\n\n**Tip**: {maintenance_tips[hash(message) % len(maintenance_tips)]}"
+            final_response = f"🔍 Here are some car repair service centers I found:\n\n{search_results}\n\n**Tip**: {maintenance_tips[hash(message) % len(maintenance_tips)]}"
+            # Ensure the response is yielded to the UI
             for i in range(0, len(final_response), 10):
                 yield final_response[:i + 10]
             return
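The streaming pattern used throughout this function yields progressively longer prefixes of the finished string; `gr.ChatInterface` replaces the pending assistant message with each yielded value, so the reply appears to type itself out. A standalone sketch of the same idea (chunk size and optional delay are illustrative, not taken from app.py):

```python
import time

def stream_text(text: str, chunk: int = 10, delay: float = 0.0):
    """Yield cumulative prefixes of `text`; a Gradio ChatInterface generator
    rendered this way shows the reply growing in place."""
    for i in range(0, len(text), chunk):
        if delay:
            time.sleep(delay)
        yield text[:i + chunk]

# Usage sketch:
# for partial in stream_text("🔍 Here are some results I found..."):
#     print(partial)
```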
@@ -169,12 +188,18 @@ def respond(message, history, system_message, max_tokens, temperature, top_p, vehicle_profile):
         max_iterations = 3
         max_json_retries = 2
         current_response = ""
+        previous_raw_response = None
+        clarification_count = 0 # Track number of clarification requests
+
         for iteration in range(max_iterations):
             print(f"\n--- ReAct Iteration {iteration + 1} ---")
 
             # Call LLM with current messages
             for retry in range(max_json_retries):
                 try:
+                    # Add a slight delay to avoid rate limits
+                    time.sleep(0.5)
+
                     completion = client.chat.completions.create(
                         model=MODEL_NAME,
                         messages=messages,
@@ -184,63 +209,117 @@ def respond(message, history, system_message, max_tokens, temperature, top_p, vehicle_profile):
                         stream=False,
                     )
                     raw_response = completion.choices[0].message.content
+                    print(f"Raw LLM Response: {raw_response}") # Log raw response for debugging
 
+                    # Check if the response is empty or unchanged
+                    if not raw_response or raw_response == previous_raw_response:
+                        print(f"LLM returned empty or unchanged response on attempt {retry + 1}/{max_json_retries}")
+                        if retry + 1 == max_json_retries:
+                            react_step = {
+                                "thought": "LLM failed to provide a new response",
+                                "observation": "No new response received",
+                                "action": "respond",
+                                "response": "Sorry, I couldn't process your request properly. Please try again or provide more details."
+                            }
+                            thought = react_step["thought"]
+                            observation = react_step["observation"]
+                            action = react_step["action"]
+                            response = react_step["response"]
+                            break
+                        else:
+                            messages.append({
+                                "role": "system",
+                                "content": "Previous response was empty or unchanged. You MUST provide a new, valid JSON object containing 'thought', 'observation', 'action', and optionally 'search_query', 'response', or 'qa_content'. No text outside JSON is allowed."
+                            })
+                            continue
+
                     # Parse LLM response
                     try:
                         react_step = json.loads(raw_response)
                         thought = react_step.get("thought", "")
                         observation = react_step.get("observation", "")
                         action = react_step.get("action", "")
+                        response = react_step.get("response", "")
 
-                        # Log to console
+                        # Log to terminal
                         print("Thought:", thought)
                         print("Observation:", observation)
                         print("Action:", action)
+                        previous_raw_response = raw_response
                         break
                     except json.JSONDecodeError:
-                        print(f"Error: LLM response is not valid JSON (attempt {retry + 1}/{max_json_retries}).")
+                        print(f"Error: LLM response is not valid JSON (attempt {retry + 1}/{max_json_retries}). Raw response: {raw_response}")
                         if retry + 1 == max_json_retries:
                             print("Max retries reached. Treating as direct response.")
-                            react_step = {"response": raw_response, "action": "respond"}
-                            thought = "N/A (Invalid JSON)"
-                            observation = "N/A (Invalid JSON)"
-                            action = "respond"
+                            react_step = {
+                                "thought": "Unable to parse JSON",
+                                "observation": "Invalid LLM output",
+                                "action": "respond",
+                                "response": "Sorry, I couldn't process your request properly. Please try again or provide more details."
+                            }
+                            thought = react_step["thought"]
+                            observation = react_step["observation"]
+                            action = react_step["action"]
+                            response = react_step["response"]
+                            previous_raw_response = raw_response
                         else:
                             messages.append({
                                 "role": "system",
-                                "content": "Previous response was not valid JSON. Please provide a valid JSON object with 'thought', 'observation', 'action', and optionally 'search_query', 'response', or 'qa_content'."
+                                "content": "Previous response was not valid JSON. You MUST respond with a valid JSON object containing 'thought', 'observation', 'action', and optionally 'search_query', 'response', or 'qa_content'. No text outside JSON is allowed."
                             })
                 except Exception as e:
                     print(f"LLM call failed (attempt {retry + 1}/{max_json_retries}): {str(e)}")
                     if retry + 1 == max_json_retries:
-                        react_step = {"response": f"⚠️ Failed to process query: {str(e)}", "action": "respond"}
-                        thought = "N/A (LLM error)"
-                        observation = "N/A (LLM error)"
-                        action = "respond"
+                        react_step = {
+                            "thought": "LLM call failed",
+                            "observation": f"Error: {str(e)}",
+                            "action": "respond",
+                            "response": f"⚠️ Failed to process query: {str(e)}"
+                        }
+                        thought = react_step["thought"]
+                        observation = react_step["observation"]
+                        action = react_step["action"]
+                        response = react_step["response"]
                    else:
                         time.sleep(1)
 
             # Handle action
             if action == "search":
                 search_query = react_step.get("search_query", message)
+                # Enhance search query with "car" context and city from vehicle profile if available
+                search_query = search_query.replace("reapir", "repair") # Correct typo
+                if "car" not in search_query.lower():
+                    search_query = f"car {search_query}"
+                if vehicle_profile.get("city"):
+                    search_query = f"{search_query} {vehicle_profile['city']}"
                 print(f"Performing web search for: {search_query}")
                 search_results = web_search_duckduckgo(search_query)
-                messages.append({"role": "assistant", "content": raw_response})
-                messages.append({
-                    "role": "system",
-                    "content": f"Search results for '{search_query}':\n{search_results}"
-                })
+                messages.append({"role": "system", "content": f"Search results for '{search_query}':\n{search_results}"})
                 print(f"Search Results:\n{search_results}")
+                current_response = f"🔍 Here are some car repair service centers I found:\n\n{search_results}\n\n**Tip**: {maintenance_tips[hash(message) % len(maintenance_tips)]}"
+                break # Exit loop to display results immediately
 
             elif action == "respond":
-                final_response = react_step.get("response", raw_response)
-                current_response = f"{final_response}\n\n**Tip**: {maintenance_tips[hash(message) % len(maintenance_tips)]}"
+                current_response = f"{response}\n\n**Tip**: {maintenance_tips[hash(message) % len(maintenance_tips)]}"
                 print(f"Final Response:\n{current_response}")
                 break
             elif action == "clarify":
-                clarification = react_step.get("response", "Please provide more details.")
-                messages.append({"role": "assistant", "content": raw_response})
-                current_response = clarification
+                clarification_count += 1
+                current_response = response or "Please provide more details."
+                # Avoid repetitive clarification by modifying the context
+                if clarification_count >= max_iterations:
+                    # Fallback to search if clarification isn't helping
+                    search_query = message.replace("reapir", "repair") # Correct typo
+                    if "car" not in search_query.lower():
+                        search_query = f"car {search_query}"
+                    if vehicle_profile.get("city"):
+                        search_query = f"{search_query} {vehicle_profile['city']}"
+                    print(f"Performing web search after max clarifications for: {search_query}")
+                    search_results = web_search_duckduckgo(search_query)
+                    current_response = f"🔍 I couldn't get enough details, but here are some car repair service centers I found:\n\n{search_results}\n\n**Tip**: {maintenance_tips[hash(message) % len(maintenance_tips)]}"
+                    print(f"Search Results:\n{search_results}")
+                    break
+                messages.append({"role": "assistant", "content": json.dumps(react_step)})
                 print(f"Clarification Request:\n{current_response}")
             elif action == "add_qa":
                 qa_content = react_step.get("qa_content", message)
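The retry loop relies on the prompt plus `json.loads` to recover a ReAct step, and falls back to a canned 'respond' action after `max_json_retries`. Models often wrap JSON in code fences or prose, so a tolerant parser can cut down on retries; a sketch in that spirit (the fence/substring stripping is an addition, not something this commit does):

```python
import json

def parse_react_step(raw: str) -> dict:
    """Sketch: parse a ReAct step, tolerating code fences or surrounding prose.

    Falls back to treating the whole text as a direct 'respond' action, mirroring
    the commit's max-retries fallback.
    """
    candidate = raw.strip()
    if candidate.startswith("```"):
        candidate = candidate.strip("`")
        # drop an optional "json" language tag left after stripping backticks
        if candidate.lower().startswith("json"):
            candidate = candidate[4:]
    try:
        return json.loads(candidate)
    except json.JSONDecodeError:
        start, end = candidate.find("{"), candidate.rfind("}")
        if start != -1 and end > start:
            try:
                return json.loads(candidate[start:end + 1])
            except json.JSONDecodeError:
                pass
    return {"thought": "Unable to parse JSON", "observation": "Invalid LLM output",
            "action": "respond", "response": raw}
```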
@@ -260,9 +339,9 @@ def respond(message, history, system_message, max_tokens, temperature, top_p, vehicle_profile):
                 break
             else:
                 print("Unknown action, continuing to next iteration.")
-                messages.append({"role": "assistant", "content": raw_response})
+                messages.append({"role": "assistant", "content": json.dumps(react_step)})
 
-        # Stream final response to Gradio
+        # Stream the final response to the UI
         for i in range(0, len(current_response), 10):
             yield current_response[:i + 10]
 
@@ -271,45 +350,245 @@ def respond(message, history, system_message, max_tokens, temperature, top_p, vehicle_profile):
         print(error_msg)
         yield error_msg
 
-# Gradio interface with vehicle profile
-with gr.Blocks(title="CarMaa - India's AI Car Doctor") as demo:
-    gr.Markdown("# CarMaa - India's AI Car Doctor")
-    gr.Markdown("Your trusted AI for car diagnostics, garage searches, and community advice.")
-
-    # Vehicle profile inputs
+# Gradio interface with enhanced, customer-centric UI
+with gr.Blocks(
+    title="CarMaa - India's AI Car Doctor",
+    css="""
+    /* Overall layout and background */
+    .gradio-container {
+        background: linear-gradient(135deg, #1a1a1a, #2c2c2c);
+        font-family: 'Arial', sans-serif;
+        color: #ffffff;
+    }
+    /* Header styling with car-themed elements */
+    .header {
+        background: url('https://www.transparenttextures.com/patterns/asfalt-dark.png');
+        padding: 20px;
+        border-radius: 10px 10px 0 0;
+        border-bottom: 3px solid #ff4d4d;
+        text-align: center;
+        box-shadow: 0 4px 10px rgba(0, 0, 0, 0.5);
+    }
+    .header h1 {
+        font-size: 2.5em;
+        font-weight: bold;
+        color: #ff4d4d;
+        text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.7);
+        margin: 0;
+    }
+    .header p {
+        color: #d1d1d1;
+        font-size: 1.1em;
+        margin-top: 5px;
+    }
+    /* Vehicle profile inputs with car-themed styling */
+    .vehicle-profile {
+        background: #2c2c2c;
+        padding: 20px;
+        border-radius: 8px;
+        margin: 20px 0;
+        border: 1px solid #444;
+        box-shadow: 0 2px 8px rgba(0, 0, 0, 0.3);
+    }
+    .vehicle-profile label {
+        color: #ff4d4d;
+        font-weight: bold;
+        margin-bottom: 5px;
+        display: block;
+    }
+    .vehicle-profile input {
+        background: #3a3a3a;
+        color: #ffffff;
+        border: 1px solid #555;
+        border-radius: 5px;
+        padding: 10px;
+        width: 100%;
+        transition: border-color 0.3s;
+    }
+    .vehicle-profile input:focus {
+        border-color: #ff4d4d;
+        outline: none;
+        box-shadow: 0 0 5px rgba(255, 77, 77, 0.5);
+    }
+    /* Save button with hover animation */
+    .save-btn {
+        background: #ff4d4d !important;
+        color: #ffffff !important;
+        border: none !important;
+        padding: 12px !important;
+        border-radius: 5px !important;
+        font-weight: bold !important;
+        transition: transform 0.2s, background 0.3s !important;
+        width: 100% !important;
+        margin-top: 10px !important;
+    }
+    .save-btn:hover {
+        background: #e63939 !important;
+        transform: scale(1.05);
+    }
+    /* Chat container with car dashboard feel */
+    .chatbot-container {
+        background: #1f1f1f;
+        border: 2px solid #ff4d4d;
+        border-radius: 8px;
+        padding: 15px;
+        max-height: 400px;
+        overflow-y: auto;
+        margin-bottom: 20px;
+        box-shadow: inset 0 0 10px rgba(0, 0, 0, 0.5);
+    }
+    /* Chat messages with car-themed styling */
+    .chatbot-container .message-user div, .chatbot-container .message-assistant div {
+        padding: 10px !important;
+        border-radius: 8px !important;
+        margin-bottom: 10px !important;
+        max-width: 80% !important;
+        word-wrap: break-word !important;
+    }
+    .chatbot-container .message-user div {
+        background: #ff4d4d !important;
+        color: #ffffff !important;
+        margin-left: auto !important;
+        border: 1px solid #e63939 !important;
+    }
+    .chatbot-container .message-assistant div {
+        background: #ffffff !important;
+        color: #1a1a1a !important;
+        margin-right: auto !important;
+        border: 1px solid #d1d1d1 !important;
+    }
+    /* Chat input area */
+    .chatbot-container textarea {
+        background: #3a3a3a !important;
+        color: #ffffff !important;
+        border: 1px solid #555 !important;
+        border-radius: 5px !important;
+        padding: 10px !important;
+        transition: border-color 0.3s !important;
+    }
+    .chatbot-container textarea:focus {
+        border-color: #ff4d4d !important;
+        box-shadow: 0 0 5px rgba(255, 77, 77, 0.5) !important;
+    }
+    /* Send button with car icon */
+    .chatbot-container button {
+        background: #ff4d4d !important;
+        color: #ffffff !important;
+        border: none !important;
+        border-radius: 5px !important;
+        padding: 10px 20px !important;
+        font-weight: bold !important;
+        transition: transform 0.2s, background 0.3s !important;
+    }
+    .chatbot-container button:hover {
+        background: #e63939 !important;
+        transform: scale(1.05);
+    }
+    /* Navigation tabs for quick access */
+    .nav-tabs {
+        display: flex;
+        justify-content: space-around;
+        background: #2c2c2c;
+        padding: 10px 0;
+        border-radius: 8px;
+        margin-bottom: 20px;
+        border: 1px solid #444;
+    }
+    .nav-tabs button {
+        background: none;
+        border: none;
+        color: #d1d1d1;
+        font-weight: bold;
+        padding: 10px;
+        transition: color 0.3s;
+        display: flex;
+        align-items: center;
+        gap: 5px;
+    }
+    .nav-tabs button:hover {
+        color: #ff4d4d;
+    }
+    .nav-tabs button.active {
+        color: #ff4d4d;
+        border-bottom: 2px solid #ff4d4d;
+    }
+    /* Footer with subtle branding */
+    .footer {
+        text-align: center;
+        color: #d1d1d1;
+        font-size: 0.9em;
+        margin-top: 20px;
+    }
+    """
+) as demo:
+    # Header with car-themed branding
+    with gr.Row():
+        gr.Markdown(
+            """
+            <div class='header'>
+            <h1>🚗 CarMaa - India's AI Car Doctor</h1>
+            <p>Diagnose issues, find garages, and connect with the car community!</p>
+            </div>
+            """
+        )
+
+    # Navigation tabs for quick access
     with gr.Row():
-        make_model = gr.Textbox(label="Vehicle Make and Model (e.g., Maruti Alto)", placeholder="Enter your car's make and model")
-        year = gr.Textbox(label="Year", placeholder="Enter the year of manufacture")
-        city = gr.Textbox(label="City", placeholder="Enter your city")
+        gr.Markdown(
+            """
+            <div class='nav-tabs'>
+            <button class='active'>🚘 Profile</button>
+            <button>🔧 Diagnostics</button>
+            <button>🗣️ Community</button>
+            </div>
+            """
+        )
+
+    # Vehicle profile inputs
+    with gr.Row(variant="panel", elem_classes="vehicle-profile"):
+        make_model = gr.Textbox(
+            label="Vehicle Make and Model",
+            placeholder="e.g., Maruti Alto"
+        )
+        year = gr.Textbox(
+            label="Year",
+            placeholder="e.g., 2020"
+        )
+        city = gr.Textbox(
+            label="City",
+            placeholder="e.g., Delhi"
+        )
     vehicle_profile = gr.State(value={"make_model": "", "year": "", "city": ""})
 
     # Update vehicle profile
     def update_vehicle_profile(make_model, year, city):
         return {"make_model": make_model, "year": year, "city": city}
 
-    gr.Button("Save Vehicle Profile").click(
+    gr.Button("Save Vehicle Profile", elem_classes="save-btn").click(
         fn=update_vehicle_profile,
         inputs=[make_model, year, city],
         outputs=vehicle_profile
     )
 
-    # Chat interface
+    # Chat interface with enhanced styling
     chatbot = gr.ChatInterface(
         fn=respond,
-        additional_inputs=[
-            gr.Textbox(value=(
-                "You are CarMaa, a highly intelligent and trusted AI Car Doctor trained on comprehensive automobile data, diagnostics, "
-                "and service records with specialized knowledge of Indian vehicles, road conditions, and market pricing. Your role is to "
-                "guide car owners with accurate insights, including service intervals, symptoms, estimated repair costs, garage locations, "
-                "climate effects, and fuel-efficiency tips. Personalize answers by vehicle details and city. Engage users as a community by "
-                "allowing Q&A posts and sharing maintenance tips."
-            ), label="System message"),
-            gr.Slider(minimum=1, maximum=4096, value=1024, step=1, label="Max new tokens"),
-            gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
-            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
-            vehicle_profile
-        ],
-        type="messages"
+        additional_inputs=[vehicle_profile],
+        title="",
+        description="Ask about car diagnostics, garage locations, or community advice.",
+        theme="soft",
+        textbox=gr.Textbox(placeholder="Ask about your car... 🚘"),
+        submit_btn="Send 🚀",
+        type="messages" # Updated to use the modern 'messages' format
+    )
+
+    # Footer
+    gr.Markdown(
+        """
+        <div class='footer'>
+        Powered by CarMaa © 2025 | Your Trusted Car Care Companion
+        </div>
+        """
     )
 
 if __name__ == "__main__":
 
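For reference, `web_search_duckduckgo` itself is untouched apart from the context at old line 78, so its body does not appear in this diff. A minimal sketch of what a retrying DDGS text search with that signature could look like, using `duckduckgo_search`'s `DDGS.text()`; the result formatting and fallback strings are assumptions, not copied from app.py:

```python
import time
from duckduckgo_search import DDGS

def web_search_duckduckgo(query: str, max_results: int = 5, max_retries: int = 2) -> str:
    """Sketch of a retrying DuckDuckGo text search; the real body in app.py may differ."""
    for attempt in range(max_retries):
        try:
            with DDGS() as ddgs:
                results = list(ddgs.text(query, max_results=max_results))
            if results:
                return "\n\n".join(
                    f"- **{r.get('title', '')}**\n  {r.get('body', '')}\n  {r.get('href', '')}"
                    for r in results
                )
            return "No results found."
        except Exception as exc:
            print(f"Search attempt {attempt + 1}/{max_retries} failed: {exc}")
            time.sleep(1)  # simple back-off between attempts
    return "Search is temporarily unavailable."
```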