naman1102 committed
Commit f8f998e · 1 Parent(s): 093dbde

fixed_openaiAPI
Files changed (4)
  1. analyzer.py +5 -5
  2. app.py +1 -1
  3. chatbot_page.py +2 -2
  4. repo_explorer.py +1 -1
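
The change is the same in all four files: the model identifier passed to client.chat.completions.create is switched from the provider-prefixed "openai/gpt-4.1-nano" to the plain "gpt-4.1-nano" that the OpenAI API expects. A minimal sketch of the resulting call, assuming the OpenAI Python SDK v1 client and the OpenAI_API environment variable used in repo_explorer.py (the prompts below are placeholders, not taken from this repo):

import os
from openai import OpenAI

# Sketch only: mirrors the call pattern used across the changed files.
client = OpenAI(api_key=os.getenv("OpenAI_API"))  # env var name as in repo_explorer.py

response = client.chat.completions.create(
    model="gpt-4.1-nano",  # plain OpenAI model ID, no "openai/" router prefix
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},  # placeholder prompt
        {"role": "user", "content": "Hello"},
    ],
)
print(response.choices[0].message.content)
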
analyzer.py CHANGED
@@ -26,7 +26,7 @@ def analyze_code(code: str) -> str:
     "{\n 'strength': '...', \n 'weaknesses': '...', \n 'speciality': '...', \n 'relevance rating': 'high'\n}"
 )
 response = client.chat.completions.create(
-    model="openai/gpt-4.1-nano",  # Updated to GPT-4.1 Nano model
+    model="gpt-4.1-nano",  # Updated model ID
     messages=[
         {"role": "system", "content": system_prompt},
         {"role": "user", "content": code}
@@ -254,7 +254,7 @@ def analyze_code_chunk(code: str, user_requirements: str = "") -> str:
 )

 response = client.chat.completions.create(
-    model="openai/gpt-4.1-nano",
+    model="gpt-4.1-nano",
     messages=[
         {"role": "system", "content": chunk_prompt},
         {"role": "user", "content": code}
@@ -288,7 +288,7 @@ def aggregate_chunk_analyses(chunk_jsons: list, user_requirements: str = "") ->
 )
 user_content = "Here are the chunk analyses:\n" + "\n".join(chunk_jsons)
 response = client.chat.completions.create(
-    model="openai/gpt-4.1-nano",
+    model="gpt-4.1-nano",
     messages=[
         {"role": "system", "content": aggregation_prompt},
         {"role": "user", "content": user_content}
@@ -344,7 +344,7 @@ Repository chunk:
 Provide a clear, conversational summary in 2-3 paragraphs:"""

 response = client.chat.completions.create(
-    model="openai/gpt-4.1-nano",
+    model="gpt-4.1-nano",
     messages=[
         {"role": "system", "content": "You are an expert code analyst creating conversational summaries for a repository assistant chatbot."},
         {"role": "user", "content": context_prompt}
@@ -397,7 +397,7 @@ Create a well-structured overview covering:
 Make this comprehensive but conversational - it will be used by a chatbot to answer user questions about the repository."""

 response = client.chat.completions.create(
-    model="openai/gpt-4.1-nano",
+    model="gpt-4.1-nano",
     messages=[
         {"role": "system", "content": "You are creating a comprehensive repository summary for a chatbot assistant."},
         {"role": "user", "content": final_prompt}
app.py CHANGED
@@ -128,7 +128,7 @@ Selected repositories:"""
 # client.base_url = os.getenv("base_url")

 response = client.chat.completions.create(
-    model="openai/gpt-4.1-nano",
+    model="gpt-4.1-nano",
     messages=[
         {"role": "system", "content": "You are an expert at analyzing and ranking repositories based on user requirements. Always return valid JSON."},
         {"role": "user", "content": prompt}
chatbot_page.py CHANGED
@@ -28,7 +28,7 @@ def chat_with_user(user_message, history):
 messages.append({"role": "assistant", "content": msg[1]})
 messages.append({"role": "user", "content": user_message})
 response = client.chat.completions.create(
-    model="openai/gpt-4.1-nano",
+    model="gpt-4.1-nano",
     messages=messages,
     max_tokens=256,
     temperature=0.7
@@ -54,7 +54,7 @@ def extract_keywords_from_conversation(history):
 "Conversation:\n" + conversation + "\n\nExtract about 5 keywords for Hugging Face repo search."
 )
 response = client.chat.completions.create(
-    model="openai/gpt-4.1-nano",
+    model="gpt-4.1-nano",
     messages=[
         {"role": "system", "content": system_prompt},
         {"role": "user", "content": user_prompt}
repo_explorer.py CHANGED
@@ -278,7 +278,7 @@ Answer the user's question based on your comprehensive knowledge of this reposit
 client = OpenAI(api_key=os.getenv("OpenAI_API"))

 response = client.chat.completions.create(
-    model="openai/gpt-4.1-nano",
+    model="gpt-4.1-nano",
     messages=[
         {"role": "system", "content": repo_system_prompt},
         {"role": "user", "content": user_message}