|
import openai
|
|
import google.generativeai as genai
|
|
from config import OPENAI_API_KEY, GEMINI_API_KEY, OPENAI_DEFAULT_MODEL, GEMINI_DEFAULT_MODEL
|
|
|
|
def configure_llms():
    """
    Configure the OpenAI and Gemini SDKs from keys in ``config``.

    Intended to run once at startup (or from the main app module).
    Each SDK is configured only when its corresponding key is truthy,
    so a missing key simply leaves that provider unconfigured.
    """
    # OpenAI: the legacy SDK is configured via a module-level attribute.
    if OPENAI_API_KEY:
        openai.api_key = OPENAI_API_KEY

    # Gemini: google.generativeai exposes an explicit configure() call.
    if GEMINI_API_KEY:
        genai.configure(api_key=GEMINI_API_KEY)
|
|
|
|
def openai_chat(system_prompt, user_prompt, model=None, temperature=0.3):
    """
    Run a single system + user exchange through OpenAI's ChatCompletion API.

    Args:
        system_prompt: Content for the ``system`` role message.
        user_prompt: Content for the ``user`` role message.
        model: Optional model name; falls back to ``OPENAI_DEFAULT_MODEL``.
        temperature: Sampling temperature forwarded to the API.

    Returns:
        The assistant reply text (whitespace-stripped), or an error string
        when the key is missing or the API call fails — errors are reported
        as return values rather than raised.
    """
    if not OPENAI_API_KEY:
        return "Error: OpenAI API key not provided."

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]

    try:
        response = openai.ChatCompletion.create(
            model=model or OPENAI_DEFAULT_MODEL,
            messages=messages,
            temperature=temperature,
        )
        # Legacy SDK response: message behaves like a mapping.
        return response.choices[0].message["content"].strip()
    except Exception as e:
        return f"Error calling OpenAI: {str(e)}"
|
|
|
|
def gemini_chat(system_prompt, user_prompt, model_name=None, temperature=0.3):
    """
    Run a single system + user exchange through a Google Gemini model
    via the ``google.generativeai`` SDK.

    Args:
        system_prompt: Instructions applied as the model's system instruction.
        user_prompt: The user message to send.
        model_name: Optional model name; falls back to ``GEMINI_DEFAULT_MODEL``.
        temperature: Sampling temperature, passed via ``generation_config``.

    Returns:
        The model's reply text, or an error string when the key is missing
        or the API call fails — errors are reported as return values rather
        than raised, matching ``openai_chat``.
    """
    if not GEMINI_API_KEY:
        return "Error: Gemini API key not provided."
    final_model_name = model_name or GEMINI_DEFAULT_MODEL
    try:
        # System prompts must go through system_instruction: chat history
        # entries are Content-like dicts with roles "user"/"model", so the
        # previous history=[("system", system_prompt)] tuple was rejected by
        # the SDK and every call fell into the except branch.
        model = genai.GenerativeModel(
            model_name=final_model_name,
            system_instruction=system_prompt,
        )
        # send_message() has no `temperature` kwarg; sampling parameters are
        # supplied through generation_config instead.
        reply = model.start_chat().send_message(
            user_prompt,
            generation_config={"temperature": temperature},
        )
        return reply.text
    except Exception as e:
        return f"Error calling Gemini: {str(e)}"
|
|
|