import openai
import google.generativeai as genai
from config import (
OPENAI_API_KEY,
GEMINI_API_KEY,
OPENAI_DEFAULT_MODEL,
GEMINI_DEFAULT_MODEL
)
def configure_llms():
    """Wire up API credentials for each provider whose key is present.

    A missing key simply leaves that provider unconfigured; the matching
    chat helper will then short-circuit with an error string instead of
    attempting a call.
    """
    if OPENAI_API_KEY:
        openai.api_key = OPENAI_API_KEY
    if GEMINI_API_KEY:
        genai.configure(api_key=GEMINI_API_KEY)
def openai_chat(system_prompt, user_prompt, model=None, temperature=0.3):
    """Send a system + user message pair to OpenAI chat completion.

    Args:
        system_prompt: Text for the "system" role message.
        user_prompt: Text for the "user" role message.
        model: Optional model override; falls back to OPENAI_DEFAULT_MODEL.
        temperature: Sampling temperature forwarded to the API.

    Returns:
        The assistant reply text (stripped), or an "Error ..." string when
        the key is missing or the call fails.
    """
    # NOTE(review): this uses the pre-1.0 `openai.ChatCompletion` interface —
    # confirm the pinned `openai` package version supports it.
    if not OPENAI_API_KEY:
        return "Error: OpenAI API key not provided."

    chosen_model = model or OPENAI_DEFAULT_MODEL
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    try:
        # Keep response parsing inside the try so malformed responses are
        # also reported as an error string rather than raising to the caller.
        completion = openai.ChatCompletion.create(
            model=chosen_model,
            messages=messages,
            temperature=temperature,
        )
        return completion.choices[0].message["content"].strip()
    except Exception as e:
        return f"Error calling OpenAI: {str(e)}"
def gemini_chat(system_prompt, user_prompt, model_name=None, temperature=0.3):
    """Call Google's Gemini via google.generativeai.

    Args:
        system_prompt: System instruction guiding the model's behavior.
        user_prompt: The user message to respond to.
        model_name: Optional model override; falls back to GEMINI_DEFAULT_MODEL.
        temperature: Sampling temperature for generation.

    Returns:
        The model's reply text, or an "Error ..." string when the key is
        missing or the call fails.
    """
    if not GEMINI_API_KEY:
        return "Error: Gemini API key not provided."
    final_model = model_name or GEMINI_DEFAULT_MODEL
    try:
        # Fix: system instructions are passed at model construction via
        # `system_instruction` — a ("system", text) tuple is not a valid
        # chat-history entry and "system" is not a valid history role.
        model = genai.GenerativeModel(
            model_name=final_model,
            system_instruction=system_prompt,
        )
        # Fix: `temperature` is not a keyword of send_message(); it must be
        # wrapped in a GenerationConfig. A single-turn generate_content call
        # replaces the unnecessary chat session.
        response = model.generate_content(
            user_prompt,
            generation_config=genai.types.GenerationConfig(
                temperature=temperature
            ),
        )
        return response.text
    except Exception as e:
        return f"Error calling Gemini: {str(e)}"