from openai import OpenAI, AsyncOpenAI
from dotenv import load_dotenv
import os

load_dotenv()


class ChatOpenAI:
    """Thin wrapper around the OpenAI chat-completions API.

    Loads the API key from the OPENAI_API_KEY environment variable
    (via a .env file if present) and exposes a blocking completion
    helper (`run`) and an async streaming helper (`astream`).
    """

    # Default sampling temperature; callers may override via **kwargs.
    DEFAULT_TEMPERATURE = 0.7

    def __init__(self, model_name: str = "gpt-4"):
        """Initialize the wrapper and construct the API clients.

        Args:
            model_name: Chat model identifier sent with every request.

        Raises:
            ValueError: If OPENAI_API_KEY is not set in the environment.
        """
        self.model_name = model_name
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        if self.openai_api_key is None:
            raise ValueError("OPENAI_API_KEY is not set")
        self.max_tokens = 8192  # Maximum tokens for response
        self.max_total_tokens = 16384  # Maximum total tokens (prompt + response)
        # Build one client of each flavor up front instead of per call,
        # and pass the validated key explicitly rather than relying on
        # the SDK re-reading the environment.
        self._client = OpenAI(api_key=self.openai_api_key)
        self._async_client = AsyncOpenAI(api_key=self.openai_api_key)

    def _request_params(self, messages: list, **kwargs) -> dict:
        """Validate `messages` and merge default request parameters.

        Defaults live in a dict that is updated with **kwargs so callers
        can override temperature/max_tokens/etc.  (The previous code
        passed `temperature=0.7, **kwargs`, so any caller override
        raised `TypeError: got multiple values for keyword argument`.)

        Raises:
            ValueError: If `messages` is not a list.
        """
        if not isinstance(messages, list):
            raise ValueError("messages must be a list")
        params = {
            "model": self.model_name,
            "messages": messages,
            "max_tokens": self.max_tokens,
            "temperature": self.DEFAULT_TEMPERATURE,
        }
        params.update(kwargs)
        return params

    def run(self, messages, text_only: bool = True, **kwargs):
        """Run a blocking chat completion.

        Args:
            messages: List of chat messages in OpenAI message format.
            text_only: When True, return only the first choice's text;
                otherwise return the full response object.
            **kwargs: Extra parameters forwarded to the API; these
                override the defaults (e.g. temperature, max_tokens).

        Returns:
            The completion text (str) when `text_only` is True, else the
            raw response object.

        Raises:
            ValueError: If `messages` is not a list.
            Exception: Re-raises any API error after logging it.
        """
        params = self._request_params(messages, **kwargs)
        try:
            response = self._client.chat.completions.create(**params)
            if text_only:
                return response.choices[0].message.content
            return response
        except Exception as e:
            print(f"Error in chat completion: {str(e)}")
            raise

    async def astream(self, messages, **kwargs):
        """Stream chat-completion text deltas as an async generator.

        Args:
            messages: List of chat messages in OpenAI message format.
            **kwargs: Extra parameters forwarded to the API; these
                override the defaults (e.g. temperature, max_tokens).

        Yields:
            str: Each non-empty content delta from the stream.

        Raises:
            ValueError: If `messages` is not a list.
            Exception: Re-raises any API error after logging it.
        """
        params = self._request_params(messages, **kwargs)
        params["stream"] = True  # set after the merge so it always wins
        try:
            stream = await self._async_client.chat.completions.create(**params)
            async for chunk in stream:
                content = chunk.choices[0].delta.content
                if content is not None:
                    yield content
        except Exception as e:
            print(f"Error in chat completion stream: {str(e)}")
            raise