# NOTE(review): "Spaces: / Running / Running" below was HuggingFace Spaces
# page-scrape residue, not code — kept as a comment so the file stays importable.
# Spaces: Running Running
import os

import anthropic
from dotenv import load_dotenv
from huggingface_hub import HfApi, InferenceClient
from openai import OpenAI
# Load environment variables from .env file (override any already-set values).
load_dotenv(override=True)

# BUG FIX: the original referenced `openai_api_key` / `anthropic_api_key`
# without ever defining them, which raises NameError at import time.
# Read them from the environment (populated by load_dotenv above).
# NOTE(review): despite the variable names, both clients talk to the Hugging
# Face Inference API, so these must be valid HF tokens — confirm the .env
# variable names against the actual deployment.
openai_api_key = os.getenv("OPENAI_API_KEY")
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")

# "Disguised" API clients: both names point at the same Mistral model served
# through the Hugging Face InferenceClient.
openai = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.3", token=openai_api_key)
claude = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.3", token=anthropic_api_key)
def get_gpt_completion(prompt, system_message):
    """Generate a completion via the "GPT" client (an HF InferenceClient).

    Args:
        prompt: The user prompt, appended after the system message.
        system_message: Instructions placed on the line before the prompt.

    Returns:
        The generated text returned by the model.

    Raises:
        Exception: Whatever the client raised, re-raised after logging.
    """
    try:
        combined = f"{system_message}\n{prompt}"
        return openai.text_generation(prompt=combined, max_new_tokens=200)
    except Exception as exc:
        print(f"GPT error: {exc}")
        raise
def get_claude_completion(prompt, system_message):
    """Generate a completion via the "Claude" client (an HF InferenceClient).

    Args:
        prompt: The user prompt, appended after the system message.
        system_message: Instructions placed on the line before the prompt.

    Returns:
        The generated text returned by the model.

    Raises:
        Exception: Whatever the client raised, re-raised after logging.
    """
    try:
        combined = f"{system_message}\n{prompt}"
        return claude.text_generation(prompt=combined, max_new_tokens=200)
    except Exception as exc:
        print(f"Claude error: {exc}")
        raise