# NOTE(review): removed non-Python extraction artifacts ("Spaces:", "Runtime error")
# that preceded the source so the file parses as Python.
import openai
from openai import OpenAI
class Answering_Agent:
    """Answers user queries about retrieved documents via the OpenAI chat API.

    Relevance is screened with a cheap keyword check before any API call is
    made; irrelevant queries get a canned refusal instead of a completion.
    """

    def __init__(self, openai_api_key) -> None:
        """Store credentials and build one reusable OpenAI client.

        Args:
            openai_api_key: Secret key used to authenticate with OpenAI.
        """
        # Kept for backward compatibility with any caller that reads these.
        self.openai_client = openai
        openai.api_key = openai_api_key
        # Fix: build the client once here instead of constructing a new
        # OpenAI(...) on every generate_response() call.
        self.client = OpenAI(api_key=openai_api_key)

    def get_document_content(self, doc_id):
        """Placeholder for retrieving document content by id.

        Args:
            doc_id: Document identifier (expected to be a string — it is
                concatenated directly; TODO confirm callers never pass ints).

        Returns:
            A canned content string for the given id.
        """
        return "Document content for ID " + doc_id

    def is_relevant(self, query, context_texts, history_str):
        """Cheap relevance check: does any query keyword appear in the context?

        Args:
            query: The user's question; split on whitespace into keywords.
            context_texts: Concatenated document snippets.
            history_str: Flattened conversation history.

        Returns:
            True if any lowercased keyword of *query* occurs as a substring
            of the combined lowercased context + history, else False.
            Note: an empty query yields False (no keywords to match).
        """
        keywords = query.lower().split()
        context = (context_texts + " " + history_str).lower()
        return any(keyword in context for keyword in keywords)

    def generate_response(self, query, docs, conv_history, k=5, mode="chatty"):
        """Generate an answer to *query* grounded in the top-k documents.

        Args:
            query: The user's question.
            docs: Retrieval results; each item is indexable with the snippet
                text at position 2 (e.g. (id, score, text) tuples).
            conv_history: Optional list of {"role": ..., "content": ...} turns.
            k: Number of top documents to include in the prompt (default 5).
            mode: "chatty" for long, detailed answers; anything else for a
                shorter, lower-temperature completion.

        Returns:
            The model's answer string, or a canned refusal when neither the
            documents nor the history look relevant to the query.
        """
        # Fix: honor k — previously the parameter was accepted but ignored
        # and every retrieved document was stuffed into the prompt.
        context_texts = "\n\n".join(
            f"Context {idx + 1}: {result[2]}" for idx, result in enumerate(docs[:k])
        )
        history_str = (
            "\n".join(f"{turn['role']}: {turn['content']}" for turn in conv_history)
            if conv_history
            else ""
        )
        print("context_texts: " + context_texts)

        # Bail out before spending an API call if nothing looks relevant.
        if not self.is_relevant(query, context_texts, history_str):
            return "No relevant documents found in the documents. Please ask a relevant question to the book on Machine Learning."

        # Formulate the prompt, incorporating conversation history if present.
        conversation_history = f'Conversation:\n{history_str}\n' if conv_history else ''
        prompt = f"Based on the following documents{' and conversation history' if conv_history else ''}, answer the query:\nDocuments:\n{context_texts}\n{conversation_history}Query: {query}\nAnswer:"
        if mode == "chatty":
            prompt += " Please provide a detailed and comprehensive response that includes background information, relevant examples, and any important distinctions or perspectives related to the topic. Where possible, include step-by-step explanations or descriptions to ensure clarity and depth in your answer."

        # Chatty mode gets a longer, more exploratory completion.
        max_tokens = 3500 if mode == "chatty" else 1000
        temperature = 0.9 if mode == "chatty" else 0.5

        message = {"role": "user", "content": prompt}
        # Fix: reuse the client built in __init__ instead of creating one per call.
        response = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[message],
            max_tokens=max_tokens,
            temperature=temperature,
            # Fix: "\n" was previously a stop sequence, which truncated every
            # answer at its first newline — incompatible with the chatty-mode
            # prompt that explicitly asks for multi-paragraph, step-by-step
            # answers. Only the "Query:" sentinel remains as a stop.
            stop=["Query:"],
        )
        return response.choices[0].message.content