import re
import gradio as gr
from anthropic import AI_PROMPT, HUMAN_PROMPT
from gradio.components import Checkbox, Dropdown, IOComponent, Textbox
from claude_space.ai import AnthropicCustom
from claude_space.const import ClaudeModels, ModelTokenLength, Prompts
from claude_space.settings import settings
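
# Running transcript shared by the single-question tab; the chat tab rebuilds
# it from Gradio's history instead.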
conversation_history = ""
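

# Streams a single completion for the "Claude Space" tab, optionally
# prepending the accumulated transcript when Memory is ticked.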
async def interact_with_ai(
user_question, token, model, token_length, prompt, prompt_input, memory
):
global conversation_history
    # A key must come from either the textbox or settings.
    if (token is None or token == "") and settings.ANTHROPIC_API_KEY is None:
        raise ValueError(
            "You have not provided an API key. Please provide one in the textbox."
        )
if memory:
prompt = Prompts[prompt].value.format(
memory=conversation_history, question=user_question
)
else:
prompt = Prompts[prompt].value.format(memory="", question=user_question)
    # Swap the user's custom prompt into the template if it differs from the
    # extracted default.
    match = re.search(r"Human: (.*?) \n\nConversations:", prompt)
    if match and prompt_input != match.group(1):
        prompt = re.sub(
            r"Human: (.*?) \n\nConversations:",
            f"Human: {prompt_input} \n\nConversations:",
            prompt,
        )
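    # AnthropicCustom (claude_space/ai.py) wraps the Anthropic client and
    # streams completion text chunks asynchronously.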
anth = AnthropicCustom(
api_key=token, model=model, max_tokens=token_length, prompt=prompt
)
response_accumulated = ""
async for response in anth.get_anthropic_response_async():
response_accumulated += response
        yield response_accumulated
    # Record the completed exchange once, after streaming finishes; re-appending
    # the partial response on every chunk bloated the transcript.
    conversation_history = f"{conversation_history} {HUMAN_PROMPT} {user_question} {AI_PROMPT} {response_accumulated}"
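

# Streams replies for the chat tab; Gradio passes the full history in, so the
# transcript can be rebuilt from it on every call.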
async def chat_with_ai(
message,
history,
token,
model,
token_length,
prompt,
prompt_input,
memory,
):
global conversation_history
    # A key must come from either the textbox or settings.
    if (token is None or token == "") and settings.ANTHROPIC_API_KEY is None:
        raise ValueError(
            "You have not provided an API key. Please provide one in the textbox."
        )
    if memory:
        # Rebuild the transcript from the full history Gradio passes in;
        # appending to the old global every call would duplicate turns.
        conversation_history = ""
        for user_question, response_accumulated in history:
            conversation_history = f"{conversation_history} {HUMAN_PROMPT} {user_question} {AI_PROMPT} {response_accumulated}"
        prompt = Prompts[prompt].value.format(
            memory=conversation_history, question=message
        )
else:
prompt = Prompts[prompt].value.format(memory="", question=message)
    # Swap the user's custom prompt into the template if it differs from the
    # extracted default.
    match = re.search(r"Human: (.*?) \n\nConversations:", prompt)
    if match and prompt_input != match.group(1):
        prompt = re.sub(
            r"Human: (.*?) \n\nConversations:",
            f"Human: {prompt_input} \n\nConversations:",
            prompt,
        )
anth = AnthropicCustom(
api_key=token, model=model, max_tokens=token_length, prompt=prompt
)
response_accumulated = ""
async for response in anth.get_anthropic_response_async():
response_accumulated += response
yield response_accumulated
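

# Prompt controls for the form tab; the custom-prompt box is pre-filled with
# the "Human: ..." line extracted from the selected template.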
promptDropdown: IOComponent = Dropdown(
choices=list(Prompts.__members__.keys()),
label="Prompt",
value=list(Prompts.__members__.keys())[0],
)
prompt_input: IOComponent = Textbox(
label="Custom Prompt",
placeholder="Enter a custom prompt here",
lines=3,
value=re.search(
r"Human: (.*?) \n\nConversations:", Prompts[promptDropdown.value].value
).group(1),
)
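

# Tab 1: single-question form with response streaming and flagging.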
iface = gr.Interface(
fn=interact_with_ai,
    flagging_options=["Inappropriate", "Disrespectful", "Spam"],
    allow_flagging="manual",  # flagging options are only shown with manual flagging
title="Claude Space",
inputs=[
Textbox(label="Question", placeholder="Enter a question here"),
Textbox(
label="Token",
info="You'll get this token from Anthropic console and this is mandatory",
placeholder="Enter a token here",
type="password",
),
Dropdown(
choices=[model.value for model in ClaudeModels],
label="Model",
value=[model.value for model in ClaudeModels][0],
),
Dropdown(
choices=[token.value for token in ModelTokenLength],
label="Token Length",
value=[token.value for token in ModelTokenLength][0],
),
promptDropdown,
prompt_input,
Checkbox(label="Memory", value=False),
],
outputs="markdown",
)
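

# Fresh component instances for the chat tab: Gradio components keep state
# once attached to an interface, so new ones are created rather than reusing
# those already wired into `iface`.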
promptDropdown: IOComponent = Dropdown(
choices=list(Prompts.__members__.keys()),
label="Prompt",
value=list(Prompts.__members__.keys())[0],
)
prompt_input: IOComponent = Textbox(
label="Custom Prompt",
placeholder="Enter a custom prompt here",
lines=3,
value=re.search(
r"Human: (.*?) \n\nConversations:", Prompts[promptDropdown.value].value
).group(1),
)
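

# Tab 2: streaming chat; Memory defaults to on so prior turns feed the prompt.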
cface = gr.ChatInterface(
fn=chat_with_ai,
additional_inputs=[
Textbox(label="Token", placeholder="Enter a token here", type="password"),
Dropdown(
choices=[model.value for model in ClaudeModels],
label="Model",
value=[model.value for model in ClaudeModels][0],
),
Dropdown(
choices=[token.value for token in ModelTokenLength],
label="Token Length",
value=[token.value for token in ModelTokenLength][0],
),
promptDropdown,
prompt_input,
Checkbox(label="Memory", value=True),
],
)
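

# Combine both tabs under one app.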
gd = gr.TabbedInterface(
[iface, cface], tab_names=["Claude Space", "Claude Chat"], title="Claude Space"
)
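
# Queue up to 100 pending requests with 75 concurrent workers, and bind to all
# interfaces on port 7860 for the hosted Space.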
gd.queue(concurrency_count=75, max_size=100).launch(
debug=True,
share=False,
server_name="0.0.0.0",
server_port=7860,
show_error=True,
show_tips=True,
)