Spaces: Running on Zero
import os
import subprocess

# Rebuild llama-cpp-python with CUDA (cuBLAS) support before it is imported.
subprocess.run('pip install llama-cpp-python --no-cache-dir --force-reinstall --upgrade', env={**os.environ, 'CMAKE_ARGS': "-DLLAMA_CUBLAS=ON", 'FORCE_CMAKE': '1'}, shell=True)

import spaces
import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider

# Download the quantized GGUF model and keep the resolved cache path.
model_path = hf_hub_download(repo_id="TheBloke/Mistral-7B-Instruct-v0.2-GGUF", filename="mistral-7b-instruct-v0.2.Q6_K.gguf")
@spaces.GPU  # allocate ZeroGPU hardware for the duration of this call
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Load the GGUF model from the downloaded path with all 33 layers offloaded to the GPU.
    llama_model = Llama(model_path, n_batch=1024, n_threads=0, n_gpu_layers=33, n_ctx=8192, verbose=False)
    provider = LlamaCppPythonProvider(llama_model)

    agent = LlamaCppAgent(
        provider,
        system_prompt=f"{system_message}",
        predefined_messages_formatter_type=MessagesFormatterType.MISTRAL,
        debug_output=True,
    )

    # Apply the UI sampling controls to the provider's default settings.
    settings = provider.get_provider_default_settings()
    settings.stream = True
    settings.max_tokens = max_tokens
    settings.temperature = temperature
    settings.top_p = top_p

    # Generate the reply and yield it once, so Gradio treats respond() as a generator.
    agent_output = agent.get_chat_response(message, llm_sampling_settings=settings)
    yield agent_output.strip()
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

if __name__ == "__main__":
    demo.launch()
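For the Space to build, the imported packages also have to be listed in the repository's requirements.txt. A minimal sketch, assuming the standard PyPI package names for the imports above and leaving versions unpinned; the startup script reinstalls llama-cpp-python with CUDA anyway, so the entry here only needs to satisfy the import at build time:

spaces
gradio
huggingface_hub
llama-cpp-python
llama-cpp-agent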