|
import ollama |
|
import gradio as gr |
|
import os |
|
import subprocess |
|
# Start the Ollama server in the background so the client below can reach it.
# Popen (not run) is essential here: `ollama serve` is a long-running server
# process, so subprocess.run would block this script forever waiting for it
# to exit. An argument list with shell=False also avoids shell-injection and
# word-splitting pitfalls (["ollama serve"] with shell=True was fragile).
result = subprocess.Popen(
    ["ollama", "serve"],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    text=True,
)

print(result)
|
def query(q):
    """Stream a chat completion for *q* from the local Ollama server.

    Parameters
    ----------
    q : str
        The user's question. Empty/None input falls back to a default
        prompt ("what is a dog?").

    Yields
    ------
    str
        The accumulated response text so far. Gradio replaces the output
        textbox with each yielded value, so yielding the running total
        produces a smooth streaming effect (the original yielded only the
        latest chunk, showing one token at a time).
    """
    # Guard clause: substitute a default question for empty input.
    if not q:
        q = "what is a dog?"
    print(q)

    client = ollama.Client(host="localhost:11434")

    # Original bug: only the system prompt was sent, so the model never
    # saw the user's question. Include it as a 'user' message.
    r = client.chat(
        model='tinyllama',
        messages=[
            {'role': 'system', 'content': 'You are a Survival Expert'},
            {'role': 'user', 'content': q},
        ],
        stream=True,
    )

    # Accumulate chunks and yield the running total. The original
    # `while True:` wrapper is removed — it made the generator restart
    # the chat forever after each completed response.
    out = ""
    for chunk in r:
        out += chunk['message']['content']
        yield out
|
|
|
# Minimal Gradio UI: a question box and submit button on one row, with the
# streamed answer rendered in a textbox below. Clicking the button invokes
# query(); because query() is a generator, Gradio streams each yielded
# value into the output textbox.
with gr.Blocks() as app:
    with gr.Row():
        inp = gr.Textbox()
        sub = gr.Button()
    outp = gr.Textbox()
    sub.click(query, inp, outp)

app.launch()