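# AlpDroid: a Gradio chat interface backed by the Open-Orca/Mistral-7B-OpenOrca model,
# using a system prompt fetched from the AlpDroid GitHub repository.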
import gradio as gr
import requests
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

model_name = "Open-Orca/Mistral-7B-OpenOrca"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
chat = pipeline("text-generation", model=model, tokenizer=tokenizer, device_map="auto")

prompt_url = "https://raw.githubusercontent.com/ALPERALL/AlpDroid/main/prompt.txt"
system_prompt = requests.get(prompt_url).text

def alp_droid_chat(user_input):
    # Build the prompt in the "Kullanıcı:" / "AlpDroid:" (User / AlpDroid) format the system prompt expects.
    full_prompt = f"{system_prompt}\n\nKullanıcı: {user_input}\nAlpDroid:"
    output = chat(full_prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_p=0.9)[0]["generated_text"]
    # The pipeline echoes the prompt, so keep only the text generated after the final "AlpDroid:" marker.
    return output.split("AlpDroid:")[-1].strip()

# Minimal Gradio UI: a single textbox in, plain text out.
app = gr.Interface(
    fn=alp_droid_chat,
    inputs=gr.Textbox(lines=4, placeholder="Type your question..."),
    outputs="text",
    title="AlpDroid - OpenOrca Mistral 7B",
    description="Easy to deploy, hassle-free AlpDroid."
)

app.launch()