alperall committed on
Commit d68abd2 · verified · 1 Parent(s): cd128f7

Update app.py

Files changed (1)
  1. app.py +4 -25
app.py CHANGED
@@ -1,27 +1,6 @@
  import gradio as gr
- import requests
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
- import torch

- model_name = "Open-Orca/Mistral-7B-OpenOrca"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
- chat = pipeline("text-generation", model=model, tokenizer=tokenizer, device_map="auto")
-
- prompt_url = "https://raw.githubusercontent.com/ALPERALL/AlpDroid/main/prompt.txt"
- system_prompt = requests.get(prompt_url).text
-
- def alp_droid_chat(user_input):
-     full_prompt = f"{system_prompt}\n\nKullanıcı: {user_input}\nAlpDroid:"
-     output = chat(full_prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_p=0.9)[0]["generated_text"]
-     return output.split("AlpDroid:")[-1].strip()
-
- app = gr.Interface(
-     fn=alp_droid_chat,
-     inputs=gr.Textbox(lines=4, placeholder="Sorunu yaz..."),
-     outputs="text",
-     title="AlpDroid - OpenOrca Mistral 7B",
-     description="Kolay deploy, zahmetsiz AlpDroid."
- )
-
- app.launch()
+ gr.load(
+     "models/mistralai/Mistral-7B-Instruct-v0.3",
+     provider="together",
+ ).launch()
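
For reference, app.py after this commit reduces to the retained import plus the four added lines. The comments below are an interpretation, not part of the commit: gr.load with a "models/..." name is expected to build an interface for the Hub-hosted model, and provider="together" to route generation through the Together inference provider instead of loading weights, a tokenizer, or a transformers pipeline locally.

import gradio as gr

# Build a Gradio interface for the Hub-hosted model; inference is expected to
# run remotely via the "together" provider rather than on local hardware.
gr.load(
    "models/mistralai/Mistral-7B-Instruct-v0.3",
    provider="together",
).launch()  # start the app (on Spaces this serves the public demo)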