"""AlpDroid chat demo: a Gradio front-end over TheBloke's SOLAR-10.7B
instruct model, primed with a system prompt fetched from GitHub."""

import gradio as gr
import requests
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name = "TheBloke/SOLAR-10.7B-Instruct-v1.0-AWQ"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, device_map="auto", torch_dtype=torch.float16
)
chat = pipeline("text-generation", model=model, tokenizer=tokenizer, device_map="auto")

# AlpDroid prompt — fetched once at startup.
# Fixes: timeout so startup can't hang forever; raise_for_status so an HTTP
# error page isn't silently used as the system prompt; decode the raw bytes
# as UTF-8 explicitly (GitHub raw serves text/plain without a charset, so
# requests' latin-1 fallback would garble the Turkish prompt text).
prompt_url = "https://raw.githubusercontent.com/ALPERALL/AlpDroid/main/prompt.txt"
_resp = requests.get(prompt_url, timeout=30)
_resp.raise_for_status()
system_prompt = _resp.content.decode("utf-8")


def alp_droid_chat(user_input):
    """Generate one AlpDroid reply for *user_input*.

    Builds a single-turn prompt (system prompt + user message), samples a
    completion, and returns only the text after the final "AlpDroid:" marker.
    """
    full = f"{system_prompt}\n\nKullanıcı: {user_input}\nAlpDroid:"
    res = chat(
        full,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )[0]["generated_text"]
    # The pipeline echoes the prompt; keep only the model's answer.
    return res.split("AlpDroid:")[-1].strip()


app = gr.Interface(
    alp_droid_chat,
    inputs=gr.Textbox(lines=4, placeholder="Sorunu yaz..."),
    outputs="text",
    title="AlpDroid (SOLAR‑10.7B)",
    description="TheBloke'un instruct modeliyle çalışan AlpDroid.",
)

# Guard the launch so importing this module (e.g. for testing) doesn't
# start the web server as a side effect.
if __name__ == "__main__":
    app.launch()