import os
import requests
import gradio as gr
from llama_cpp import Llama

MODEL_URL = "https://huggingface.co/QuantFactory/Ministral-3b-instruct-GGUF/resolve/main/Ministral-3b-instruct.Q4_1.gguf?download=true"  # truncated for clarity
MODEL_PATH = "Ministral-3b-instruct.Q4_1.gguf"

# Download model if not already downloaded
if not os.path.exists(MODEL_PATH):
    print("Downloading model...")
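    # Stream the GGUF file to disk in chunks rather than loading it into memory.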
    with requests.get(MODEL_URL, stream=True) as r:
        r.raise_for_status()
        with open(MODEL_PATH, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    print("Model downloaded.")

# Load the model with adjustments for CPU
llm = Llama(
    model_path=MODEL_PATH,
    n_ctx=4096,  # Reduced context window size
    n_threads=2,  # Reduced threads for CPU use
    n_gpu_layers=0,  # Set to 0 since we're using CPU
    chat_format="chatml"
)

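# Gradio passes the running conversation as a list of (user, assistant) pairs;
# convert it to ChatML-style messages for llama-cpp-python.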
def chat_interface(message, history):
    chat_prompt = []
    for user_msg, bot_msg in history or []:
        chat_prompt.append({"role": "user", "content": user_msg})
        chat_prompt.append({"role": "assistant", "content": bot_msg})
    chat_prompt.append({"role": "user", "content": message})

    response = llm.create_chat_completion(messages=chat_prompt, stream=False)
    # gr.ChatInterface keeps track of the history itself, so the function
    # only needs to return the assistant's reply.
    return response["choices"][0]["message"]["content"]

gr.ChatInterface(fn=chat_interface, title="Ministral 3B Chat").launch()