# Hugging Face Space: Qwen3-4B-128K (GGUF) text-generation chat demo.
import os

import requests
from flask import Flask, jsonify, render_template, request
from transformers import pipeline

# Flask application serving the chat front-end and the generation endpoint.
app = Flask(__name__)
# Remote location of the quantized GGUF weights and the local filename the
# download is cached under (same basename as the repo file).
MODEL_URL = "https://huggingface.co/unsloth/Qwen3-4B-128K-GGUF/resolve/main/Qwen3-4B-128K-UD-IQ1_M.gguf"
MODEL_PATH = "Qwen3-4B-128K-UD-IQ1_M.gguf"
def download_model() -> None:
    """Download the GGUF model file to MODEL_PATH if it is not already present.

    Streams the download in 1 MiB chunks to a temporary file and renames it
    into place only on success, so an interrupted download cannot leave a
    truncated file that a later `os.path.exists` check would wrongly accept.

    Raises:
        requests.HTTPError: if the server responds with a non-2xx status.
        requests.RequestException: on connection/timeout failures.
    """
    if os.path.exists(MODEL_PATH):
        return
    print("Downloading the model...")
    tmp_path = MODEL_PATH + ".part"
    # stream=True avoids loading the multi-GB file into memory; the timeout
    # bounds connect/read stalls instead of hanging forever.
    with requests.get(MODEL_URL, stream=True, timeout=60) as response:
        # Without this, an HTML error page would be saved as the "model".
        response.raise_for_status()
        with open(tmp_path, "wb") as model_file:
            for chunk in response.iter_content(chunk_size=1024 * 1024):
                model_file.write(chunk)
    os.replace(tmp_path, MODEL_PATH)  # atomic publish of the completed file
    print("Model downloaded successfully.")
def load_model():
    """Download the weights if needed and build a text-generation pipeline.

    Returns:
        A transformers text-generation pipeline backed by the quantized
        Qwen3-4B model.
    """
    download_model()
    # NOTE(review): transformers cannot load a bare .gguf path passed as
    # `model=` — GGUF checkpoints are loaded by naming the source repo and
    # pointing `gguf_file` at the quant file (supported since v4.41).
    # Verify this matches the transformers version pinned for this Space.
    return pipeline(
        "text-generation",
        model="unsloth/Qwen3-4B-128K-GGUF",
        model_kwargs={"gguf_file": MODEL_PATH},
    )
# Load the model once at import time so every request reuses the same
# pipeline instead of paying the load cost per request.
model = load_model()
@app.route("/")
def index():
    """Serve the chat front-end (templates/index.html)."""
    # Without the route decorator this view is never registered and the
    # app 404s on every path.
    return render_template("index.html")
@app.route("/generate", methods=["POST"])
def generate():
    """Generate a model reply for the posted chat message.

    Expects a form field ``message``; a missing field produces HTTP 400
    (werkzeug raises BadRequestKeyError on absent form keys).

    Returns:
        JSON body ``{"response": <generated text>}``.
    """
    user_input = request.form["message"]
    # max_length caps prompt + completion tokens, keeping replies short.
    completion = model(user_input, max_length=100)
    return jsonify({"response": completion[0]["generated_text"]})
if __name__ == "__main__":
    # WARNING: debug=True enables the Werkzeug interactive debugger, which
    # allows arbitrary code execution if the port is publicly reachable —
    # disable it for any deployed instance.
    app.run(debug=True)