YOUSEF2434 committed
Commit 0471b47 · verified · 1 Parent(s): 3ac800e

Create app.py

Files changed (1)
  1. app.py +42 -0
app.py ADDED
@@ -0,0 +1,42 @@
+ import os
+ import requests
+ from transformers import pipeline
+ from flask import Flask, render_template, request, jsonify
+
+ app = Flask(__name__)
+
+ # Define model URL and local path
+ MODEL_URL = "https://huggingface.co/unsloth/Qwen3-4B-128K-GGUF/resolve/main/Qwen3-4B-128K-UD-IQ1_M.gguf"
+ MODEL_PATH = "Qwen3-4B-128K-UD-IQ1_M.gguf"
+
+ # Function to download the model
+ def download_model():
+     if not os.path.exists(MODEL_PATH):
+         print("Downloading the model...")
+         response = requests.get(MODEL_URL, stream=True)
+         with open(MODEL_PATH, 'wb') as model_file:
+             for chunk in response.iter_content(chunk_size=128):
+                 model_file.write(chunk)
+         print("Model downloaded successfully.")
+
+ # Load the model with Hugging Face Transformers pipeline
+ def load_model():
+     download_model()
+     model = pipeline("text-generation", model=MODEL_PATH)
+     return model
+
+ # Global model object
+ model = load_model()
+
+ @app.route('/')
+ def index():
+     return render_template('index.html')
+
+ @app.route('/generate', methods=['POST'])
+ def generate():
+     user_input = request.form['message']
+     response = model(user_input, max_length=100)
+     return jsonify({"response": response[0]['generated_text']})
+
+ if __name__ == '__main__':
+     app.run(debug=True)
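
As committed, download_model() streams the multi-gigabyte GGUF file in 128-byte chunks and writes whatever the server returns, even an HTML error page, to MODEL_PATH. A minimal hardening sketch, keeping the same names as in the diff; raise_for_status() and the 1 MiB chunk size are the only additions:

import os
import requests

MODEL_URL = "https://huggingface.co/unsloth/Qwen3-4B-128K-GGUF/resolve/main/Qwen3-4B-128K-UD-IQ1_M.gguf"
MODEL_PATH = "Qwen3-4B-128K-UD-IQ1_M.gguf"

def download_model():
    # Skip the download if the file is already on disk, as in the committed version.
    if not os.path.exists(MODEL_PATH):
        print("Downloading the model...")
        with requests.get(MODEL_URL, stream=True, timeout=60) as response:
            # Fail loudly on 4xx/5xx instead of saving an error page as the model file.
            response.raise_for_status()
            with open(MODEL_PATH, 'wb') as model_file:
                # 1 MiB chunks keep the transfer fast without holding the whole file in memory.
                for chunk in response.iter_content(chunk_size=1024 * 1024):
                    model_file.write(chunk)
        print("Model downloaded successfully.")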
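
Note that load_model() passes a bare local .gguf path as the model argument of pipeline(); depending on the transformers version this may not resolve, since from_pretrained() generally expects a repo id or model directory. Recent transformers releases document GGUF loading through a gguf_file argument, which dequantizes the checkpoint on load. A sketch under that assumption, reusing the repo id and file name from MODEL_URL:

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Repo id and file name taken from MODEL_URL above.
GGUF_REPO = "unsloth/Qwen3-4B-128K-GGUF"
GGUF_FILE = "Qwen3-4B-128K-UD-IQ1_M.gguf"

def load_model_gguf():
    # transformers fetches the GGUF file from the Hub and dequantizes it,
    # so the manual download_model() step is not needed on this path.
    tokenizer = AutoTokenizer.from_pretrained(GGUF_REPO, gguf_file=GGUF_FILE)
    model = AutoModelForCausalLM.from_pretrained(GGUF_REPO, gguf_file=GGUF_FILE)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

An IQ1_M quant dequantized on load will use considerably more RAM than the .gguf file size suggests, so this path trades memory for compatibility with the transformers pipeline API.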
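
The /generate route reads request.form['message'], so clients must POST form-encoded data rather than JSON. A quick local test, assuming the app is running on Flask's default http://127.0.0.1:5000:

import requests

# POST a form field named "message", matching request.form['message'] in app.py.
resp = requests.post(
    "http://127.0.0.1:5000/generate",
    data={"message": "Hello, who are you?"},
)
print(resp.json()["response"])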