Lyon28 committed
Commit 11eef4d · verified · 1 Parent(s): 3f29839

Update app.py

Files changed (1): app.py (+40 −37)
app.py CHANGED
@@ -29,30 +29,35 @@ model_info = {
     "GPT-Neo": {"task": "text-generation", "description": "GPT-Neo model"},
     "Distil-GPT-2": {"task": "text-generation", "description": "Distilled GPT-2 model"},
     # --- EXTERNAL MODELS ---
-    "Gemma-2B-IT": {  # the ID you want to expose in your API
+    "TinyLLama-NSFW-Chatbot": {
         "task": "text-generation",
-        "description": "Google's Gemma 2B Instruct model",
-        "hf_model_name": "google/gemma-2b-it"
+        "description": "BilalRahib's TinyLLama NSFW Chatbot",
+        "hf_model_name": "bilalRahib/TinyLLama-NSFW-Chatbot"
     },
-    "Mistral-7B-Instruct": {
+    "M-GPT": {
         "task": "text-generation",
-        "description": "Mistral AI's Mistral 7B Instruct model",
-        "hf_model_name": "mistralai/Mistral-7B-Instruct-v0.3",
+        "description": "ai-forever mGPT",
+        "hf_model_name": "ai-forever/mGPT"
     },
-    "Qwen3-4B-RPG": {
+    "Devstral-Small-2505": {
         "task": "text-generation",
-        "description": "Chun121's Qwen 4B RPG Roleplay model (Uncensored)",
-        "hf_model_name": "Chun121/qwen3-4B-rpg-roleplay"
+        "description": "mistralai Devstral-Small-2505",
+        "hf_model_name": "mistralai/Devstral-Small-2505"
     },
-    "Llama-3.2-Uncensored-3B": {
+    "Llama-3_3-Nemotron-Super-49B-GenRM-Multilingual": {
         "task": "text-generation",
-        "description": "Dhirajlochib's Llama 3.2 Uncensored 3B",
-        "hf_model_name": "dhirajlochib/llama-3.2-unsensored-3b"
+        "description": "nvidia Llama-3_3-Nemotron-Super-49B-GenRM-Multilingual",
+        "hf_model_name": "nvidia/Llama-3_3-Nemotron-Super-49B-GenRM-Multilingual"
     },
-    "TinyLLama-NSFW-Chatbot": {
+    "Dhanishtha-2.0-preview": {
         "task": "text-generation",
-        "description": "BilalRahib's TinyLLama NSFW Chatbot",
-        "hf_model_name": "bilalRahib/TinyLLama-NSFW-Chatbot"
+        "description": "HelpingAI Dhanishtha-2.0-preview",
+        "hf_model_name": "HelpingAI/Dhanishtha-2.0-preview"
+    },
+    "whisper-large-v3": {
+        "task": "automatic-speech-recognition",
+        "description": "openai whisper-large-v3",
+        "hf_model_name": "openai/whisper-large-v3"
     }
 }
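Aside: this registry is presumably consumed by the get_model_pipeline helper that the error handling below refers to, which sits outside this diff. A minimal sketch of how such a resolver could look, assuming Hub-hosted entries are loaded with transformers.pipeline and cached; the function name, caching scheme, and error mapping are illustrative, not the repo's actual code:

```python
from transformers import pipeline

_pipeline_cache = {}

def get_model_pipeline_sketch(model_id):
    """Hypothetical resolver: map an API model ID to a cached pipeline."""
    if model_id in _pipeline_cache:
        return _pipeline_cache[model_id]
    entry = model_info[model_id]  # the registry dict defined above
    hf_name = entry.get("hf_model_name")
    if hf_name is None:
        # Registry-only entries would need another weight source (assumption)
        raise ValueError(f"No hf_model_name configured for '{model_id}'")
    try:
        # transformers picks a default model class/tokenizer for the task
        pipe = pipeline(entry["task"], model=hf_name)
    except Exception as exc:
        raise RuntimeError(f"Failed to load '{hf_name}': {exc}") from exc
    _pipeline_cache[model_id] = pipe
    return pipe
```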
 
@@ -125,7 +130,7 @@ def list_available_models():
 def predict_with_model(model_id):
     """
     Main endpoint for model prediction.
-    Accepts 'inputs' (text) and an optional 'parameters' dictionary.
+    Accepts 'inputs' (a pre-formatted prompt string) and an optional 'parameters' dictionary.
     """
     logger.info(f"Received request for model: {model_id}")
     if model_id not in model_info:
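The docstring change above shifts prompt formatting to the caller: 'inputs' is now a single pre-formatted prompt string. A minimal sketch of how a client could build that string with transformers' chat-template support, assuming the target model's tokenizer ships a chat template; the model name and messages are placeholders:

```python
from transformers import AutoTokenizer

# Placeholder conversation; apply_chat_template only works if the tokenizer
# config ships a chat template (an assumption for this particular model).
tokenizer = AutoTokenizer.from_pretrained("bilalRahib/TinyLLama-NSFW-Chatbot")
messages = [
    {"role": "system", "content": "You are a roleplay narrator."},
    {"role": "user", "content": "Describe the tavern we just entered."},
]
# tokenize=False returns the rendered prompt as a plain string, i.e. the
# fullPromptString the updated endpoint expects in its 'inputs' field.
full_prompt_string = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
```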
@@ -137,49 +142,51 @@ def predict_with_model(model_id):
         model_task = model_info[model_id]["task"]
 
         data = request.json
-        inputs = data.get('inputs', '')
-        parameters = data.get('parameters', {})  # default to an empty dict if absent
+        # The input is now expected to be the fullPromptString from the frontend
+        full_prompt_string_from_frontend = data.get('inputs', '')
+        parameters = data.get('parameters', {})
 
-        if not inputs:
-            return jsonify({"error": "Input 'inputs' must not be empty."}), 400
+        if not full_prompt_string_from_frontend:
+            return jsonify({"error": "Input 'inputs' (full prompt string) must not be empty."}), 400
 
-        logger.info(f"Inference: Model='{model_id}', Task='{model_task}', Input='{inputs[:100]}...', Params='{parameters}'")
+        logger.info(f"Inference: Model='{model_id}', Task='{model_task}', Full Prompt='{full_prompt_string_from_frontend[:200]}...', Params='{parameters}'")
 
         result = []
         # --- Parameter handling and inference by task type ---
         if model_task == "text-generation":
-            # Default parameters for text-generation
             gen_params = {
-                "max_new_tokens": parameters.get("max_new_tokens", 150),  # more tokens for roleplay
+                "max_new_tokens": parameters.get("max_new_tokens", 150),
                 "temperature": parameters.get("temperature", 0.7),
                 "do_sample": parameters.get("do_sample", True),
                 "return_full_text": parameters.get("return_full_text", False),  # essential for chatbots
                 "num_return_sequences": parameters.get("num_return_sequences", 1),
                 "top_k": parameters.get("top_k", 50),
                 "top_p": parameters.get("top_p", 0.95),
-                "repetition_penalty": parameters.get("repetition_penalty", 1.2),  # discourages repetition
+                "repetition_penalty": parameters.get("repetition_penalty", 1.2),
             }
-            result = model_pipeline(inputs, **gen_params)
+            # Pass full_prompt_string_from_frontend straight to the pipeline
+            result = model_pipeline(full_prompt_string_from_frontend, **gen_params)
 
         elif model_task == "fill-mask":
             mask_params = {
                 "top_k": parameters.get("top_k", 5)
             }
-            result = model_pipeline(inputs, **mask_params)
+            # For fill-mask the input must be a plain string, not a complex prompt;
+            # make sure the frontend does not send complex prompts to fill-mask models
+            result = model_pipeline(full_prompt_string_from_frontend, **mask_params)
 
-        elif model_task == "text2text-generation":  # e.g. for T5
+        elif model_task == "text2text-generation":
             t2t_params = {
                 "max_new_tokens": parameters.get("max_new_tokens", 150),
                 "temperature": parameters.get("temperature", 0.7),
                 "do_sample": parameters.get("do_sample", True),
             }
-            result = model_pipeline(inputs, **t2t_params)
+            result = model_pipeline(full_prompt_string_from_frontend, **t2t_params)
 
         else:
-            # Fallback for other tasks or when no specific parameters are needed
-            result = model_pipeline(inputs, **parameters)
+            result = model_pipeline(full_prompt_string_from_frontend, **parameters)
 
-        # --- Output format consistency ---
+        # --- Output format consistency (unchanged from the previous update) ---
         response_output = {}
         if model_task == "text-generation" or model_task == "text2text-generation":
             if result and len(result) > 0 and 'generated_text' in result[0]:
@@ -192,22 +199,18 @@ def predict_with_model(model_id):
                     for p in result
                 ]
             else:
-                # For other task types, return the raw result
                 response_output = result
 
         logger.info(f"Inference succeeded for '{model_id}'. Output excerpt: '{str(response_output)[:200]}'")
-        return jsonify({"model": model_id, "inputs": inputs, "outputs": response_output})
+        return jsonify({"model": model_id, "inputs": full_prompt_string_from_frontend, "outputs": response_output})
 
     except ValueError as ve:
-        # Errors raised by get_model_pipeline or input validation
         logger.error(f"Validation or configuration error for model '{model_id}': {str(ve)}")
         return jsonify({"error": str(ve), "message": "Model configuration or input error."}), 400
     except RuntimeError as re:
-        # Error while loading the model
         logger.error(f"Runtime error while loading model '{model_id}': {str(re)}")
-        return jsonify({"error": str(re), "message": "Model failed to load."}), 503  # Service Unavailable
+        return jsonify({"error": str(re), "message": "Model failed to load."}), 503
     except Exception as e:
-        # Catch-all for other unexpected errors during prediction
         logger.error(f"An unexpected error occurred while predicting with model '{model_id}': {str(e)}", exc_info=True)
         return jsonify({"error": str(e), "message": "Internal server error."}), 500
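Putting the new contract together, a client call could look like the sketch below. The host and route are assumptions, since the Flask route decorator is outside this diff; only the JSON body shape ('inputs' plus 'parameters') and the response keys come from the handler above:

```python
import requests

# Assumed host and route; adjust to the app's actual Flask routing.
payload = {
    "inputs": "<|user|>\nDescribe the tavern we just entered.\n<|assistant|>\n",
    "parameters": {"max_new_tokens": 200, "temperature": 0.8},
}
resp = requests.post(
    "http://localhost:7860/models/TinyLLama-NSFW-Chatbot/predict",
    json=payload,
    timeout=120,
)
resp.raise_for_status()
data = resp.json()
# Response shape per the handler's return statement:
# {"model": ..., "inputs": ..., "outputs": ...}
print(data["outputs"])
```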
 
 