Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import torch
|
3 |
+
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
|
4 |
+
import pandas as pd
|
5 |
+
import plotly.express as px
|
6 |
+
import os
|
7 |
+
|
# --- 1. Model loading ---
# Owner: [name of the student responsible for this model, e.g. Zhang San]
# NOTE: the QuantFactory/Apollo2-7B-GGUF model is generally NOT directly
# compatible with pipeline("text-generation", ...) unless llama.cpp or a
# dedicated transformers loading configuration is available. To keep this
# demo running smoothly, gpt2-large is used as a stand-in.
model1_name = "gpt2-large"  # stand-in for QuantFactory/Apollo2-7B-GGUF for compatibility
try:
    # Use the first CUDA device when available, otherwise fall back to CPU (-1).
    _gen_device = 0 if torch.cuda.is_available() else -1
    generator1 = pipeline("text-generation", model=model1_name, device=_gen_device)
except Exception as e:
    # Best-effort loading: record the failure and leave the handle as None so
    # the rest of the app can start and degrade gracefully.
    generator1 = None
    print(f"❌ 模型 1 (文本生成: {model1_name}) 加载失败: {e}")
else:
    print(f"✅ 模型 1 (文本生成: {model1_name}) 加载成功!")
# Owner: [name of the student responsible for this model, e.g. Li Si]
# deepset/roberta-base-squad2 is an extractive question-answering model and
# therefore requires a context passage alongside each question.
model2_name = "deepset/roberta-base-squad2"
try:
    qa_model = pipeline(
        "question-answering",
        model=model2_name,
        # First CUDA device when available, CPU (-1) otherwise.
        device=0 if torch.cuda.is_available() else -1,
    )
except Exception as e:
    # Best-effort loading: leave the handle as None so the app can still start.
    qa_model = None
    print(f"❌ 模型 2 (问答: {model2_name}) 加载失败: {e}")
else:
    print(f"✅ 模型 2 (问答: {model2_name}) 加载成功!")