Update app.py
app.py CHANGED
@@ -8,9 +8,7 @@ from langdetect import detect
 # Check for the text files and read them
 def load_text_files():
     files = {
-        "vampires": "vampires.txt"
-        "werewolves": "werewolves.txt",
-        "humans": "humans.txt"
+        "vampires": "vampires.txt"
     }
 
     loaded_data = {}
@@ -66,22 +64,16 @@ def create_knowledge_base(text_data, embed_fn):
 
     return collection
 
-# Initialize the answer model
+# Initialize the answer model (simplified version)
 def initialize_llm_model():
-    from transformers import
+    from transformers import pipeline
 
-
-
-    model = AutoModelForCausalLM.from_pretrained(model_name)
-
-    pipe = pipeline(
+    # Use a smaller model for Hugging Face Spaces
+    return pipeline(
         "text-generation",
-        model=
-
-        device="cpu"
+        model="IlyaGusev/saiga_llama3_8b",
+        device_map="auto"
     )
-
-    return pipe
 
 # Search for relevant information
 def find_relevant_info(question, collection, embed_fn, n_results=3):
@@ -106,12 +98,11 @@ def generate_response(question, context, llm_pipe):
 
     output = llm_pipe(
         prompt,
-        max_new_tokens=
+        max_new_tokens=256,
         do_sample=True,
         temperature=0.7,
         top_p=0.9,
-        repetition_penalty=1.2
-        eos_token_id=2
+        repetition_penalty=1.2
     )
 
     return output[0]["generated_text"][len(prompt):].strip()
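For context, load_text_files() after this commit would read roughly as below. The files dict and the loaded_data variable come from the diff; the reading loop and the missing-file handling are assumptions, since that part of the function is not shown.

# Sketch of load_text_files() after this commit. The files dict matches the
# diff; the reading loop is an assumption based on the loaded_data variable
# visible in the context lines.
def load_text_files():
    files = {
        "vampires": "vampires.txt"
    }

    loaded_data = {}
    for name, path in files.items():
        try:
            with open(path, encoding="utf-8") as f:
                loaded_data[name] = f.read()
        except FileNotFoundError:
            print(f"Warning: {path} not found, skipping")
    return loaded_data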
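The new initialize_llm_model() builds a plain transformers text-generation pipeline. A minimal sketch of creating it once and reusing it is below; the module-level llm_pipe variable is an assumption not shown in the diff, and device_map="auto" requires the accelerate package to be installed.

from transformers import pipeline

# From the diff: build the generation pipeline.
def initialize_llm_model():
    # Use a smaller model for Hugging Face Spaces
    return pipeline(
        "text-generation",
        model="IlyaGusev/saiga_llama3_8b",
        device_map="auto",  # device placement resolved via accelerate
    )

# Assumed usage: load the model a single time at startup and reuse the
# pipeline for every request, since model loading dominates the cost.
llm_pipe = initialize_llm_model()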
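Finally, generate_response() after this commit would look roughly as follows. The generation arguments and the prompt-stripping return line are taken from the diff; the prompt template itself is not shown there, so the f-string below is only an assumed placeholder.

# Sketch of generate_response() after this commit. The llm_pipe call and the
# final return line match the diff; the prompt template is an assumption.
def generate_response(question, context, llm_pipe):
    prompt = f"Context:\n{context}\n\nQuestion: {question}\nAnswer:"  # assumed template

    output = llm_pipe(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.2
    )

    # A text-generation pipeline returns the prompt plus the completion,
    # so cut off the prompt prefix and keep only the generated answer.
    return output[0]["generated_text"][len(prompt):].strip()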