Update inference.py
inference.py CHANGED (+8 -8)
@@ -9,7 +9,7 @@ import time
 import psutil
 import platform
 
-#
+# Load OpenAI API Key
 openai.api_key = os.getenv("OPENAI_API_KEY")
 
 # 📦 Constants
@@ -18,7 +18,7 @@ tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
 model = None
 last_mod_time = 0
 
-#
+# Load Evo model
 def load_model():
     global model, last_mod_time
     try:
@@ -34,7 +34,7 @@ def load_model():
         model = None
     return model
 
-# 🔮 Evo
+# 🔮 Evo inference
 def evo_infer(query, options, user_context=""):
     model = load_model()
     if model is None:
@@ -59,8 +59,8 @@ def evo_infer(query, options, user_context=""):
         context_str
     )
 
-# 💬 GPT
-def gpt_infer(query, user_context=""):
+# 💬 GPT fallback (renamed properly to fix import)
+def get_gpt_response(query, user_context=""):
     try:
         context_block = f"\n\nContext:\n{user_context}" if user_context else ""
         response = openai.chat.completions.create(
@@ -72,7 +72,7 @@ def gpt_infer(query, user_context=""):
     except Exception as e:
         return f"⚠️ GPT error:\n{str(e)}"
 
-#
+# UI integration
 def evo_chat_predict(history, query, options):
     context = "\n".join(history[-6:]) if history else ""
     evo_ans, evo_score, evo_reason, evo_ctx = evo_infer(query, options, context)
@@ -83,7 +83,7 @@ def evo_chat_predict(history, query, options):
         "context_used": evo_ctx
     }
 
-#
+# Evo config
 def get_model_config():
     return {
         "num_layers": 6,
@@ -94,7 +94,7 @@ def get_model_config():
         "accuracy": "~64.5%"
     }
 
-#
+# 💻 System info
 def get_system_stats():
     gpu_info = torch.cuda.get_device_properties(0) if torch.cuda.is_available() else None
     memory = psutil.virtual_memory()
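
The inline note on the renamed function says the change exists to fix an import, presumably because a caller imports the fallback by name and the module previously only defined gpt_infer. A minimal caller sketch under that assumption (the importing module, e.g. app.py, and every name outside the two imports are hypothetical, not part of this commit):

# Hypothetical caller (e.g. the Space's app.py); only evo_chat_predict and
# get_gpt_response come from the diff, the rest is illustrative.
from inference import evo_chat_predict, get_gpt_response

history = ["User: hello", "Evo: hi there"]   # prior chat turns
options = ["yes", "no"]                      # candidate answers for Evo

# evo_chat_predict builds Evo's context from the last six history turns.
result = evo_chat_predict(history, "Is Evo awake?", options)
print(result["context_used"])

# The GPT fallback is now importable under its corrected name; before this
# commit, `from inference import get_gpt_response` would raise ImportError.
answer = get_gpt_response("Summarize the conversation.", user_context="\n".join(history))
print(answer)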
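
The final hunk shows only the first two lines of get_system_stats(). A sketch of how such a helper might be completed, where every returned key is an assumption, since the commit shows nothing past line 100:

# Hedged sketch: mirrors the two lines visible in the diff, then invents a
# plausible return shape. Key names are assumptions, not from the commit.
import platform
import psutil
import torch

def get_system_stats_sketch():
    gpu_info = torch.cuda.get_device_properties(0) if torch.cuda.is_available() else None
    memory = psutil.virtual_memory()
    return {
        "platform": platform.platform(),
        "cpu_percent": psutil.cpu_percent(interval=0.1),
        "ram_used_gb": round(memory.used / 1e9, 2),
        "ram_total_gb": round(memory.total / 1e9, 2),
        "gpu": gpu_info.name if gpu_info else "CPU only",
    }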