Gregor Betz committed
logging
app.py CHANGED
@@ -88,18 +88,18 @@ CHATBOT_INSTRUCTIONS = (
 with open("config.yaml") as stream:
     try:
         demo_config = yaml.safe_load(stream)
-        logging.
+        logging.getLogger(__name__).info(f"Config: {demo_config}")
     except yaml.YAMLError as exc:
-        logging.error(f"Error loading config: {exc}")
+        logging.getLogger(__name__).error(f"Error loading config: {exc}")
         gr.Error("Error loading config: {exc}")
 
 try:
     client_kwargs, guide_kwargs = process_config(demo_config)
 except Exception as exc:
-    logging.error(f"Error processing config: {exc}")
+    logging.getLogger(__name__).error(f"Error processing config: {exc}")
     gr.Error(f"Error processing config: {exc}")
 
-logging.info(f"Reasoning guide expert model is {guide_kwargs['expert_model']}.")
+logging.getLogger(__name__).info(f"Reasoning guide expert model is {guide_kwargs['expert_model']}.")
 
 
 
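The pattern this commit applies throughout app.py: a bare logging.error(...) or logging.info(...) routes through the root logger, while logging.getLogger(__name__) returns a logger named after the module, so its records carry that name and can be filtered or handled per module. A minimal sketch of the difference (illustrative only, not part of the commit):

import logging

logging.basicConfig(format="%(name)s %(levelname)s: %(message)s", level=logging.INFO)

logging.info("via the root logger")                     # record name: "root"
logging.getLogger(__name__).info("via a named logger")  # record name: "__main__" or the module name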
@@ -113,7 +113,7 @@ def setup_client_llm(**client_kwargs) -> LogitsModel | None:
     try:
         llm = create_logits_model(**client_kwargs)
     except Exception as e:
-        logging.error(f"When setting up client llm: Error: {e}")
+        logging.getLogger(__name__).error(f"When setting up client llm: Error: {e}")
         return False
     return llm
 
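Since every call site now repeats logging.getLogger(__name__), an equivalent arrangement would bind the logger once at module scope; getLogger caches loggers by name, so both spellings reach the same logger object. A sketch under that assumption (create_logits_model is the repo's own helper; the function body is otherwise unchanged):

import logging

logger = logging.getLogger(__name__)  # bound once at import time

def setup_client_llm(**client_kwargs):
    try:
        llm = create_logits_model(**client_kwargs)  # helper assumed from the surrounding repo
    except Exception as e:
        logger.error(f"When setting up client llm: Error: {e}")
        return False
    return llm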
@@ -151,9 +151,9 @@ async def bot(
 
     if health_check.get("status", None) != "ok":
         health_msg = " | ".join([f"{k}: {v}" for k, v in health_check.items()])
-        logging.error(f"Guide health check failed: {health_msg}")
+        logging.getLogger(__name__).error(f"Guide health check failed: {health_msg}")
         gr.Error(f"LLM availability / health check failed: {health_msg}")
-    logging.info(f"Health check: {health_check}")
+    logging.getLogger(__name__).info(f"Health check: {health_check}")
 
     message = history[-1][0]
 
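A note on the Gradio calls around these log lines: gr.Error is an exception class and shows in the UI only when raised, whereas gr.Info and gr.Warning are ordinary calls. A self-contained sketch of the raising form (check_health is a hypothetical name; the rest follows Gradio's documented API):

import logging

import gradio as gr

def check_health(health_check: dict) -> None:
    # Raising, not merely constructing, gr.Error is what surfaces it in the UI.
    if health_check.get("status", None) != "ok":
        health_msg = " | ".join(f"{k}: {v}" for k, v in health_check.items())
        logging.getLogger(__name__).error(f"Guide health check failed: {health_msg}")
        raise gr.Error(f"LLM availability / health check failed: {health_msg}")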
@@ -163,7 +163,7 @@ async def bot(
     async for otype, ovalue in guide.guide(message):
         logging.getLogger(__name__).info(f"Guide output: {otype.value} - {ovalue}")
         if otype.value == "progress":
-            logging.info(f"Progress: {ovalue}")
+            logging.getLogger(__name__).info(f"Progress: {ovalue}")
             gr.Info(ovalue, duration=12)
             progress((progress_step,4))
             progress_step += 1
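The progress plumbing in this last hunk uses Gradio's gr.Progress tracker: injected through a default parameter, it can be called with an (index, total) tuple to render a step counter, and gr.Info(message, duration=...) keeps the toast visible for the given number of seconds. A compact sketch with a hypothetical handler (assuming Gradio 4+):

import gradio as gr

async def bot(history, progress=gr.Progress()):  # Gradio injects the tracker at call time
    for step in range(4):
        progress((step, 4))                              # renders as "step/4"
        gr.Info(f"working on step {step}", duration=12)  # toast shown for ~12 seconds
    return history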