baconnier commited on
Commit
95b6a1b
·
verified ·
1 Parent(s): 3b8058a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -34
app.py CHANGED
@@ -42,40 +42,44 @@ class ArtExplorer:
42
  return fig
43
 
44
  def get_llm_response(self, query: str, zoom_context: dict = None) -> dict:
45
- try:
46
- current_zoom_states = {
47
- "temporal": {"level": self.current_state["zoom_level"], "selection": ""},
48
- "geographical": {"level": self.current_state["zoom_level"], "selection": ""},
49
- "style": {"level": self.current_state["zoom_level"], "selection": ""},
50
- "subject": {"level": self.current_state["zoom_level"], "selection": ""}
51
- }
52
-
53
- if zoom_context:
54
- for key, value in zoom_context.items():
55
- if key in current_zoom_states:
56
- current_zoom_states[key]["selection"] = value
57
-
58
- messages=[
59
- {"role": "system", "content": "You are an expert art historian specializing in interactive exploration."},
60
- {"role": "user", "content": CONTEXTUAL_ZOOM_PROMPT.format(
61
- user_query=query,
62
- current_zoom_states=json.dumps(current_zoom_states, indent=2)
63
- )}
64
- ]
65
-
66
- print(messages)
67
- response = self.client.chat.completions.create(
68
- model="mixtral-8x7b-32768",
69
- messages=messages,
70
- temperature=0.1,
71
- max_tokens=2048
72
- )
73
- print(response)
74
- result = json.loads(response.choices[0].message.content)
75
- return result
76
- except Exception as e:
77
- print(f"Error in LLM response: {str(e)}")
78
- return self.get_default_response()
 
 
 
 
79
 
80
    def get_default_response(self):
        # Fallback payload used by get_llm_response when the model call fails
        # or the reply cannot be parsed as JSON.
        """Return the canned CONTEXTUAL_ZOOM default response."""
        return CONTEXTUAL_ZOOM_default_response
 
42
  return fig
43
 
44
  def get_llm_response(self, query: str, zoom_context: dict = None) -> dict:
45
+ try:
46
+ current_zoom_states = {
47
+ "temporal": {"level": self.current_state["zoom_level"], "selection": ""},
48
+ "geographical": {"level": self.current_state["zoom_level"], "selection": ""},
49
+ "style": {"level": self.current_state["zoom_level"], "selection": ""},
50
+ "subject": {"level": self.current_state["zoom_level"], "selection": ""}
51
+ }
52
+
53
+ if zoom_context:
54
+ for key, value in zoom_context.items():
55
+ if key in current_zoom_states:
56
+ current_zoom_states[key]["selection"] = value
57
+
58
+ messages = [
59
+ {"role": "system", "content": "You are an expert art historian specializing in interactive exploration."},
60
+ {"role": "user", "content": CONTEXTUAL_ZOOM_PROMPT.format(
61
+ user_query=query,
62
+ current_zoom_states=json.dumps(current_zoom_states, indent=2)
63
+ )}
64
+ ]
65
+
66
+ response = self.client.chat.completions.create(
67
+ model="mixtral-8x7b-32768",
68
+ messages=messages,
69
+ temperature=0.1,
70
+ max_tokens=2048
71
+ )
72
+
73
+ result = json.loads(response.choices[0].message.content)
74
+ return result
75
+ except json.JSONDecodeError as json_err:
76
+ print(f"JSON decode error: {str(json_err)}")
77
+ print(f"Response content: {response.choices[0].message.content}")
78
+ return self.get_default_response()
79
+ except Exception as e:
80
+ print(f"Error in LLM response: {str(e)}")
81
+ return self.get_default_response()
82
+
83
 
84
    def get_default_response(self):
        # Fallback payload used by get_llm_response when the model call fails
        # or the reply cannot be parsed as JSON.
        """Return the canned CONTEXTUAL_ZOOM default response."""
        return CONTEXTUAL_ZOOM_default_response