baconnier committed
Commit 02a2c7a · verified · 1 Parent(s): 9095529

Update app.py

Files changed (1)
app.py +48 -44
app.py CHANGED
@@ -42,50 +42,54 @@ class ArtExplorer:
         )
         return fig
 
-    def get_llm_response(self, query: str, zoom_context: dict = None) -> dict:
-        try:
-            current_zoom_states = {
-                "temporal": {"level": self.current_state["zoom_level"], "selection": ""},
-                "geographical": {"level": self.current_state["zoom_level"], "selection": ""},
-                "style": {"level": self.current_state["zoom_level"], "selection": ""},
-                "subject": {"level": self.current_state["zoom_level"], "selection": ""}
-            }
-
-            if zoom_context:
-                for key, value in zoom_context.items():
-                    if key in current_zoom_states:
-                        current_zoom_states[key]["selection"] = value
-
-            messages = [
-                {"role": "system", "content": "You are an expert art historian specializing in interactive exploration."},
-                {"role": "user", "content": CONTEXTUAL_ZOOM_PROMPT.format(
-                    user_query=query,
-                    current_zoom_states=json.dumps(current_zoom_states, indent=2)
-                )}
-            ]
-
-            print("Messages sent to LLM:")
-            print(messages)
-
-            response = self.client.chat.completions.create(
-                model="mixtral-8x7b-32768",
-                messages=messages,
-                temperature=0.1,
-                max_tokens=2048
-            )
-
-            print("Raw response from LLM:")
-            print(response)
-
-            result = json.loads(response.choices[0].message.content)
-
-            print("Parsed result:")
-            print(result)
-
-            return result
-        except Exception as e:
-            print(f"Error in LLM response: {str(e)}")
-            return self.get_default_response()
+    def get_llm_response(self, query: str, zoom_context: dict = None) -> dict:
+        try:
+            current_zoom_states = {
+                "temporal": {"level": self.current_state["zoom_level"], "selection": ""},
+                "geographical": {"level": self.current_state["zoom_level"], "selection": ""},
+                "style": {"level": self.current_state["zoom_level"], "selection": ""},
+                "subject": {"level": self.current_state["zoom_level"], "selection": ""}
+            }
+
+            if zoom_context:
+                for key, value in zoom_context.items():
+                    if key in current_zoom_states:
+                        current_zoom_states[key]["selection"] = value
+
+            messages = [
+                {"role": "system", "content": "You are an expert art historian specializing in interactive exploration."},
+                {"role": "user", "content": CONTEXTUAL_ZOOM_PROMPT.format(
+                    user_query=query,
+                    current_zoom_states=json.dumps(current_zoom_states, indent=2)
+                )}
+            ]
+
+            print("Messages sent to LLM:")
+            print(messages)
+
+            response = self.client.chat.completions.create(
+                model="mixtral-8x7b-32768",
+                messages=messages,
+                temperature=0.1,
+                max_tokens=2048
+            )
+
+            print("Raw response from LLM:")
+            print(response)
+
+            result = json.loads(response.choices[0].message.content)
+
+            print("Parsed result:")
+            print(result)
+
+            return result
+        except json.JSONDecodeError as e:
+            print(f"JSON decode error: {str(e)}")
+            print(f"Response content: {response.choices[0].message.content if 'response' in locals() else 'No response'}")
+            return self.get_default_response()
+        except Exception as e:
+            print(f"Error in LLM response: {str(e)}")
+            return self.get_default_response()
 
     def get_default_response(self):
        return CONTEXTUAL_ZOOM_default_response
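For reference, the substantive change is in the except clauses: a malformed JSON reply from the model is now caught as json.JSONDecodeError and logged together with the raw response content before falling back to the default response, instead of being swallowed by the single generic except. A minimal standalone sketch of that two-tier fallback pattern (the Groq client, CONTEXTUAL_ZOOM_PROMPT, and CONTEXTUAL_ZOOM_default_response from app.py are not reproduced; DEFAULT_RESPONSE and parse_llm_reply below are hypothetical stand-ins):

```python
import json

# Hypothetical stand-in for CONTEXTUAL_ZOOM_default_response in app.py.
DEFAULT_RESPONSE = {"temporal": {"level": 0, "selection": ""}}

def parse_llm_reply(content: str) -> dict:
    """Parse a model reply as JSON, mirroring the commit's fallback order."""
    try:
        return json.loads(content)
    except json.JSONDecodeError as e:
        # Malformed JSON gets its own branch so the raw content is logged,
        # which is the behavior this commit adds to get_llm_response.
        print(f"JSON decode error: {e}")
        print(f"Response content: {content}")
        return DEFAULT_RESPONSE
    except Exception as e:
        # Anything else (e.g. content is None) still falls back safely.
        print(f"Error in LLM response: {e}")
        return DEFAULT_RESPONSE

print(parse_llm_reply('{"temporal": {"level": 1, "selection": "1800s"}}'))  # parsed dict
print(parse_llm_reply("not json"))  # logs the bad content, returns DEFAULT_RESPONSE
```

Ordering matters here: json.JSONDecodeError subclasses ValueError and ultimately Exception, so the narrower handler must come first; a leading generic except would shadow it, which is effectively what the old single except clause did.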