1hangzhao committed
Commit 89b4f25 · verified · 1 Parent(s): ae96d97

Update ontochat/functions.py

Files changed (1):
  1. ontochat/functions.py +70 -74
ontochat/functions.py CHANGED
@@ -105,13 +105,6 @@ current_preidentified = [
  5. The language is concise, precise, and written in a professional tone.
  6. The final refined answer is structured as bullet points, with each outcome represented as a separate bullet.""",
 ]
-
-
-
-
-
-def load_example(selection):
-    return current_preidentified[selection]
 
 def set_openai_api_key(api_key: str):
     global openai_api_key
@@ -186,70 +179,73 @@ def user_story_generator(message, history):
 
     return history, ""
 
-def cq_generator(message, history):
-    check_api_key()
-    instructions = [{
-        "role": "assistant",
-        "content": "You are a conversational ontology engineering assistant."
-    }, {
-        "role": "user",
-        "content": "Here are instructions for you on how to generate high-quality competency questions. First, here "
-                   "are some good examples of competency questions generated from example data. Who performs the song? "
-                   "from the data Yesterday was performed by Armando Rocca, When (what year) was the building built? "
-                   "from the data The Church was built in 1619, In which context is the building located? from the "
-                   "data The Church is located in a periurban context. Second, how to make them less complex. Take the "
-                   "generated competency questions and check if any of them can be divided into multiple questions. If "
-                   "they do, split the competency question into multiple competency questions. If it does not, leave "
-                   "the competency question as it is. For example, the competency question Who wrote The Hobbit and in "
-                   "what year was the book written? must be split into two competency questions: Who wrote the book? "
-                   "and In what year was the book written?. Another example is the competency question, When was the "
-                   "person born?. This competency question cannot be divided into multiple questions. Third, how to "
-                   "remove real entities to abstract them. Take the competency questions and check if they contain "
-                   "real-world entities, like Freddy Mercury or 1837. If they do, change those real-world entities "
-                   "from these competency questions to more general concepts. For example, the competency question "
-                   "Which is the author of Harry Potter? should be changed to Which is the author of the book?. "
-                   "Similarly, the competency question Who wrote the book in 2018? should be changed to Who wrote the "
-                   "book, and in what year was the book written?"
-    }]
-    messages = build_messages(history)
-    messages.append({
-        "role": "user",
-        "content": message
-    })
-    bot_message = chat_completion(openai_api_key, instructions + messages)
-    history.append([message, bot_message])
-    return bot_message, history, ""
-
-def load_example_user_story():
-    with open("data/Linka#1_MusicKnowledge.md", "r") as f:
-        return f.read()
-
-def clustering_generator(cqs, cluster_method, n_clusters):
-    check_api_key()
-    if n_clusters:
-        n_clusters = int(n_clusters)
-
-    cqs, cq_embeddings = compute_embeddings(cqs)
-
-    if cluster_method == "Agglomerative clustering":
-        cq_clusters, cluster_image = agglomerative_clustering(cqs, cq_embeddings, n_clusters)
-    else: # cluster_method == "LLM clustering"
-        cq_clusters, cluster_image = llm_cq_clustering(cqs, n_clusters, openai_api_key)
-
-    return cluster_image, json.dumps(cq_clusters, indent=4)
-
-def ontology_testing(ontology_file, ontology_desc, cqs):
-    check_api_key()
-    verbalisation = verbalise_ontology(ontology_file, ontology_desc, "")
-    messages = [{
-        "role": "system",
-        "content": "Please (1) provide a description of the ontology uploaded to provide basic information and "
-                   "additional context, (2) give the competency questions (CQs) that you want to test with."
-    }, {
-        "role": "user",
-        "content": verbalisation + "\n" + f"Given the above ontology, please label each competency question: {cqs} to "
-                                          f"determine whether it is addressed properly or not. Format your response in"
-                                          f" ['yes': 'CQ1', 'no': 'CQ2', ...]."
-    }]
-    bot_message = chat_completion(openai_api_key, messages)
-    return bot_message
+def load_example(selection):
+    return current_preidentified[selection]
+
+# def cq_generator(message, history):
+#     check_api_key()
+#     instructions = [{
+#         "role": "assistant",
+#         "content": "You are a conversational ontology engineering assistant."
+#     }, {
+#         "role": "user",
+#         "content": "Here are instructions for you on how to generate high-quality competency questions. First, here "
+#                    "are some good examples of competency questions generated from example data. Who performs the song? "
+#                    "from the data Yesterday was performed by Armando Rocca, When (what year) was the building built? "
+#                    "from the data The Church was built in 1619, In which context is the building located? from the "
+#                    "data The Church is located in a periurban context. Second, how to make them less complex. Take the "
+#                    "generated competency questions and check if any of them can be divided into multiple questions. If "
+#                    "they do, split the competency question into multiple competency questions. If it does not, leave "
+#                    "the competency question as it is. For example, the competency question Who wrote The Hobbit and in "
+#                    "what year was the book written? must be split into two competency questions: Who wrote the book? "
+#                    "and In what year was the book written?. Another example is the competency question, When was the "
+#                    "person born?. This competency question cannot be divided into multiple questions. Third, how to "
+#                    "remove real entities to abstract them. Take the competency questions and check if they contain "
+#                    "real-world entities, like Freddy Mercury or 1837. If they do, change those real-world entities "
+#                    "from these competency questions to more general concepts. For example, the competency question "
+#                    "Which is the author of Harry Potter? should be changed to Which is the author of the book?. "
+#                    "Similarly, the competency question Who wrote the book in 2018? should be changed to Who wrote the "
+#                    "book, and in what year was the book written?"
+#     }]
+#     messages = build_messages(history)
+#     messages.append({
+#         "role": "user",
+#         "content": message
+#     })
+#     bot_message = chat_completion(openai_api_key, instructions + messages)
+#     history.append([message, bot_message])
+#     return bot_message, history, ""
+
+# def load_example_user_story():
+#     with open("data/Linka#1_MusicKnowledge.md", "r") as f:
+#         return f.read()
+
+# def clustering_generator(cqs, cluster_method, n_clusters):
+#     check_api_key()
+#     if n_clusters:
+#         n_clusters = int(n_clusters)
+
+#     cqs, cq_embeddings = compute_embeddings(cqs)
+
+#     if cluster_method == "Agglomerative clustering":
+#         cq_clusters, cluster_image = agglomerative_clustering(cqs, cq_embeddings, n_clusters)
+#     else: # cluster_method == "LLM clustering"
+#         cq_clusters, cluster_image = llm_cq_clustering(cqs, n_clusters, openai_api_key)
+
+#     return cluster_image, json.dumps(cq_clusters, indent=4)
+
+# def ontology_testing(ontology_file, ontology_desc, cqs):
+#     check_api_key()
+#     verbalisation = verbalise_ontology(ontology_file, ontology_desc, "")
+#     messages = [{
+#         "role": "system",
+#         "content": "Please (1) provide a description of the ontology uploaded to provide basic information and "
+#                    "additional context, (2) give the competency questions (CQs) that you want to test with."
+#     }, {
+#         "role": "user",
+#         "content": verbalisation + "\n" + f"Given the above ontology, please label each competency question: {cqs} to "
+#                                           f"determine whether it is addressed properly or not. Format your response in"
+#                                           f" ['yes': 'CQ1', 'no': 'CQ2', ...]."
+#     }]
+#     bot_message = chat_completion(openai_api_key, messages)
+#     return bot_message
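
For readers who want to see how the relocated load_example helper could be used: the sketch below is illustrative only. It assumes a Gradio front end (OntoChat's UI layer is not part of this diff), and the component names example_selector and prompt_box, the placeholder prompt entries, and the Blocks layout are invented for the example rather than taken from this commit.

# Illustrative sketch only -- not part of this commit.
import gradio as gr

# Placeholder prompts; the real list (current_preidentified) lives in ontochat/functions.py.
current_preidentified = [
    "Example prompt 1 ...",
    "Example prompt 2 ...",
]


def load_example(selection):
    # Same shape as the committed helper: return the pre-identified prompt at the selected index.
    return current_preidentified[selection]


with gr.Blocks() as demo:
    # Hypothetical component names (example_selector, prompt_box).
    example_selector = gr.Dropdown(
        choices=[f"Example {i + 1}" for i in range(len(current_preidentified))],
        type="index",  # the callback receives the selected index, matching the list indexing above
        label="Pre-identified examples",
    )
    prompt_box = gr.Textbox(label="Prompt", lines=8)
    example_selector.change(load_example, inputs=example_selector, outputs=prompt_box)

demo.launch()

Using type="index" on the dropdown means the callback receives an integer position rather than the selected label, which matches the helper's list-indexing signature.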