Update ontochat/functions.py
ontochat/functions.py  +70 -74  CHANGED
@@ -105,13 +105,6 @@ current_preidentified = [
 5. The language is concise, precise, and written in a professional tone.
 6. The final refined answer is structured as bullet points, with each outcome represented as a separate bullet.""",
 ]
-
-
-
-
-
-def load_example(selection):
-    return current_preidentified[selection]
 
 def set_openai_api_key(api_key: str):
     global openai_api_key
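The load_example helper removed here is not deleted for good: it is re-added further down in the file, after user_story_generator (see the next hunk). For context, here is a minimal sketch of how such a selector is typically wired to the UI, assuming a Gradio front end, which the chatbot-style history handling elsewhere in this file suggests. The component names and example data below are illustrative, not part of this commit.

# Hypothetical wiring sketch; assumes a Gradio UI. Component names and
# example data are illustrative, not taken from this commit.
import gradio as gr

current_preidentified = ["Example user story A", "Example user story B"]

def load_example(selection):
    return current_preidentified[selection]

with gr.Blocks() as demo:
    # type="index" makes the dropdown pass the selected position,
    # which is what load_example expects
    selector = gr.Dropdown(choices=["Story A", "Story B"], type="index",
                           label="Preset examples")
    textbox = gr.Textbox(label="User story")
    selector.change(fn=load_example, inputs=selector, outputs=textbox)
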
@@ -186,70 +179,73 @@ def user_story_generator(message, history):
 
     return history, ""
 
-def cq_generator(message, history):
-    check_api_key()
-    instructions = [{
-        "role": "assistant",
-        "content": "You are a conversational ontology engineering assistant."
-    }, {
-        "role": "user",
-        "content": "Here are instructions for you on how to generate high-quality competency questions. First, here "
-                   "are some good examples of competency questions generated from example data. Who performs the song? "
-                   "from the data Yesterday was performed by Armando Rocca, When (what year) was the building built? "
-                   "from the data The Church was built in 1619, In which context is the building located? from the "
-                   "data The Church is located in a periurban context. Second, how to make them less complex. Take the "
-                   "generated competency questions and check if any of them can be divided into multiple questions. If "
-                   "they do, split the competency question into multiple competency questions. If it does not, leave "
-                   "the competency question as it is. For example, the competency question Who wrote The Hobbit and in "
-                   "what year was the book written? must be split into two competency questions: Who wrote the book? "
-                   "and In what year was the book written?. Another example is the competency question, When was the "
-                   "person born?. This competency question cannot be divided into multiple questions. Third, how to "
-                   "remove real entities to abstract them. Take the competency questions and check if they contain "
-                   "real-world entities, like Freddy Mercury or 1837. If they do, change those real-world entities "
-                   "from these competency questions to more general concepts. For example, the competency question "
-                   "Which is the author of Harry Potter? should be changed to Which is the author of the book?. "
-                   "Similarly, the competency question Who wrote the book in 2018? should be changed to Who wrote the "
-                   "book, and in what year was the book written?"
-    }]
-    messages = build_messages(history)
-    messages.append({
-        "role": "user",
-        "content": message
-    })
-    bot_message = chat_completion(openai_api_key, instructions + messages)
-    history.append([message, bot_message])
-    return bot_message, history, ""
-
-def load_example_user_story():
-    with open("data/Linka#1_MusicKnowledge.md", "r") as f:
-        return f.read()
-
-def clustering_generator(cqs, cluster_method, n_clusters):
-    check_api_key()
-    if n_clusters:
-        n_clusters = int(n_clusters)
-
-    cqs, cq_embeddings = compute_embeddings(cqs)
-
-    if cluster_method == "Agglomerative clustering":
-        cq_clusters, cluster_image = agglomerative_clustering(cqs, cq_embeddings, n_clusters)
-    else:  # cluster_method == "LLM clustering"
-        cq_clusters, cluster_image = llm_cq_clustering(cqs, n_clusters, openai_api_key)
-
-    return cluster_image, json.dumps(cq_clusters, indent=4)
-
-def ontology_testing(ontology_file, ontology_desc, cqs):
-    check_api_key()
-    verbalisation = verbalise_ontology(ontology_file, ontology_desc, "")
-    messages = [{
-        "role": "system",
-        "content": "Please (1) provide a description of the ontology uploaded to provide basic information and "
-                   "additional context, (2) give the competency questions (CQs) that you want to test with."
-    }, {
-        "role": "user",
-        "content": verbalisation + "\n" + f"Given the above ontology, please label each competency question: {cqs} to "
-                   f"determine whether it is addressed properly or not. Format your response in"
-                   f" ['yes': 'CQ1', 'no': 'CQ2', ...]."
-    }]
-    bot_message = chat_completion(openai_api_key, messages)
-    return bot_message
+def load_example(selection):
+    return current_preidentified[selection]
+
+# def cq_generator(message, history):
+#     check_api_key()
+#     instructions = [{
+#         "role": "assistant",
+#         "content": "You are a conversational ontology engineering assistant."
+#     }, {
+#         "role": "user",
+#         "content": "Here are instructions for you on how to generate high-quality competency questions. First, here "
+#                    "are some good examples of competency questions generated from example data. Who performs the song? "
+#                    "from the data Yesterday was performed by Armando Rocca, When (what year) was the building built? "
+#                    "from the data The Church was built in 1619, In which context is the building located? from the "
+#                    "data The Church is located in a periurban context. Second, how to make them less complex. Take the "
+#                    "generated competency questions and check if any of them can be divided into multiple questions. If "
+#                    "they do, split the competency question into multiple competency questions. If it does not, leave "
+#                    "the competency question as it is. For example, the competency question Who wrote The Hobbit and in "
+#                    "what year was the book written? must be split into two competency questions: Who wrote the book? "
+#                    "and In what year was the book written?. Another example is the competency question, When was the "
+#                    "person born?. This competency question cannot be divided into multiple questions. Third, how to "
+#                    "remove real entities to abstract them. Take the competency questions and check if they contain "
+#                    "real-world entities, like Freddy Mercury or 1837. If they do, change those real-world entities "
+#                    "from these competency questions to more general concepts. For example, the competency question "
+#                    "Which is the author of Harry Potter? should be changed to Which is the author of the book?. "
+#                    "Similarly, the competency question Who wrote the book in 2018? should be changed to Who wrote the "
+#                    "book, and in what year was the book written?"
+#     }]
+#     messages = build_messages(history)
+#     messages.append({
+#         "role": "user",
+#         "content": message
+#     })
+#     bot_message = chat_completion(openai_api_key, instructions + messages)
+#     history.append([message, bot_message])
+#     return bot_message, history, ""
+
+# def load_example_user_story():
+#     with open("data/Linka#1_MusicKnowledge.md", "r") as f:
+#         return f.read()
+
+# def clustering_generator(cqs, cluster_method, n_clusters):
+#     check_api_key()
+#     if n_clusters:
+#         n_clusters = int(n_clusters)
+
+#     cqs, cq_embeddings = compute_embeddings(cqs)
+
+#     if cluster_method == "Agglomerative clustering":
+#         cq_clusters, cluster_image = agglomerative_clustering(cqs, cq_embeddings, n_clusters)
+#     else:  # cluster_method == "LLM clustering"
+#         cq_clusters, cluster_image = llm_cq_clustering(cqs, n_clusters, openai_api_key)
+
+#     return cluster_image, json.dumps(cq_clusters, indent=4)
+
+# def ontology_testing(ontology_file, ontology_desc, cqs):
+#     check_api_key()
+#     verbalisation = verbalise_ontology(ontology_file, ontology_desc, "")
+#     messages = [{
+#         "role": "system",
+#         "content": "Please (1) provide a description of the ontology uploaded to provide basic information and "
+#                    "additional context, (2) give the competency questions (CQs) that you want to test with."
+#     }, {
+#         "role": "user",
+#         "content": verbalisation + "\n" + f"Given the above ontology, please label each competency question: {cqs} to "
+#                    f"determine whether it is addressed properly or not. Format your response in"
+#                    f" ['yes': 'CQ1', 'no': 'CQ2', ...]."
+#     }]
+#     bot_message = chat_completion(openai_api_key, messages)
+#     return bot_message