1hangzhao committed
Commit a77f29d · verified · 1 Parent(s): 66ea0b9

Update ontochat/functions.py

Files changed (1)
  1. ontochat/functions.py +106 -87
ontochat/functions.py CHANGED
@@ -1,38 +1,91 @@
-"""
-Interface functions
-"""
-
 import json
-
 from ontochat.chatbot import chat_completion, build_messages
 from ontochat.analysis import compute_embeddings, agglomerative_clustering, llm_cq_clustering
 from ontochat.verbaliser import verbalise_ontology
-
-
+import gradio as gr
+
+openai_api_key = None
+
+# Global dictionaries to hold pre-identified prompts and their corresponding long prompts
+preidentified_prompts = {
+    "expand_skills_interests": "My skills are [skills] and my interests are [interests]. Can you help me expand these and ensure they align with my occupation?",
+
+    "expand_user_goal": "My user goal description is [user goal description]. Can you help me refine this goal, without changing the actions or keywords, and ensure it aligns with my interests and domain?",
+
+    "expand_actions": "The actions I need to take to achieve my goal are [actions]. Can you help me expand these actions, ensuring they align with my occupation and skills, without changing the goal description or keywords?",
+
+    "expand_keywords": "The keywords for my user goal and actions are [keywords]. Can you help me refine these keywords, ensuring they are relevant to my goal and actions, without changing the goal or actions themselves?",
+
+    "expand_current_methods": "The current methods I use to perform my actions are [current methods]. Can you help me elaborate on these methods, ensuring they align with my occupation, skills, and goal, without changing the challenges, new methods, or outcomes?",
+
+    "expand_challenges": "The challenges I face with my current methods are [challenges]. Can you help me expand on these challenges, ensuring they logically relate to my skills and goal, without changing the current methods, new methods, or outcomes?",
+
+    "expand_new_methods": "The new methods I will use through this ontology to address my challenges are [new methods]. Can you help me elaborate on these new methods, ensuring they align with my occupation, skills, and goal, without changing the current methods, challenges, or outcomes?",
+
+    "expand_outcomes": "The outcomes I expect after implementing the new methods are [outcomes]. Can you help me refine these outcomes, ensuring they address the challenges and benefit my goal, without changing the current methods, challenges, or new methods?"
+}
+
+current_preidentified_prompts = []
+current_preidentified = [
+    "Cultural Heritage Preservation",
+    "Accessibility and Inclusion",
+    "Multisensory Interactions",
+    "Digital Rights and Preservation"
+]
+
+def load_example(selection):
+    return current_preidentified[selection]
+
+def update_examples():
+    current_samples = current_preidentified_prompts
+    return gr.Dataset(samples=current_samples)
+
 def set_openai_api_key(api_key: str):
     global openai_api_key
     openai_api_key = api_key
     return "API key has been set! Now you can chat with the chatbot. Enjoy :)"
 
+def check_api_key():
+    if openai_api_key is None:
+        raise ValueError("OpenAI API key is not set. Please set it using the 'Set API Key' button.")
+
+def get_preidentified_prompts():
+    return list(preidentified_prompts.keys())
 
 def user_story_generator(message, history):
-    instructions = [{
+    instructions = [
+        {
         "role": "system",
-        "content": "You are a conversational ontology engineering assistant."
-    }, {
-        "role": "user",
-        "content": "I am a domain expert trying to create a user story to be used by ontology engineers. You are the "
-                   "ontology expert. Only ask the following question once I have responded. Ask for the"
-                   "specifications to generate a user story as a user of the system, which should include: 1. The "
-                   "Persona: What are the name, occupation, skills and interests of the user? 2. The Goal: What is "
-                   "the goal of the user? Are they facing specific issues? 3. Example Data: Do you have examples of "
-                   "the specific data available? Make sure you have answers to all three questions before providing "
-                   "a user story. The user story should be written in the following structure: title, persona, goal, "
-                   "scenario (where the user could use a structured knowledge base to help with their work), and "
-                   "example data. Only ask the next question once I have responded. And you should also ask questions "
-                   "to elaborate on more information after the user provides the initial information, and ask for "
-                   "feedback and suggestions after the user story is generated."
-    }]
+        "content":
+            "Ontology construction involves creating structured frameworks to represent knowledge in a specific domain. Ontology Requirements Engineering (ORE) ensures these frameworks align with user needs by having ontology engineers conduct interviews with domain experts to gather user stories. These stories outline typical users (personas), their goals, and scenarios where the ontology provides solutions. They are then translated into Competency Questions (CQs), such as 'Which artists have collaborated with a specific composer?', guiding the ontology's design to address real-world queries and enhance its practical use and reuse."
+
+            "You are an ontology engineer conducting an interview with a domain expert to gather information for writing an ontology user story. Follow the instructions below, asking one elicitation question each time and explain in one sentence on how to answer it, answering domain expert queries when needed. Only move to the next section or question after the current section or question's requirements are fully addressed. When a domain expert requests expansion, provide just one focused point in one sentence, directly aligned with their current answer. Do not answer any queries that are not related to this task. \n\n"
+
+            "1. Persona\n"
+            "Start by creating a persona that represents a typical user of your ontology. Ask one elicitation question for details includes [name], [age], [occupation], [skills], and [interests].\n"
+            "Once the expert provides this information, suggest possible improvements or clarifications. After all persona details are collected, move to the next section.\n\n"
+
+            "2. Goal\n"
+            "Ask one elicitation question for a description of the [user goal], explaining what the persona wants to achieve using your ontology. Ensure that it aligns with their skills and occupation.\n"
+            "Ask one elicitation question for the specific [actions] the persona will take to accomplish the goal and ask one elicitation question for gathering up to 5 relevant [keywords] that summarize the goal and actions.\n"
+            "Once the expert has answered, offer suggestions for further refinement, then proceed to the next section.\n\n"
+
+            "3. Scenario\n"
+            "[Scenario before]: Ask one elicitation question for the expert to describe the [current methods] the persona uses to perform the actions. Ask one elicitation question for the [challenges] they face when performing current methods, making sure these align with the persona's occupation and skills.\n"
+            "[Scenario during]: Ask one elicitation question for the expert to explain how their ontology introduces [new methods] to help them overcome these challenges, ensuring the methods are relevant to their role.\n"
+            "[Scenario after]: Ask one elicitation question for the expert to describe the [outcomes] after using the ontology and how it helps them achieve their goal.\n"
+            "Provide feedback on each scenario part and refine the answers if needed before moving on."
+
+            "4. Create User Story\n"
+            "Once you have completed sections 1 to 3, summarize the information into a full user story. Use the persona, goal, and scenario information to craft the user story in this format:\n\n"
+            "Persona: [name], [age], [occupation], [skills], [interests].\n"
+            "Goal: [user goal description], with actions such as [actions]. Keywords: [keywords].\n"
+            "Scenario Before: [current methods] the persona uses and the [challenges] they face.\n"
+            "Scenario During: How your ontology introduces [new methods] to overcome these challenges.\n"
+            "Scenario After: The [outcomes] achieved by using the ontology and how the persona's goal has been accomplished.\n\n"
+            "Provide the user story to the domain expert and Ask one elicitation question for any further feedback or refinements. If needed, adjust the story based on their suggestions."
+        }
+    ]
     messages = build_messages(history)
     messages.append({
         "role": "user",
@@ -40,49 +93,35 @@ def user_story_generator(message, history):
     })
    bot_message = chat_completion(openai_api_key, instructions + messages)
     history.append([message, bot_message])
-    return bot_message, history, ""
-
-
-# def load_user_story_prompt():
-#     """
-#
-#     :return:
-#     """
-#     prompt = """
-#     Now create the full user story.The user story should be written in the following structure:
-#
-#     Title: Which topics are covered by the user story?
-#
-#     Persona: What is the occupation of the user and what are their goals?
-#
-#     Goal:
-#     Keywords: provide 5-10 keywords related to the user story
-#     Provide the issues a user is facing and how our application can help reach their goals.
-#
-#     Scenario:
-#     Write out a scenario, where the user could use a structured knowledge base to help with their work.
-#
-#     Example Data:
-#
-#     Think of a list of requirements and provide example data for each requirement. Structure the example data by requirements
-#     Example data should by simple sentences.
-#     These are possible formats:
-#     One sonata is a “Salmo alla Romana”.
-#     A concert played in San Pietro di Sturla for exhibition was recorded by ethnomusicologist Mauro Balma in 1994.
-#     The Church of San Pietro di Sturla is located in Carasco, Genova Province.
-#     The Sistema Ligure is described in the text “Campanari, campane e campanili di Liguria” By Mauro Balma, 1996.
-#     """
-#     return prompt
+    questions = generate_elicitation_questions(history)
+    return history, "", questions
 
+def generate_elicitation_questions(history):
+    instructions = [{
+        "role": "system",
+        "content": f"In the dictionary {preidentified_prompts}, the key expand_skills_interests corresponds to section 1, '1. Persona'. The keys expand_user_goal, expand_actions, and expand_keywords correspond to section 2, '2. Goal'. The keys expand_current_methods, expand_challenges, expand_new_methods, and expand_outcomes correspond to section 3, '3. Scenario'. All keys in this dictionary are related to section 4, '4. Create User Story'."
+    }]
+    messages = build_messages(history)
+    messages.append({
+        "role": "user",
+        "content": "Identify which section ('1. Persona', '2. Goal', '3. Scenario', '4. Create User Story') your most current answer or elicitation question belongs to."
+                   "If your answer or elicitation question relates to [name], [age], [occupation], [skills], or [interests], it belongs to section 1. If it's about [user goal description], [actions], or [keywords], it belongs to section 2. If it addresses [current methods], [challenges], [new methods], or [outcomes], it belongs to section 3. If sections 1 through 3 are already completed, it belongs to section 4. Once it belongs to section 4, it will always remain in section 4, regardless of what happens afterward."
+                   "Then return all the keys associated with that section. List each key name on a separate line, starting with the first key name and providing only the key names."
+    })
+    bot_message = chat_completion(openai_api_key, instructions + messages)
+    keys = bot_message.split('\n')
+    global current_preidentified_prompts
+    current_preidentified_prompts = [[key] for key in keys if key in preidentified_prompts]
+    global current_preidentified
+    current_preidentified=[]
+    for sublist in current_preidentified_prompts:
+        key = sublist[0]
+        if key in preidentified_prompts:
+            current_preidentified.append(preidentified_prompts[key])
+    return [[key] for key in keys if key in preidentified_prompts]
 
 def cq_generator(message, history):
-    """
-    generate competency questions based on the user story
-    format constraint may not be necessary if we only use LLMs for clustering
-    :param message:
-    :param history:
-    :return:
-    """
+    check_api_key()
     instructions = [{
         "role": "system",
         "content": "You are a conversational ontology engineering assistant."
@@ -115,25 +154,12 @@ def cq_generator(message, history):
     history.append([message, bot_message])
     return bot_message, history, ""
 
-
 def load_example_user_story():
-    """
-    load example user story
-    TODO: more examples
-    :return:
-    """
-    f = open("data/Linka#1_MusicKnowledge.md", "r")
-    return f.read()
-
+    with open("data/Linka#1_MusicKnowledge.md", "r") as f:
+        return f.read()
 
 def clustering_generator(cqs, cluster_method, n_clusters):
-    """
-
-    :param cqs:
-    :param cluster_method:
-    :param n_clusters: default ''
-    :return:
-    """
+    check_api_key()
     if n_clusters:
         n_clusters = int(n_clusters)
 
@@ -146,15 +172,8 @@ def clustering_generator(cqs, cluster_method, n_clusters):
 
     return cluster_image, json.dumps(cq_clusters, indent=4)
 
-
 def ontology_testing(ontology_file, ontology_desc, cqs):
-    """
-
-    :param ontology_file:
-    :param ontology_desc:
-    :param cqs:
-    :return:
-    """
+    check_api_key()
     verbalisation = verbalise_ontology(ontology_file, ontology_desc, "")
     messages = [{
         "role": "system",