Initial Draft
Browse files- app.py +15 -13
- dto/release_notes.py +1 -22
- dto/requirement_gathering.py +1 -20
- dto/user_story.py +0 -20
app.py
CHANGED
|
@@ -222,7 +222,7 @@ def fn_generate_speech_to_text(ui_audio_bytes,ui_api_key):
|
|
| 222 |
raise error
|
| 223 |
|
| 224 |
# Generate LLM response
|
| 225 |
-
def fn_chatllm_response(ui_llm_provider, lv_summarize_prompt_formatted, lv_model, ui_processing_message):
|
| 226 |
"""Generate LLM response"""
|
| 227 |
|
| 228 |
fn_display_user_messages("Generating LLM Response","Info", ui_processing_message)
|
|
@@ -230,6 +230,9 @@ def fn_chatllm_response(ui_llm_provider, lv_summarize_prompt_formatted, lv_model
|
|
| 230 |
|
| 231 |
try:
|
| 232 |
|
|
|
|
|
|
|
|
|
|
| 233 |
if(ui_llm_provider == 'Google VertexAI' or ui_llm_provider=='Google' or ui_llm_provider=='OpenAI' or ui_llm_provider=='Groq' or ui_llm_provider=='Cohere'):
|
| 234 |
lv_response = lv_model.invoke(lv_summarize_prompt_formatted).content
|
| 235 |
else:
|
|
@@ -471,14 +474,13 @@ def main():
|
|
| 471 |
# -- Generate User Story LLM Response
|
| 472 |
if ui_llm_provider and lv_extracted_text and not(lv_response) and "User Story" in ui_user_actions:
|
| 473 |
# -- Pydantic Schema
|
| 474 |
-
lv_parser = PydanticOutputParser(pydantic_object=us.UserStory)
|
| 475 |
|
| 476 |
# -- Creating Prompt
|
| 477 |
lv_template = pt.CN_USER_STORY
|
| 478 |
lv_summarize_prompt = PromptTemplate(
|
| 479 |
template=lv_template,
|
| 480 |
-
input_variables=["context"]
|
| 481 |
-
partial_variables={"format_instructions": lv_parser.get_format_instructions()},
|
| 482 |
)
|
| 483 |
lv_summarize_prompt_formatted = lv_summarize_prompt.format(
|
| 484 |
context=lv_extracted_text
|
|
@@ -486,7 +488,7 @@ def main():
|
|
| 486 |
|
| 487 |
# -- LLM Response
|
| 488 |
if lv_model:
|
| 489 |
-
lv_response = fn_chatllm_response(ui_llm_provider, lv_summarize_prompt_formatted, lv_model, ui_processing_message)
|
| 490 |
st.session_state.lv_response = lv_response
|
| 491 |
|
| 492 |
# -- Display LLM response
|
|
@@ -504,14 +506,14 @@ def main():
|
|
| 504 |
with release_notes:
|
| 505 |
if ui_llm_provider and lv_extracted_text and not(lv_response) and "Release Notes" in ui_user_actions:
|
| 506 |
# -- Pydantic Schema
|
| 507 |
-
lv_parser = PydanticOutputParser(pydantic_object=rs.ReleaseNotes)
|
| 508 |
|
| 509 |
# -- Creating Prompt
|
| 510 |
lv_template = pt.CN_RELEASE_NOTES
|
| 511 |
lv_summarize_prompt = PromptTemplate(
|
| 512 |
template=lv_template,
|
| 513 |
input_variables=["context"],
|
| 514 |
-
partial_variables={"format_instructions": lv_parser.get_format_instructions()},
|
| 515 |
)
|
| 516 |
lv_summarize_prompt_formatted = lv_summarize_prompt.format(
|
| 517 |
context=lv_extracted_text
|
|
@@ -519,7 +521,7 @@ def main():
|
|
| 519 |
|
| 520 |
# -- LLM Response
|
| 521 |
if lv_model:
|
| 522 |
-
lv_response = fn_chatllm_response(ui_llm_provider, lv_summarize_prompt_formatted, lv_model, ui_processing_message)
|
| 523 |
st.session_state.lv_response = lv_response
|
| 524 |
|
| 525 |
# -- Display LLM response
|
|
@@ -535,14 +537,14 @@ def main():
|
|
| 535 |
with requirement_generation:
|
| 536 |
if ui_llm_provider and lv_extracted_text and not(lv_response) and "Requirement Generation" in ui_user_actions:
|
| 537 |
# -- Pydantic Schema
|
| 538 |
-
lv_parser = PydanticOutputParser(pydantic_object=rq.RequirementGatheringDetails)
|
| 539 |
|
| 540 |
# -- Creating Prompt
|
| 541 |
lv_template = pt.CN_REQUIREMENT_GATHERING
|
| 542 |
lv_summarize_prompt = PromptTemplate(
|
| 543 |
template=lv_template,
|
| 544 |
input_variables=["context"],
|
| 545 |
-
partial_variables={"format_instructions": lv_parser.get_format_instructions()},
|
| 546 |
)
|
| 547 |
lv_summarize_prompt_formatted = lv_summarize_prompt.format(
|
| 548 |
context=lv_extracted_text
|
|
@@ -550,7 +552,7 @@ def main():
|
|
| 550 |
|
| 551 |
# -- LLM Response
|
| 552 |
if lv_model:
|
| 553 |
-
lv_response = fn_chatllm_response(ui_llm_provider, lv_summarize_prompt_formatted, lv_model, ui_processing_message)
|
| 554 |
st.session_state.lv_response = lv_response
|
| 555 |
|
| 556 |
# -- Display LLM response
|
|
@@ -573,7 +575,7 @@ def main():
|
|
| 573 |
ui_summary_input = st.text_area("Input Text", value=lv_extracted_text)
|
| 574 |
if st.button("Summarize",key="summary"):
|
| 575 |
# -- Creating Prompt
|
| 576 |
-
lv_template = pt.CN_SUMMARY
|
| 577 |
lv_summarize_prompt = PromptTemplate(
|
| 578 |
template=lv_template,
|
| 579 |
input_variables=["context"]
|
|
@@ -584,7 +586,7 @@ def main():
|
|
| 584 |
|
| 585 |
# -- LLM Response
|
| 586 |
if lv_model:
|
| 587 |
-
lv_response = fn_chatllm_response(ui_llm_provider, lv_summarize_prompt_formatted, lv_model, ui_processing_message)
|
| 588 |
st.session_state.lv_response = lv_response
|
| 589 |
|
| 590 |
# -- Display LLM response
|
|
|
|
| 222 |
raise error
|
| 223 |
|
| 224 |
# Generate LLM response
|
| 225 |
+
def fn_chatllm_response(ui_llm_provider, lv_summarize_prompt_formatted, lv_model, lv_pydantic_object, ui_processing_message):
|
| 226 |
"""Generate LLM response"""
|
| 227 |
|
| 228 |
fn_display_user_messages("Generating LLM Response","Info", ui_processing_message)
|
|
|
|
| 230 |
|
| 231 |
try:
|
| 232 |
|
| 233 |
+
if lv_pydantic_object:
|
| 234 |
+
lv_model = llm.with_structured_output(lv_pydantic_object)
|
| 235 |
+
|
| 236 |
if(ui_llm_provider == 'Google VertexAI' or ui_llm_provider=='Google' or ui_llm_provider=='OpenAI' or ui_llm_provider=='Groq' or ui_llm_provider=='Cohere'):
|
| 237 |
lv_response = lv_model.invoke(lv_summarize_prompt_formatted).content
|
| 238 |
else:
|
|
|
|
| 474 |
# -- Generate User Story LLM Response
|
| 475 |
if ui_llm_provider and lv_extracted_text and not(lv_response) and "User Story" in ui_user_actions:
|
| 476 |
# -- Pydantic Schema
|
| 477 |
+
# lv_parser = PydanticOutputParser(pydantic_object=us.UserStory)
|
| 478 |
|
| 479 |
# -- Creating Prompt
|
| 480 |
lv_template = pt.CN_USER_STORY
|
| 481 |
lv_summarize_prompt = PromptTemplate(
|
| 482 |
template=lv_template,
|
| 483 |
+
input_variables=["context"]
|
|
|
|
| 484 |
)
|
| 485 |
lv_summarize_prompt_formatted = lv_summarize_prompt.format(
|
| 486 |
context=lv_extracted_text
|
|
|
|
| 488 |
|
| 489 |
# -- LLM Response
|
| 490 |
if lv_model:
|
| 491 |
+
lv_response = fn_chatllm_response(ui_llm_provider, lv_summarize_prompt_formatted, lv_model, us.UserStory, ui_processing_message)
|
| 492 |
st.session_state.lv_response = lv_response
|
| 493 |
|
| 494 |
# -- Display LLM response
|
|
|
|
| 506 |
with release_notes:
|
| 507 |
if ui_llm_provider and lv_extracted_text and not(lv_response) and "Release Notes" in ui_user_actions:
|
| 508 |
# -- Pydantic Schema
|
| 509 |
+
# lv_parser = PydanticOutputParser(pydantic_object=rs.ReleaseNotes)
|
| 510 |
|
| 511 |
# -- Creating Prompt
|
| 512 |
lv_template = pt.CN_RELEASE_NOTES
|
| 513 |
lv_summarize_prompt = PromptTemplate(
|
| 514 |
template=lv_template,
|
| 515 |
input_variables=["context"],
|
| 516 |
+
# partial_variables={"format_instructions": lv_parser.get_format_instructions()},
|
| 517 |
)
|
| 518 |
lv_summarize_prompt_formatted = lv_summarize_prompt.format(
|
| 519 |
context=lv_extracted_text
|
|
|
|
| 521 |
|
| 522 |
# -- LLM Response
|
| 523 |
if lv_model:
|
| 524 |
+
lv_response = fn_chatllm_response(ui_llm_provider, lv_summarize_prompt_formatted, lv_model, rs.ReleaseNotes, ui_processing_message)
|
| 525 |
st.session_state.lv_response = lv_response
|
| 526 |
|
| 527 |
# -- Display LLM response
|
|
|
|
| 537 |
with requirement_generation:
|
| 538 |
if ui_llm_provider and lv_extracted_text and not(lv_response) and "Requirement Generation" in ui_user_actions:
|
| 539 |
# -- Pydantic Schema
|
| 540 |
+
# lv_parser = PydanticOutputParser(pydantic_object=rq.RequirementGatheringDetails)
|
| 541 |
|
| 542 |
# -- Creating Prompt
|
| 543 |
lv_template = pt.CN_REQUIREMENT_GATHERING
|
| 544 |
lv_summarize_prompt = PromptTemplate(
|
| 545 |
template=lv_template,
|
| 546 |
input_variables=["context"],
|
| 547 |
+
# partial_variables={"format_instructions": lv_parser.get_format_instructions()},
|
| 548 |
)
|
| 549 |
lv_summarize_prompt_formatted = lv_summarize_prompt.format(
|
| 550 |
context=lv_extracted_text
|
|
|
|
| 552 |
|
| 553 |
# -- LLM Response
|
| 554 |
if lv_model:
|
| 555 |
+
lv_response = fn_chatllm_response(ui_llm_provider, lv_summarize_prompt_formatted, lv_model, rq.RequirementGatheringDetails, ui_processing_message)
|
| 556 |
st.session_state.lv_response = lv_response
|
| 557 |
|
| 558 |
# -- Display LLM response
|
|
|
|
| 575 |
ui_summary_input = st.text_area("Input Text", value=lv_extracted_text)
|
| 576 |
if st.button("Summarize",key="summary"):
|
| 577 |
# -- Creating Prompt
|
| 578 |
+
# lv_template = pt.CN_SUMMARY
|
| 579 |
lv_summarize_prompt = PromptTemplate(
|
| 580 |
template=lv_template,
|
| 581 |
input_variables=["context"]
|
|
|
|
| 586 |
|
| 587 |
# -- LLM Response
|
| 588 |
if lv_model:
|
| 589 |
+
lv_response = fn_chatllm_response(ui_llm_provider, lv_summarize_prompt_formatted, lv_model, pt.CN_SUMMARY, ui_processing_message)
|
| 590 |
st.session_state.lv_response = lv_response
|
| 591 |
|
| 592 |
# -- Display LLM response
|
dto/release_notes.py
CHANGED
|
@@ -11,31 +11,10 @@ class Enhancement(BaseModel):
|
|
| 11 |
benefits: str = Field(description="The benefit or impact on the user or system as a result of the enhancement")
|
| 12 |
reason: str = Field(description="The reason for implementing the change.")
|
| 13 |
|
| 14 |
-
def get_json_template(model: BaseModel):
|
| 15 |
-
"""helper function to get the json schema from the pydantic model"""
|
| 16 |
-
|
| 17 |
-
schema = model.model_json_schema()
|
| 18 |
-
json_schema = json.dumps(schema)
|
| 19 |
-
json_schema = Template(json_schema).substitute(
|
| 20 |
-
{"defs": "definitions", "ref": "$ref"}
|
| 21 |
-
)
|
| 22 |
-
return json.loads(json_schema)
|
| 23 |
-
|
| 24 |
-
|
| 25 |
class ReleaseNotes(BaseModel):
|
| 26 |
"""Release Notes Pydantic Schema"""
|
| 27 |
|
| 28 |
release_date: str = Field(description="Provide release date",default=datetime.now().strftime('%d-%b-%Y').upper(), const=True)
|
| 29 |
product_name: str = Field(description="Provide product name",default="Oracle Banking Retail Lending", const=True)
|
| 30 |
summary: str = Field(description="A brief introduction highlighting the key focus of this release")
|
| 31 |
-
enhancements: List[Enhancement] = Field(description="List of enhancements in this release")
|
| 32 |
-
|
| 33 |
-
def get_json_template(model: BaseModel):
|
| 34 |
-
"""helper function to get the json schema from the pydantic model"""
|
| 35 |
-
|
| 36 |
-
schema = model.model_json_schema()
|
| 37 |
-
json_schema = json.dumps(schema)
|
| 38 |
-
json_schema = Template(json_schema).substitute(
|
| 39 |
-
{"defs": "definitions", "ref": "$ref"}
|
| 40 |
-
)
|
| 41 |
-
return json.loads(json_schema)
|
|
|
|
| 11 |
benefits: str = Field(description="The benefit or impact on the user or system as a result of the enhancement")
|
| 12 |
reason: str = Field(description="The reason for implementing the change.")
|
| 13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
class ReleaseNotes(BaseModel):
|
| 15 |
"""Release Notes Pydantic Schema"""
|
| 16 |
|
| 17 |
release_date: str = Field(description="Provide release date",default=datetime.now().strftime('%d-%b-%Y').upper(), const=True)
|
| 18 |
product_name: str = Field(description="Provide product name",default="Oracle Banking Retail Lending", const=True)
|
| 19 |
summary: str = Field(description="A brief introduction highlighting the key focus of this release")
|
| 20 |
+
enhancements: List[Enhancement] = Field(description="List of enhancements in this release")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
dto/requirement_gathering.py
CHANGED
|
@@ -11,28 +11,9 @@ class RequirementGathering(BaseModel):
|
|
| 11 |
priority: str = Field(description="The priority of the requirement. Return only one from list of values [High, Medium and Low]. Ground result based on the document.")
|
| 12 |
tags: List[str] = Field(description="List of tags for the requirement to identify into relevant categories(e.g., Product Specifications, Validations and Quality Standards)")
|
| 13 |
|
| 14 |
-
def get_json_template(model: BaseModel):
|
| 15 |
-
"""helper function to get the json schema from the pydantic model"""
|
| 16 |
-
|
| 17 |
-
schema = model.model_json_schema()
|
| 18 |
-
json_schema = json.dumps(schema)
|
| 19 |
-
json_schema = Template(json_schema).substitute(
|
| 20 |
-
{"defs": "definitions", "ref": "$ref"}
|
| 21 |
-
)
|
| 22 |
-
return json.loads(json_schema)
|
| 23 |
|
| 24 |
class RequirementGatheringDetails(BaseModel):
|
| 25 |
"""Requirement Gathering Details Pydantic Schema"""
|
| 26 |
|
| 27 |
header: str = Field(description="Provide the header of the document.")
|
| 28 |
-
requirements: List[RequirementGathering] = Field(description="List of requirements gathered from the document.")
|
| 29 |
-
|
| 30 |
-
def get_json_template(model: BaseModel):
|
| 31 |
-
"""helper function to get the json schema from the pydantic model"""
|
| 32 |
-
|
| 33 |
-
schema = model.model_json_schema()
|
| 34 |
-
json_schema = json.dumps(schema)
|
| 35 |
-
json_schema = Template(json_schema).substitute(
|
| 36 |
-
{"defs": "definitions", "ref": "$ref"}
|
| 37 |
-
)
|
| 38 |
-
return json.loads(json_schema)
|
|
|
|
| 11 |
priority: str = Field(description="The priority of the requirement. Return only one from list of values [High, Medium and Low]. Ground result based on the document.")
|
| 12 |
tags: List[str] = Field(description="List of tags for the requirement to identify into relevant categories(e.g., Product Specifications, Validations and Quality Standards)")
|
| 13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
|
| 15 |
class RequirementGatheringDetails(BaseModel):
|
| 16 |
"""Requirement Gathering Details Pydantic Schema"""
|
| 17 |
|
| 18 |
header: str = Field(description="Provide the header of the document.")
|
| 19 |
+
requirements: List[RequirementGathering] = Field(description="List of requirements gathered from the document.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
dto/user_story.py
CHANGED
|
@@ -9,16 +9,6 @@ class UserStoryScenarios(BaseModel):
|
|
| 9 |
action_details: str = Field(description="Provide action details to be taken in business application by the user role specified")
|
| 10 |
expected_outcome: str = Field(description="Provide list of activities to be allowed by business user role")
|
| 11 |
|
| 12 |
-
def get_json_template(model: BaseModel):
|
| 13 |
-
"""helper function to get the json schema from the pydantic model"""
|
| 14 |
-
|
| 15 |
-
schema = model.model_json_schema()
|
| 16 |
-
json_schema = json.dumps(schema)
|
| 17 |
-
json_schema = Template(json_schema).substitute(
|
| 18 |
-
{"defs": "definitions", "ref": "$ref"}
|
| 19 |
-
)
|
| 20 |
-
return json.loads(json_schema)
|
| 21 |
-
|
| 22 |
class UserStory(BaseModel):
|
| 23 |
"""User Story Pydantic Schema"""
|
| 24 |
|
|
@@ -27,13 +17,3 @@ class UserStory(BaseModel):
|
|
| 27 |
feature: str = Field(description="Provide application feature/functionality")
|
| 28 |
benefit: str = Field(description="Provide benefits of the feature to the user role")
|
| 29 |
user_story_scenarios : List[UserStoryScenarios] = Field(description="Provide list of user story scenarios")
|
| 30 |
-
|
| 31 |
-
def get_json_template(model: BaseModel):
|
| 32 |
-
"""helper function to get the json schema from the pydantic model"""
|
| 33 |
-
|
| 34 |
-
schema = model.model_json_schema()
|
| 35 |
-
json_schema = json.dumps(schema)
|
| 36 |
-
json_schema = Template(json_schema).substitute(
|
| 37 |
-
{"defs": "definitions", "ref": "$ref"}
|
| 38 |
-
)
|
| 39 |
-
return json.loads(json_schema)
|
|
|
|
| 9 |
action_details: str = Field(description="Provide action details to be taken in business application by the user role specified")
|
| 10 |
expected_outcome: str = Field(description="Provide list of activities to be allowed by business user role")
|
| 11 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
class UserStory(BaseModel):
|
| 13 |
"""User Story Pydantic Schema"""
|
| 14 |
|
|
|
|
| 17 |
feature: str = Field(description="Provide application feature/functionality")
|
| 18 |
benefit: str = Field(description="Provide benefits of the feature to the user role")
|
| 19 |
user_story_scenarios : List[UserStoryScenarios] = Field(description="Provide list of user story scenarios")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|