Phoenix21 commited on
Commit
c0a0835
·
verified ·
1 Parent(s): b3d8117

Create chain_summary.py

Browse files
Files changed (1) hide show
  1. chain_summary.py +40 -0
chain_summary.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # chain_summary.py
2
+ import json
3
+ from typing import Dict
4
+ from langchain import PromptTemplate, LLMChain
5
+ from models import chat_model
6
+
7
# Template that fuses the report, the severity percentages, and the package
# recommendation into a single narration-oriented summarization request.
_FINAL_SUMMARY_TEMPLATE = (
    "Based on the following information:\n"
    "Report:\n{report}\n\n"
    "Problem Severity Percentages:\n{problems}\n\n"
    "Recommended Packages:\n{recommendation}\n\n"
    "Generate a short summary suitable for video narration that synthesizes this information."
)

final_prompt_template = PromptTemplate(
    template=_FINAL_SUMMARY_TEMPLATE,
    input_variables=["report", "problems", "recommendation"],
)

# Chain that sends the filled-in prompt to the shared chat model.
final_chain = LLMChain(llm=chat_model, prompt=final_prompt_template)
18
+
19
def generate_final_summary(report: str, problems: Dict[str, float], recommendation: str) -> str:
    """Run the final summarization chain and return the trimmed summary text.

    Args:
        report: Free-text report to be summarized.
        problems: Mapping of problem name to severity percentage; it is
            JSON-serialized before being inserted into the prompt.
        recommendation: Recommended-packages text.

    Returns:
        The chain's output string with leading/trailing whitespace removed.
    """
    serialized_problems = json.dumps(problems)
    raw_summary = final_chain.run(
        report=report,
        problems=serialized_problems,
        recommendation=recommendation,
    )
    return raw_summary.strip()
26
+
27
# Template asking the model to compress an existing summary while keeping
# every key point, for use as video narration.
_SHORTEN_TEMPLATE = (
    "Shorten the following summary to make it concise and engaging for video narration. "
    "Ensure all key points remain intact:\n\n"
    "{final_summary}\n\n"
    "Shortened Summary:"
)

shorten_prompt_template = PromptTemplate(
    template=_SHORTEN_TEMPLATE,
    input_variables=["final_summary"],
)

# Chain that sends the shortening prompt to the shared chat model.
shorten_chain = LLMChain(llm=chat_model, prompt=shorten_prompt_template)
37
+
38
def shorten_summary(final_summary: str) -> str:
    """Condense *final_summary* via the shortening chain.

    Args:
        final_summary: The full summary text to compress.

    Returns:
        The shortened summary with surrounding whitespace stripped.
    """
    shortened_text = shorten_chain.run(final_summary=final_summary)
    return shortened_text.strip()