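"""LLM helpers for a news-analysis and post-generation pipeline.

A Groq-hosted llama-3.1-8b-instant model analyses incoming news items and a
qwen-qwq-32b reasoning model drafts the post text; both stages use
Langfuse-managed prompts and are traced through Langfuse callback handlers.
"""
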
import os
import json
import time
import traceback

from dotenv import load_dotenv
from langfuse import Langfuse
from langfuse.callback import CallbackHandler
from langchain_groq import ChatGroq

# Read API keys and configuration from the environment / a local .env file.
load_dotenv()
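
# Two Langfuse callback handlers, one per pipeline stage, so traces from
# news analysis and post generation land under separate session IDs.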
langfuse_news_analysis_handler = CallbackHandler(
    secret_key=os.getenv("LANGFUSE_SECRET_KEY"),
    public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),
    host="https://cloud.langfuse.com",  # EU region
    session_id="news_analysis",
)

langfuse_post_generation_handler = CallbackHandler(
    secret_key=os.getenv("LANGFUSE_SECRET_KEY"),
    public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),
    host="https://cloud.langfuse.com",  # EU region
    session_id="post_generation",
)
langfuse = Langfuse()
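
# Separate Groq clients (and API keys) for the two stages: a small, fast
# model for news analysis and a reasoning model for writing the post text.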
analysis_llm = ChatGroq(
    model="llama-3.1-8b-instant",
    temperature=0.8,
    timeout=None,
    max_retries=2,
    api_key=os.getenv("GROQ_ANALYSIS_API_KEY"),
)

post_content_llm = ChatGroq(
    model="qwen-qwq-32b",
    temperature=0.8,
    timeout=None,
    max_retries=2,
    api_key=os.getenv("THREADS_POST_GENERATION_API_KEY"),
)

def basic_analysis(news):
    """Analyse a news item with the analysis LLM.

    Compiles the Langfuse-managed "news_selector" prompt with the news
    object, asks the model up to five times, and extracts the first JSON
    object found in the reply.
    """
    prompt = langfuse.get_prompt("news_selector")
    for _ in range(5):
        try:
            response = analysis_llm.invoke(
                prompt.compile(news_object=news),
                config={"callbacks": [langfuse_news_analysis_handler]},
            )
            print("################ BASIC ANALYSIS AGENT RESPONSE ################")
            print(response.content)
            print("################ BASIC ANALYSIS END AGENT RESPONSE ################")
            content = response.content
            # Reasoning models wrap their chain of thought in <think>...</think>;
            # keep only the text after the closing tag.
            if "</think>" in content:
                content = content.split("</think>")[1]
            # Extract the outermost JSON object from the free-text reply.
            start_index = content.find("{")
            end_index = content.rfind("}")
            print("start index:", start_index)
            print("end index:", end_index)
            extracted_string = ""
            if start_index != -1 and end_index != -1 and start_index < end_index:
                extracted_string = content[start_index : end_index + 1]
            try:
                results = json.loads(extracted_string)
                print(results)
                return results
            except Exception as e:
                # Malformed JSON: log it and retry immediately.
                print(e)
                traceback.print_exc()
        except Exception as e:
            # API error (e.g. a rate limit): log it and back off before retrying.
            print(e)
            traceback.print_exc()
            time.sleep(30)
    return {"error": "LLM response is not in correct format."}

def get_text_post_content(details, reference):
    """Draft the text of a post for a news item.

    Compiles the Langfuse-managed "post_generator" prompt with the news
    details and a character budget (490 minus the reference length, so the
    post still fits once the link is inserted), then places the reference
    link between the post body and its trailing hashtags.

    Returns a (content, success) tuple.
    """
    try:
        prompt = langfuse.get_prompt("post_generator")
        user_query = prompt.compile(NEWS_CONTENT=details, CHAR_LENGTH=490 - len(reference))
        response = post_content_llm.invoke(
            user_query,
            config={"callbacks": [langfuse_post_generation_handler]},
        )
        print("POST CONTENT RESPONSE:", response)
        content = response.content.replace('"', "")
        # Strip any <think>...</think> reasoning block from the reply.
        if "</think>" in content:
            content = content.split("</think>")[1]
        # Insert the reference link just before the first hashtag.
        hashtag_index = content.find("#")
        content = f"""{content[:hashtag_index]}
{reference}
{content[hashtag_index:]}"""
        return content, True
    except Exception as e:
        print(e)
        traceback.print_exc()
        return "", False