Update LLM.py
LLM.py CHANGED
@@ -1,8 +1,8 @@
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
 from langchain_groq import ChatGroq
-from langchain_openai import ChatOpenAI
-from langchain_anthropic import ChatAnthropic
+from langchain_huggingface import ChatHuggingFace
+from langchain_huggingface import HuggingFaceEndpoint
 from langchain_google_genai import ChatGoogleGenerativeAI
 from dotenv import load_dotenv
 from huggingface_hub import login
@@ -15,18 +15,28 @@ os.environ['CURL_CA_BUNDLE'] = ''
 
 class Bot:
     def __init__(self):
-        # …
-        self.…
-        …
-        …
-        …
+        # Updated model lists: Remove gated or unsupported IDs
+        self.groq_models = [
+            'gemma-7b-it',
+            'llama3-70b-8192',
+            'llama3-8b-8192',
+            'mixtral-8x22b'
+        ]
+        self.hf_models = [
+            "01-ai/Yi-1.5-34B-Chat",
+            #"google/gemma-1.1-2b-it"
+        ]
+        # Use supported Google GenAI model names
+        self.google_models = [
+            "gemini-pro",
+            "gemini-pro-vision"
+        ]
+        # Master list for sampling (only include accessible models)
+        self.models = self.google_models + self.hf_models + self.groq_models
 
-        …
-        self.models = self.openai_models + self.anthropic_models + self.google_models + self.groq_models
-        …
-    def call_openai(self, model, temp=0.7, given_prompt="Hi"):
+    def call_groq(self, model, temp=0.7, given_prompt="Hi"):
         try:
-            llm = ChatOpenAI(model=model, temperature=temp)
+            llm = ChatGroq(model=model, temperature=temp)
             prompt = ChatPromptTemplate.from_messages([
                 ("system", "You are a helpful assistant."),
                 ("human", "{text}")
@@ -34,56 +44,43 @@ class Bot:
             chain = prompt | llm | StrOutputParser()
             return chain.invoke({"text": given_prompt})
         except Exception as e:
-            return f"⚠️ [OpenAI:{model}] {str(e)}"
+            return f"⚠️ [Groq:{model}] {str(e)}"
 
-    def call_anthropic(self, model, temp=0.7, given_prompt="Hi"):
+    def call_hf(self, model, temp=0.7, given_prompt="Hi"):
         try:
-            llm = ChatAnthropic(model=model, temperature=temp)
-            prompt = ChatPromptTemplate.from_messages([
-                ("system", "You are a helpful assistant."),
-                ("human", "{text}")
-            ])
-            chain = prompt | llm | StrOutputParser()
-            return chain.invoke({"text": given_prompt})
+            llm = HuggingFaceEndpoint(repo_id=model, temperature=temp)
+            chat = ChatHuggingFace(llm=llm, verbose=True)
+            template = """
+            You are a helpful assistant.
+            User: {query}
+            Answer:
+            """
+            prompt = PromptTemplate(template=template, input_variables=["query"])
+            chain = prompt | chat | StrOutputParser()
+            return chain.invoke({"query": given_prompt})
         except Exception as e:
-            return f"⚠️ [Anthropic:{model}] {str(e)}"
+            return f"⚠️ [HF:{model}] {str(e)}"
 
     def call_google(self, model, temp=0.7, given_prompt="Hi"):
         try:
             gm = ChatGoogleGenerativeAI(model=model, temperature=temp)
-            prompt = ChatPromptTemplate.from_messages([
-                ("system", "You are a helpful assistant."),
-                ("human", "{text}")
-            ])
+            prompt = ChatPromptTemplate.from_messages([("system", "You are a helpful assistant."), ("human", "{text}")])
             chain = prompt | gm | StrOutputParser()
             return chain.invoke({"text": given_prompt})
         except Exception as e:
             return f"⚠️ [Google:{model}] {str(e)}"
 
-    def call_groq(self, model, temp=0.7, given_prompt="Hi"):
-        try:
-            llm = ChatGroq(model=model, temperature=temp)
-            prompt = ChatPromptTemplate.from_messages([
-                ("system", "You are a helpful assistant."),
-                ("human", "{text}")
-            ])
-            chain = prompt | llm | StrOutputParser()
-            return chain.invoke({"text": given_prompt})
-        except Exception as e:
-            return f"⚠️ [Groq:{model}] {str(e)}"
-
     def response(self, model, prompt="Hi", temperature=0.7):
+        # Route to the correct provider and catch errors
         try:
-            if model in self.openai_models:
-                return self.call_openai(model, temp=temperature, given_prompt=prompt)
-            elif model in self.anthropic_models:
-                return self.call_anthropic(model, temp=temperature, given_prompt=prompt)
-            elif model in self.google_models:
-                return self.call_google(model, temp=temperature, given_prompt=prompt)
-            elif model in self.groq_models:
+            if model in self.groq_models:
                 return self.call_groq(model, temp=temperature, given_prompt=prompt)
-
-            return …
+            if model in self.hf_models:
+                return self.call_hf(model, temp=temperature, given_prompt=prompt)
+            if model in self.google_models:
+                return self.call_google(model, temp=temperature, given_prompt=prompt)
+            return f"❌ Unsupported model: {model}"
         except Exception as e:
-            return f"⚠️ Skipping …
+            return f"⚠️ Skipping {model} due to error: {str(e)}"
+
 
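For context, a minimal sketch of how the updated Bot class might be driven after this change. The driver script below is hypothetical (not part of the commit), and it assumes the .env file loaded by load_dotenv supplies the provider credentials (GROQ_API_KEY, GOOGLE_API_KEY, and a Hugging Face token for HuggingFaceEndpoint):

# hypothetical driver script, assuming LLM.py sits alongside it
from LLM import Bot

bot = Bot()

# Single call: response() routes "llama3-8b-8192" to call_groq()
print(bot.response("llama3-8b-8192", prompt="Say hello in five words.", temperature=0.3))

# Sweep every configured model; the call_* methods return "⚠️ ..." strings
# on failure instead of raising, so one gated or unavailable model
# does not stop the loop.
for model in bot.models:
    print(f"{model}: {bot.response(model, prompt='Hi')}")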