Update app.py
app.py CHANGED

@@ -1,12 +1,12 @@
 import os
 import sys
+import requests
 from langchain.chains import ConversationalRetrievalChain
 from langchain.document_loaders import PyPDFLoader, Docx2txtLoader, TextLoader
 from langchain.text_splitter import CharacterTextSplitter
 from langchain.vectorstores import Chroma
 from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.llms.base import LLM
-from huggingface_hub import InferenceClient
 import gradio as gr
 
 # workaround for sqlite in HF spaces
@@ -44,36 +44,43 @@ vectorstore = Chroma(
 vectorstore.add_texts(texts=texts, metadatas=metadatas)
 vectorstore.persist()
 
-# Get
-
-if
-    raise ValueError("
+# Get DeepSeek API key from env
+DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
+if DEEPSEEK_API_KEY is None:
+    raise ValueError("DEEPSEEK_API_KEY environment variable is not set.")
 
-
+# DeepSeek API endpoint
+DEEPSEEK_API_URL = "https://api.deepseek.com/v1/chat/completions"
 
-#
-
-
-
-class HuggingFaceInferenceLLM(LLM):
-    """LLM that queries HuggingFace Inference API."""
-
-    client: InferenceClient = client
+# Wrap DeepSeek API into LangChain LLM
+class DeepSeekLLM(LLM):
+    """LLM that queries DeepSeek's API."""
+    api_key: str = DEEPSEEK_API_KEY
 
     def _call(self, prompt, stop=None, run_manager=None, **kwargs):
-
-
-
-
-
-
-
+        headers = {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json"
+        }
+        payload = {
+            "model": "deepseek-chat",  # adjust if you have a specific model name
+            "messages": [
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": prompt}
+            ],
+            "temperature": 0.7,
+            "max_tokens": 512
+        }
+        response = requests.post(DEEPSEEK_API_URL, headers=headers, json=payload)
+        response.raise_for_status()
+        data = response.json()
+        return data["choices"][0]["message"]["content"].strip()
 
     @property
     def _llm_type(self) -> str:
-        return "
+        return "deepseek_api"
 
-llm =
+llm = DeepSeekLLM()
 
 # Conversational chain
 chain = ConversationalRetrievalChain.from_llm(
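
For a quick local check that the key and endpoint respond before relying on them inside the Space, a standalone request along the lines below can be run. This is a minimal sketch and not part of the commit: the endpoint, model name, and payload shape mirror the values used in DeepSeekLLM._call above, and the prompt is a placeholder.

import os
import requests

DEEPSEEK_API_URL = "https://api.deepseek.com/v1/chat/completions"

# Same request shape as DeepSeekLLM._call; the prompt is just a placeholder.
payload = {
    "model": "deepseek-chat",
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Reply with the single word: ok"},
    ],
    "temperature": 0.7,
    "max_tokens": 8,
}
headers = {
    "Authorization": f"Bearer {os.environ['DEEPSEEK_API_KEY']}",  # export the key before running
    "Content-Type": "application/json",
}

response = requests.post(DEEPSEEK_API_URL, headers=headers, json=payload, timeout=30)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"].strip())

If this prints a reply, the same value can be stored as the DEEPSEEK_API_KEY secret in the Space settings so that os.getenv picks it up at startup.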