Hilda Cran May and zac committed
Commit bcb7b65 · 0 Parent(s):

Duplicate from zac/llama-cpp-python


Co-authored-by: isAAC <[email protected]>

Files changed (4):
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +79 -0
  4. requirements.txt +11 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: (LLM on CPU)/ DOC Readers
+ emoji: 👾
+ colorFrom: pink
+ colorTo: red
+ sdk: gradio
+ sdk_version: 3.37.0
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ duplicated_from: zac/llama-cpp-python
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,73 @@
+ import gradio as gr
+ import copy
+ import time
+ import ctypes  # optional: for calling the llama.cpp C API directly
+ import llama_cpp
+ from llama_cpp import Llama
+ from huggingface_hub import hf_hub_download  # to fetch the model from the Hugging Face Hub
+ from dotenv import load_dotenv
+ from PyPDF2 import PdfReader
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.vectorstores import FAISS
+ from langchain.chat_models import ChatOpenAI
+ from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+
+
+ # Download the GGML weights from the Hub; n_ctx=2048 allows a longer context window.
+ llm = Llama(model_path=hf_hub_download(repo_id="TheBloke/Dolphin-Llama2-7B-GGML", filename="dolphin-llama2-7b.ggmlv3.q4_1.bin"), n_ctx=2048)
+
+ history = []
+
+ pre_prompt = " The user and the AI are having a conversation : <|endoftext|> \n "
+
+ def get_pdf_text(pdfs):
+     text = ""
+     for pdf in pdfs:
+         pdf_reader = PdfReader(pdf)
+         for page in pdf_reader.pages:
+             text += page.extract_text()
+     return text
+
+ def get_text_chunks(text):
+     text_splitter = CharacterTextSplitter(separator="\n",
+                                           chunk_size=1000, chunk_overlap=200, length_function=len)
+     chunks = text_splitter.split_text(text)
+     return chunks
+
+ def get_vectorstore(text_chunks):
+     embeddings = OpenAIEmbeddings()
+     # embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
+     vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
+     return vectorstore
+
+ def generate_text(input_text, history):
+     print("history ", history)
+     print("input ", input_text)
+     temp = ""
+     if history == []:
+         input_text_with_history = f"SYSTEM:{pre_prompt}" + "\n" + f"USER: {input_text} " + "\n" + " ASSISTANT:"
+     else:
+         input_text_with_history = f"{history[-1][1]}" + "\n"
+         input_text_with_history += f"USER: {input_text}" + "\n" + " ASSISTANT:"
+     print("new input", input_text_with_history)
+     output = llm(input_text_with_history, max_tokens=1024, stop=["<|prompter|>", "<|endoftext|>", "<|endoftext|> \n", "ASSISTANT:", "USER:", "SYSTEM:"], stream=True)
+     # Yield the running text so gr.ChatInterface streams tokens as they arrive.
+     for out in output:
+         stream = copy.deepcopy(out)
+         print(stream["choices"][0]["text"])
+         temp += stream["choices"][0]["text"]
+         yield temp
+
+
+ demo = gr.ChatInterface(generate_text,
+                         title="LLM on CPU",
+                         description="Running LLM with https://github.com/abetlen/llama-cpp-python. btw the text streaming thing was the hardest thing to implement",
+                         examples=["Hello", "Am I cool?", "Are tomatoes vegetables?"],
+                         cache_examples=True,
+                         retry_btn=None,
+                         undo_btn="Delete Previous",
+                         clear_btn="Clear",)
+ demo.queue(concurrency_count=1, max_size=5)
+ demo.launch()
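Note: app.py defines get_pdf_text, get_text_chunks, and get_vectorstore but never calls them; they follow the common LangChain 0.0.x PDF-chat pattern. A minimal sketch of how they would typically be wired together, assuming an OPENAI_API_KEY is available in the environment (the file name and question below are hypothetical):

    # Sketch only: chains the unused helpers from app.py; not part of this commit.
    from dotenv import load_dotenv
    from langchain.chat_models import ChatOpenAI
    from langchain.memory import ConversationBufferMemory
    from langchain.chains import ConversationalRetrievalChain

    load_dotenv()  # reads OPENAI_API_KEY from a .env file (assumes the key is set)

    raw_text = get_pdf_text(["example.pdf"])   # hypothetical PDF path
    chunks = get_text_chunks(raw_text)         # ~1000-char chunks with 200 overlap
    vectorstore = get_vectorstore(chunks)      # FAISS index over OpenAI embeddings

    chain = ConversationalRetrievalChain.from_llm(
        llm=ChatOpenAI(),
        retriever=vectorstore.as_retriever(),
        memory=ConversationBufferMemory(memory_key="chat_history", return_messages=True),
    )
    print(chain({"question": "What is this document about?"})["answer"])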
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ llama-cpp-python
+ huggingface_hub
+
+ langchain==0.0.222
+ PyPDF2==3.0.1
+ python-dotenv==1.0.0
+ streamlit==1.24.0
+ streamlit_chat==0.1.1
+ openai==0.27.8
+ tiktoken==0.4.0
+ faiss-cpu
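After installing these packages, the core streaming call in app.py can be smoke-tested on its own. A minimal sketch using the same model and call shape as app.py (the short prompt and token budget are illustrative):

    # Sketch only: exercises the llama-cpp-python streaming API used by app.py.
    from llama_cpp import Llama
    from huggingface_hub import hf_hub_download

    llm = Llama(model_path=hf_hub_download(repo_id="TheBloke/Dolphin-Llama2-7B-GGML",
                                           filename="dolphin-llama2-7b.ggmlv3.q4_1.bin"),
                n_ctx=2048)
    # stream=True yields completion chunks incrementally instead of one final dict.
    for chunk in llm("USER: Hello\n ASSISTANT:", max_tokens=32, stop=["USER:"], stream=True):
        print(chunk["choices"][0]["text"], end="", flush=True)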