import gradio as gr
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TextIteratorStreamer,
    pipeline,
)
import os
import glob
import gc
import textwrap
import time
from threading import Thread

import spaces

import langchain

# loaders
from langchain.document_loaders import PyPDFLoader, DirectoryLoader

# splits
from langchain.text_splitter import RecursiveCharacterTextSplitter

# prompts
from langchain import PromptTemplate

# vector stores
from langchain_community.vectorstores import FAISS

# models
from langchain.llms import HuggingFacePipeline
from langchain.embeddings import HuggingFaceInstructEmbeddings

# retrievers
from langchain.chains import RetrievalQA


import subprocess

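# Install flash-attn at runtime; FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE skips compiling
# the CUDA kernels during pip install.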
subprocess.run(
    "pip install flash-attn --no-build-isolation",
    env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
    shell=True,
)


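# Central configuration: generation settings, text splitting, embedding model, retrieval depth, and paths.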
class CFG:
    DEBUG = False
    
    ### LLM
    model_name = 'justinj92/phi3-orpo'
    temperature = 0.7
    top_p = 0.90
    repetition_penalty = 1.15
    max_len = 8192
    max_new_tokens = 512

    ### splitting
    split_chunk_size = 800
    split_overlap = 400
    
    ### embeddings
    embeddings_model_repo = 'BAAI/bge-base-en-v1.5'

    ### similar passages
    k = 6
    
    ### paths
    PDFs_path = './data'
    Embeddings_path =  './embeddings/input'
    Output_folder = './ml-papers-vector'
    
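# Load every PDF under CFG.PDFs_path; PyPDFLoader yields one document per page.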
loader = DirectoryLoader(CFG.PDFs_path, glob="*.pdf", loader_cls=PyPDFLoader)

documents = loader.load()


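# Split the pages into overlapping chunks so retrieved passages keep surrounding context.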
text_splitter = RecursiveCharacterTextSplitter(chunk_size = CFG.split_chunk_size, chunk_overlap = CFG.split_overlap)
texts = text_splitter.split_documents(documents)

embeddings = HuggingFaceInstructEmbeddings(model_name=CFG.embeddings_model_repo, model_kwargs={"device": "cuda"})

# Build the FAISS index on the first run and reuse the saved copy afterwards.
faiss_index_path = f"{CFG.Output_folder}/faiss_index_ml_papers"
if not os.path.exists(f"{faiss_index_path}/index.faiss"):
    vectordb = FAISS.from_documents(documents=texts, embedding=embeddings)
    vectordb.save_local(faiss_index_path)

vectordb = FAISS.load_local(faiss_index_path, embeddings, allow_dangerous_deserialization=True)

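# Load the ORPO-tuned Phi-3 model in bfloat16 with flash-attention for inference.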
@spaces.GPU
def build_model(model_repo = CFG.model_name):
    tokenizer = AutoTokenizer.from_pretrained(model_repo)
    model = AutoModelForCausalLM.from_pretrained(model_repo, attn_implementation="flash_attention_2", torch_dtype=torch.bfloat16)

    return tokenizer, model


tok, model = build_model(model_repo = CFG.model_name)

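# Stop on the tokenizer's EOS token plus Phi-3 special tokens (e.g. <|end|>, <|endoftext|>).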
terminators = [
    tok.eos_token_id,
    32007,
    32011,
    32001,
    32000
]


# Run on the GPU when available, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
    print(f"Using GPU: {torch.cuda.get_device_name(device)}")
else:
    device = torch.device("cpu")
    print("Using CPU")

model = model.to(device)

pipe = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tok,
    eos_token_id=terminators,
    do_sample=True,
    max_new_tokens=CFG.max_new_tokens,
    temperature=CFG.temperature,
    top_p=CFG.top_p,
    repetition_penalty=CFG.repetition_penalty,
)

llm = HuggingFacePipeline(pipeline = pipe)

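# RAG prompt in Phi-3 chat format: system instructions, retrieved context, then the user question.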
prompt_template = """
<|system|>

You are an expert assistant that answers questions about machine learning and Large Language Models (LLMs).

You are given some extracted parts from machine learning papers along with a question.

If you don't know the answer, just say "I don't know." Don't try to make up an answer.

It is very important that you ALWAYS answer the question in the same language the question is in. Remember to always do that.

Use only the following pieces of context to answer the question at the end.

<|end|>

<|user|>

Context: {context}

Question is below. Remember to answer in the same language:

Question: {question}

<|end|>

<|assistant|>

"""


PROMPT = PromptTemplate(
    template = prompt_template, 
    input_variables = ["context", "question"]
)

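# Return the CFG.k most similar chunks from the FAISS index for each query.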
retriever = vectordb.as_retriever(
    search_type = "similarity",
    search_kwargs = {"k": CFG.k}
)

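# The "stuff" chain type places all retrieved chunks into a single prompt for the LLM.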
qa_chain = RetrievalQA.from_chain_type(
    llm = llm,
    chain_type = "stuff", # map_reduce, map_rerank, stuff, refine
    retriever = retriever, 
    chain_type_kwargs = {"prompt": PROMPT},
    return_source_documents = True,
    verbose = False
)

@spaces.GPU
def wrap_text_preserve_newlines(text, width=1500):
    # Split the input text into lines based on newline characters
    lines = text.split('\n')

    # Wrap each line individually
    wrapped_lines = [textwrap.fill(line, width=width) for line in lines]

    # Join the wrapped lines back together using newline characters
    wrapped_text = '\n'.join(wrapped_lines)

    return wrapped_text

@spaces.GPU
def process_llm_response(llm_response):
    ans = wrap_text_preserve_newlines(llm_response['result'])
    
    sources_used = ' \n'.join(
        [
            source.metadata['source'].split('/')[-1][:-4]
            + ' - page: '
            + str(source.metadata['page'])
            for source in llm_response['source_documents']
        ]
    )
    
    ans = ans + '\n\nSources: \n' + sources_used
    
    ### return only the text after the pattern
    pattern = "<|assistant|>"
    index = ans.find(pattern)
    if index != -1:
        ans = ans[index + len(pattern):]    
    
    return ans.strip()
    
@spaces.GPU
def llm_ans(message, history, temperature=CFG.temperature, do_sample=True, max_tokens=CFG.max_new_tokens):
    # gr.ChatInterface calls fn(message, history, *additional_inputs); the extra UI
    # parameters are accepted here, while generation uses the settings baked into the
    # pipeline above.
    llm_response = qa_chain.invoke(message)
    ans = process_llm_response(llm_response)

    return ans


# @spaces.GPU(duration=60)
# def chat(message, history, temperature, do_sample, max_tokens):
#     chat = [{"role": "system", "content": "You are ORPO Tuned Phi Beast. Answer all questions in the most helpful way. No yapping."}]
#     for item in history:
#         chat.append({"role": "user", "content": item[0]})
#         if item[1] is not None:
#             chat.append({"role": "assistant", "content": item[1]})
#     chat.append({"role": "user", "content": message})
#     messages = tok.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
#     model_inputs = tok([messages], return_tensors="pt").to(device)
#     streamer = TextIteratorStreamer(
#         tok, timeout=20.0, skip_prompt=True, skip_special_tokens=True
#     )
#     generate_kwargs = dict(
#         model_inputs,
#         streamer=streamer,
#         max_new_tokens=max_tokens,
#         do_sample=True,
#         temperature=temperature,
#         eos_token_id=terminators,
#     )

#     if temperature == 0:
#         generate_kwargs["do_sample"] = False

#     t = Thread(target=model.generate, kwargs=generate_kwargs)
#     t.start()

#     partial_text = ""
#     for new_text in streamer:
#         partial_text += new_text
#         yield partial_text

#     yield partial_text


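# Gradio chat UI; the additional inputs are passed to llm_ans alongside the message and history.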
demo = gr.ChatInterface(
    fn=llm_ans,
    examples=[["Write me a poem about Machine Learning."]],
    # multimodal=False,
    additional_inputs_accordion=gr.Accordion(
        label="⚙️ Parameters", open=False, render=False
    ),
    additional_inputs=[
        gr.Slider(
            minimum=0, maximum=1, step=0.1, value=0.9, label="Temperature", render=False
        ),
        gr.Checkbox(label="Sampling", value=True),
        gr.Slider(
            minimum=128,
            maximum=4096,
            step=1,
            value=512,
            label="Max new tokens",
            render=False,
        ),
    ],
    stop_btn="Stop Generation",
    title="Chat With LLMs",
    description="Now Running Phi3-ORPO",
)
demo.launch()