import gradio as gr
import os
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceEndpoint
from langchain.memory import ConversationBufferMemory
from pathlib import Path
import chromadb
from unidecode import unidecode
import re
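
# Assumed environment (inferred from the imports above, not pinned by this
# script): pip install gradio langchain langchain-community chromadb pypdf
# sentence-transformers unidecode. HuggingFaceEndpoint calls the hosted
# Hugging Face Inference API and expects an access token (e.g. via the
# HUGGINGFACEHUB_API_TOKEN environment variable).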

# List of available LLM models (Hugging Face repo ids)
list_llm = [
    "mistralai/Mistral-7B-Instruct-v0.2",
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "mistralai/Mistral-7B-Instruct-v0.1",
    "google/gemma-7b-it",
    "google/gemma-2b-it",
    "HuggingFaceH4/zephyr-7b-beta",
    "HuggingFaceH4/zephyr-7b-gemma-v0.1",
    "meta-llama/Llama-2-7b-chat-hf",
    "microsoft/phi-2",
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    "mosaicml/mpt-7b-instruct",
    "tiiuae/falcon-7b-instruct",
    "google/flan-t5-xxl"
]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]

# Load PDF documents and split them into chunks
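# (e.g. chunk_size=1000 with chunk_overlap=100 turns a 2,500-character page
# into roughly three chunks, adjacent chunks sharing about 100 characters)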
def load_doc(list_file_path, chunk_size, chunk_overlap):
    loaders = [PyPDFLoader(x) for x in list_file_path]
    pages = []
    for loader in loaders:
        pages.extend(loader.load())
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap
    )
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits

# Create the vector database
def create_db(splits, collection_name):
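    # With no model_name argument, HuggingFaceEmbeddings defaults to the
    # sentence-transformers/all-mpnet-base-v2 model, downloaded on first use.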
    embedding = HuggingFaceEmbeddings()
    new_client = chromadb.PersistentClient(path="./chroma_db")
    vectordb = Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        client=new_client,
        collection_name=collection_name,
    )
    return vectordb

# Initialize the QA chain with the chosen LLM
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    progress(0.1, desc="Initializing HF tokenizer...")
    progress(0.5, desc="Initializing HF Hub...")
    if llm_model == "mistralai/Mixtral-8x7B-Instruct-v0.1":
        llm = HuggingFaceEndpoint(
            repo_id=llm_model,
            temperature=temperature,
            max_new_tokens=max_tokens,
            top_k=top_k,
            # Not a declared HuggingFaceEndpoint argument; it is collected
            # into model_kwargs and likely ignored by the hosted API.
            load_in_8bit=True,
        )
    elif llm_model in ["HuggingFaceH4/zephyr-7b-gemma-v0.1", "mosaicml/mpt-7b-instruct"]:
        raise gr.Error("This LLM is too large to be loaded automatically on the free inference endpoint")
    elif llm_model == "microsoft/phi-2":
        llm = HuggingFaceEndpoint(
            repo_id=llm_model,
            temperature=temperature,
            max_new_tokens=max_tokens,
            top_k=top_k,
            trust_remote_code=True,
            torch_dtype="auto",
        )
    elif llm_model == "TinyLlama/TinyLlama-1.1B-Chat-v1.0":
        llm = HuggingFaceEndpoint(
            repo_id=llm_model,
            temperature=temperature,
            max_new_tokens=250,
            top_k=top_k,
        )
    elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
        raise gr.Error("O modelo Llama-2-7b-chat-hf requer uma assinatura Pro...")
    else:
        llm = HuggingFaceEndpoint(
            repo_id=llm_model,
            temperature=temperature,
            max_new_tokens=max_tokens,
            top_k=top_k,
        )

    progress(0.75, desc="Definindo memória de buffer...")
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key='answer',
        return_messages=True
    )
    retriever = vector_db.as_retriever()
    progress(0.8, desc="Definindo cadeia de recuperação...")
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        verbose=False,
    )
    progress(0.9, desc="Concluído!")
    return qa_chain

# Generate a valid Chroma collection name from a file path
def create_collection_name(filepath):
    collection_name = Path(filepath).stem
    collection_name = collection_name.replace(" ", "-")
    collection_name = unidecode(collection_name)
    collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
    collection_name = collection_name[:50]
    if len(collection_name) < 3:
        collection_name = collection_name + 'xyz'
    if not collection_name[0].isalnum():
        collection_name = 'A' + collection_name[1:]
    if not collection_name[-1].isalnum():
        collection_name = collection_name[:-1] + 'Z'
    return collection_name
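
# Example: create_collection_name("docs/Relatório Anual 2023.pdf")
# returns "Relatorio-Anual-2023" (accents stripped, separators collapsed).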

# Initialize the vector database from the uploaded files
def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
    list_file_path = [x.name for x in list_file_obj if x is not None]
    progress(0.1, desc="Criando nome da coleção...")
    collection_name = create_collection_name(list_file_path[0])
    progress(0.25, desc="Carregando documento...")
    doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
    progress(0.5, desc="Gerando banco de dados vetorial...")
    vector_db = create_db(doc_splits, collection_name)
    progress(0.9, desc="Concluído!")
    return vector_db, collection_name, "Completo!"

# Initialize the LLM chain
def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    llm_name = list_llm[llm_option]
    qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
    return qa_chain, "Complete!"

# Format the chat history for the QA chain
def format_chat_history(message, chat_history):
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
        formatted_chat_history.append(f"User: {user_message}")
        formatted_chat_history.append(f"Assistant: {bot_message}")
    return formatted_chat_history

# Run one conversation turn against the QA chain
def conversation(qa_chain, message, history):
    formatted_chat_history = format_chat_history(message, history)
    response = qa_chain({"question": message, "chat_history": formatted_chat_history})
    response_answer = response["answer"]
    if response_answer.find("Resposta útil:") != -1:
        response_answer = response_answer.split("Resposta útil:")[-1]
    response_sources = response["source_documents"]
    response_source1 = response_sources[0].page_content.strip()
    response_source2 = response_sources[1].page_content.strip()
    response_source3 = response_sources[2].page_content.strip()
    response_source1_page = response_sources[0].metadata["page"] + 1
    response_source2_page = response_sources[1].metadata["page"] + 1
    response_source3_page = response_sources[2].metadata["page"] + 1
    new_history = history + [(message, response_answer)]
    return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page

# Collect the local paths of the uploaded files
def upload_file(file_obj):
    list_file_path = []
    for file in file_obj:
        # Bug fix: take the path from each file, not from the list object
        file_path = file.name
        list_file_path.append(file_path)
    return list_file_path

def demo():
    with gr.Blocks() as demo:
        vector_db = gr.State()
        qa_chain = gr.State()
        collection_name = gr.State()
        
        gr.Markdown(
        """<center><h2>PDF-based Chatbot</h2></center>
        <h3>Ask any question about your PDF documents</h3>""")
        gr.Markdown(
        """<b>Note:</b> This AI assistant, built on LangChain and open-source LLMs, performs retrieval-augmented generation using the content of the uploaded PDF documents.""")

        with gr.Tab("Iniciar Chatbot"):
            with gr.Column():
                gr.Markdown("Por favor, faça upload de um ou mais arquivos PDF.")
                file_upload = gr.File(label="Carregar PDFs", file_count="multiple", file_types=["pdf"])
                chunk_size = gr.Slider(minimum=500, maximum=1500, step=100, label="Tamanho do Chunk", value=1000)
                chunk_overlap = gr.Slider(minimum=0, maximum=500, step=10, label="Sobreposição do Chunk", value=100)
                gr.Markdown("<center><h3>Escolha o modelo LLM desejado:</h3></center>")
                llm_option = gr.Dropdown(choices=list_llm_simple, value=list_llm_simple[0])
                gr.Markdown(
                    """<center><h3>Escolha os parâmetros do LLM desejados:</h3></center>""")
                llm_temperature = gr.Slider(minimum=0, maximum=1, step=0.01, label="Temperatura", value=0.7)
                max_tokens = gr.Slider(minimum=100, maximum=500, step=10, label="Tokens Máximos", value=150)
                top_k = gr.Slider(minimum=10, maximum=50, step=5, label="Top-K", value=40)

                # gr.Progress is not an output component; use Textboxes for
                # the status strings the handlers return.
                db_status = gr.Textbox(label="Vector database status", value="None", interactive=False)
                llm_status = gr.Textbox(label="LLM status", value="None", interactive=False)
                submit_button = gr.Button("Start Chatbot")

                # Chain the two steps so the LLM is initialized only after
                # the vector database exists.
                submit_button.click(initialize_database, inputs=[file_upload, chunk_size, chunk_overlap], outputs=[vector_db, collection_name, db_status]).then(
                    initialize_LLM, inputs=[llm_option, llm_temperature, max_tokens, top_k, vector_db], outputs=[qa_chain, llm_status])
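
        # The conversation() handler above is never wired into the UI; the
        # sketch below is one minimal way to expose it (component names here
        # are illustrative, not from the original script).
        with gr.Tab("Conversation"):
            chatbot = gr.Chatbot(height=300)
            with gr.Accordion("Document references", open=False):
                source1 = gr.Textbox(label="Reference 1", lines=2)
                source1_page = gr.Number(label="Page")
                source2 = gr.Textbox(label="Reference 2", lines=2)
                source2_page = gr.Number(label="Page")
                source3 = gr.Textbox(label="Reference 3", lines=2)
                source3_page = gr.Number(label="Page")
            msg = gr.Textbox(label="Message", placeholder="Ask a question about your PDFs")
            # conversation() returns the chain, a cleared textbox, the new
            # history, then three (source text, page number) pairs.
            msg.submit(
                conversation,
                inputs=[qa_chain, msg, chatbot],
                outputs=[qa_chain, msg, chatbot,
                         source1, source1_page,
                         source2, source2_page,
                         source3, source3_page],
            )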
    return demo

if __name__ == "__main__":
    demo().launch()