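"""Gradio chatbot for the French penal code ("Chatbot-law-code-pénal").

For each query, the app fetches supporting context from an Embedchain pipeline
and asks a g4f-backed LLM (via langchain_g4f) to answer using that context.
"""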
import gradio as gr
import requests
from g4f import Provider, models
from langchain.llms.base import LLM
import g4f
from langchain_g4f import G4FLLM
g4f.debug.logging = True # Enable logging
g4f.check_version = False # Disable automatic version checking
#print(g4f.version) # Check version
#print(g4f.Provider.Ails.params) # Supported args
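# Embedchain pipeline "context" endpoint; the response text is used below
# as grounding context for the LLM prompt.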
url = "https://app.embedchain.ai/api/v1/pipelines/024a60fa-cfc3-41a2-a27b-2f6a04c1a6fe/context/"
def greet(name):
    # Retrieve context relevant to the query from the Embedchain pipeline.
    payload = {
        "query": f"{name}",
        "count": 25,
    }
    headers = {
        "Authorization": "Token ec-fBwP02l3yodIa40BHkSEdhqVQmelK8pNsbrUew2J",
    }
    response = requests.request("POST", url, headers=headers, json=payload)
    print(response.text)
    print(name)
    c = response.text

    # Ask a g4f-backed LLM to answer the query using only the retrieved context.
    llm: LLM = G4FLLM(model=models.gpt_35_turbo_16k)
    res = llm(f"""
Use the following pieces of context to answer the query at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
{c}
Query: {name}
Helpful Answer:
""")
    print(res)
    return res
iface = gr.Interface(
    fn=greet,
    inputs="text",
    outputs=gr.Textbox(label="Réponse"),
    title="bot",
    description="Chatbot-law-code-pénal",
)
iface.launch()