|
import gradio as gr |
|
import requests |
|
import os |
|
import json |
|
|
|
# Bearer token for the NVIDIA NVCF endpoint, read from the environment.
# NOTE(review): there is no check that API_KEY is actually set — if it is
# missing, the Authorization header becomes "Bearer None" and the API call
# fails with an auth error; verify the deployment environment. TODO confirm
api_key = os.getenv('API_KEY')
|
|
|
def call_mistral_7b_api(content, temperature=0.2, top_p=0.7, max_tokens=1024):
    """Call the Mistral-7B Instruct NVCF endpoint and return the generated text.

    Sends a single user message and consumes the server-sent-event stream,
    accumulating each chunk's ``choices[0].delta.content`` into one string.

    Args:
        content: User prompt forwarded as the single chat message.
        temperature: Sampling temperature passed through to the API.
        top_p: Nucleus-sampling probability mass passed through to the API.
        max_tokens: Upper bound on the number of generated tokens.

    Returns:
        The accumulated response text on success, or a short error message
        string when the request fails.  (The original code raised
        UnboundLocalError on any non-200 response because ``response_text``
        was only assigned on the success branch.)
    """
    print(f"Conteúdo: {content}")
    print(f"Temperatura: {temperature}")
    print(f"Top P: {top_p}")
    print(f"Max Tokens: {max_tokens}")

    invoke_url = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/35ec3354-2681-4d0e-a8dd-80325dcf7c63"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "accept": "text/event-stream",
        "content-type": "application/json",
    }
    payload = {
        "messages": [
            {
                "content": content,
                "role": "user"
            }
        ],
        "temperature": temperature,
        "top_p": top_p,
        "max_tokens": max_tokens,
        "stream": True
    }

    response_text = ""
    try:
        # timeout guards against a stalled connection hanging the UI forever
        response = requests.post(invoke_url, headers=headers, json=payload,
                                 stream=True, timeout=60)
    except requests.RequestException as e:
        print(f"Erro na requisição: {e}")
        return f"Request failed: {e}"

    # Context manager ensures the streaming connection is released.
    with response:
        if response.status_code != 200:
            print(f"Erro na requisição: {response.status_code}")
            try:
                error_details = response.json()
                print(error_details)
            except ValueError:
                print(response.text)
            # Return an explicit message instead of crashing (see docstring).
            return f"API error: HTTP {response.status_code}"

        for line in response.iter_lines():
            if not line:
                continue
            decoded_line = line.decode('utf-8').strip()

            if decoded_line.startswith('data: {'):
                json_str = decoded_line[6:]  # strip the 'data: ' SSE prefix
                try:
                    json_line = json.loads(json_str)
                    content_parts = (
                        json_line.get("choices", [{}])[0]
                        .get("delta", {})
                        .get("content", "")
                    )
                    response_text += content_parts
                except json.JSONDecodeError as e:
                    print(f"Erro ao decodificar JSON: {e}")
                    print(f"Linha problemática: {decoded_line}")
            elif decoded_line == 'data: [DONE]':
                print("Recebido sinal de conclusão da API.")
                break
            else:
                print(f"Linha ignorada (não é JSON ou sinal de conclusão): {decoded_line}")

    return response_text
|
# Gradio UI controls fed into call_mistral_7b_api, in positional order.
content_input = gr.Textbox(
    label="Content",
    lines=2,
    placeholder="Enter your content here...",
)
temperature_input = gr.Slider(
    label="Temperature", minimum=0, maximum=1, value=0.2, step=0.01
)
top_p_input = gr.Slider(
    label="Top P", minimum=0, maximum=1, value=0.7, step=0.01
)
max_tokens_input = gr.Slider(
    label="Max Tokens", minimum=1, maximum=1024, value=1024, step=1
)
|
|
|
# Wire the four UI controls to call_mistral_7b_api; the function's returned
# string is rendered in a plain text output component.  The description is
# raw HTML rendered by Gradio — it is runtime content, left untouched.
iface = gr.Interface(fn=call_mistral_7b_api,
                     inputs=[content_input, temperature_input, top_p_input, max_tokens_input],
                     outputs="text",
                     title="Mistral-7B API Explorer",
                     description="""
<div style="text-align: center; font-size: 1.5em; margin-bottom: 20px;">
<strong>Explore the capabilities of Mistral-7B Instruct</strong>
</div>
<p>
Dive into the world of AI with Mistral-7B Instruct, a state-of-the-art language model designed to follow instructions, complete requests, and generate creative text formats.
</p>
<p>
<strong>How to Use:</strong>
</p>
<ol>
<li>Type your <strong>content</strong> into the textbox, with your question or instruction.</li>
<li>Adjust <strong>Temperature</strong> and <strong>Top P</strong> sliders to fine-tune the creativity and variability of the output.</li>
<li>Set the <strong>Max Tokens</strong> to limit the length of the generated text.</li>
<li>Click <strong>Submit</strong> to see the model's response based on your input.</li>
</ol>
<p>
<strong>Powered by NVIDIA's advanced AI technologies, Mistral-7B API Explorer offers a unique platform for engaging with AI in text generation.</strong>
</p>
<p>
<strong>Created by:</strong> @artificialguybr (<a href="https://twitter.com/artificialguybr">Twitter</a>)
</p>
<p>
<strong>Discover more:</strong> <a href="https://artificialguy.com">artificialguy.com</a>
</p>
"""
                     )
|
|
|
# Launch the web UI only when run as a script, so importing this module
# (e.g. from tests or another app) does not start a server as a side effect.
if __name__ == "__main__":
    iface.launch()
|
|