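"""Gradio demo for Mistral-7B Instruct served through NVIDIA's NVCF API.

The app sends the user's prompt to the hosted model, streams the reply back as
server-sent events (SSE), and shows the assembled text in the UI. An API_KEY
environment variable holding a valid NVIDIA API key is required.
"""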
import gradio as gr
import requests
import os
import json

api_key = os.getenv('API_KEY')

def call_mistral_7b_api(content, temperature=0.2, top_p=0.7, max_tokens=1024):
    print(f"Conteúdo: {content}")
    print(f"Temperatura: {temperature}")
    print(f"Top P: {top_p}")
    print(f"Max Tokens: {max_tokens}")

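    # NVCF function endpoint hosting the Mistral-7B Instruct model; the
    # "accept: text/event-stream" header requests a streamed (SSE) response.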
    invoke_url = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/35ec3354-2681-4d0e-a8dd-80325dcf7c63"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "accept": "text/event-stream",
        "content-type": "application/json",
    }

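    # Single-turn chat payload: the user's message plus the sampling parameters.
    # "stream": True makes the API return the completion incrementally.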
    payload = {
        "messages": [
            {
                "content": content,
                "role": "user"
            }
        ],
        "temperature": temperature,
        "top_p": top_p,
        "max_tokens": max_tokens,
        "stream": True
    }

    response = requests.post(invoke_url, headers=headers, json=payload, stream=True)
    if response.status_code != 200:
        print(f"Request failed: {response.status_code}")
        try:
            error_details = response.json()
            print(error_details)
        except ValueError:
            print(response.text)
        # Return the status so the Gradio UI shows something instead of an empty output
        return f"Request failed with status code {response.status_code}."
    else:
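        # Assemble the streamed completion: each "data:" line carries a JSON
        # chunk whose choices[0].delta.content holds a piece of the reply.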
        response_text = ""
        for line in response.iter_lines():
            if line:
                decoded_line = line.decode('utf-8').strip()

                # Check whether the line carries a valid JSON payload
                if decoded_line.startswith('data: {'):
                    json_str = decoded_line[6:]  # Strip the leading 'data: ' prefix

                    try:
                        json_line = json.loads(json_str)
                        content_parts = json_line.get("choices", [{}])[0].get("delta", {}).get("content", "")
                        response_text += content_parts
                    except json.JSONDecodeError as e:
                        print(f"Error decoding JSON: {e}")
                        print(f"Problematic line: {decoded_line}")
                elif decoded_line == 'data: [DONE]':
                    print("Received completion signal from the API.")
                    break  # Exit the loop once the API signals it has finished streaming
                else:
                    print(f"Skipping line (neither JSON data nor a completion signal): {decoded_line}")

        return response_text
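
# Quick local test without the web UI (a minimal sketch; assumes API_KEY is set
# and uses an arbitrary example prompt):
#
#     print(call_mistral_7b_api("Write a haiku about GPUs.", max_tokens=128))
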
content_input = gr.Textbox(lines=2, placeholder="Enter your content here...", label="Content")
temperature_input = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.2, label="Temperature")
top_p_input = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.7, label="Top P")
max_tokens_input = gr.Slider(minimum=1, maximum=1024, step=1, value=1024, label="Max Tokens")

iface = gr.Interface(fn=call_mistral_7b_api,
                     inputs=[content_input, temperature_input, top_p_input, max_tokens_input],
                     outputs="text",
                     title="Mistral-7B API Explorer",
                     description="""
<div style="text-align: center; font-size: 1.5em; margin-bottom: 20px;">
<strong>Explore the capabilities of Mistral-7B Instruct</strong>
</div>
<p>
    Dive into the world of AI with Mistral-7B Instruct, a state-of-the-art language model designed to follow instructions, complete requests, and generate creative text formats.
</p>
<p>
    <strong>How to Use:</strong>
</p>
<ol>
    <li>Type your <strong>content</strong>, a question or an instruction, into the textbox.</li>
    <li>Adjust <strong>Temperature</strong> and <strong>Top P</strong> sliders to fine-tune the creativity and variability of the output.</li>
    <li>Set the <strong>Max Tokens</strong> to limit the length of the generated text.</li>
    <li>Click <strong>Submit</strong> to see the model's response based on your input.</li>
</ol>
<p>
    <strong>Powered by NVIDIA's AI infrastructure, Mistral-7B API Explorer offers a straightforward platform for experimenting with AI text generation.</strong>
</p>
<p>
    <strong>Created by:</strong> @artificialguybr (<a href="https://twitter.com/artificialguybr">Twitter</a>)
</p>
<p>
    <strong>Discover more:</strong> <a href="https://artificialguy.com">artificialguy.com</a>
</p>
"""
                    )

iface.launch()
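
# Note: launch() uses Gradio's default settings; when running locally, one could
# pass share=True to launch() to get a temporary public link (a standard Gradio option).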