import gradio as gr
import requests
import json
import os
from dotenv import load_dotenv

load_dotenv()

API_URL = os.getenv("API_URL")
API_TOKEN = os.getenv("API_TOKEN")
if not API_URL or not API_TOKEN:
    raise ValueError("API_URL or API_TOKEN is not set")

print("[INFO] starting:")
print(f"[INFO] API_URL: {API_URL[:6]}...{API_URL[-12:]}")
print(f"[INFO] API_TOKEN: {API_TOKEN[:10]}...{API_TOKEN[-10:]}")

"""
For more information on `huggingface_hub` Inference API support, please check the docs:
https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""


def respond(
    message,
    history: list[dict],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {API_TOKEN}",
    }
    data = {
        "model": "/data/DMind-1",
        "stream": True,
        "messages": messages,
        "temperature": temperature,
        "top_p": top_p,
        "top_k": 20,
        "min_p": 0.1,
        "max_tokens": max_tokens,  # honor the "Max new tokens" slider
    }

    try:
        with requests.post(API_URL, headers=headers, json=data, stream=True) as r:
            if r.status_code == 200:
                current_response = ""
                for line in r.iter_lines():
                    if not line:
                        continue
                    line = line.decode("utf-8")
                    if not line.startswith("data: "):
                        continue
                    try:
                        json_response = json.loads(line[6:])
                    except json.JSONDecodeError:
                        continue
                    if "choices" in json_response and len(json_response["choices"]) > 0:
                        delta = json_response["choices"][0].get("delta", {})
                        content = delta.get("content")
                        if content:
                            current_response += content
                            # Hold back the last 16 characters so partially
                            # streamed markers are not rendered mid-chunk.
                            if len(current_response) > 16:
                                if "<think>" in current_response:
                                    current_response = current_response.replace("<think>", "```")
                                if "</think>" in current_response:
                                    current_response = current_response.replace("</think>", "```")
                                if "**Final Answer**" in current_response:
                                    current_response = current_response.replace("**Final Answer**", "")
                                formatted_response = current_response[:-16]
                                # Escape angle brackets and markdown asterisks before rendering.
                                formatted_response = formatted_response.replace("<", "&lt;").replace(">", "&gt;")
                                formatted_response = formatted_response.replace("*", "\\*")
                                yield formatted_response
                # Flush whatever remains once the stream ends.
                if current_response:
                    if "<think>" in current_response:
                        current_response = current_response.replace("<think>", "```")
                    if "</think>" in current_response:
                        current_response = current_response.replace("</think>", "```")
                    if "**Final Answer**" in current_response:
                        current_response = current_response.replace("**Final Answer**", "")
                    formatted_response = current_response
                    formatted_response = formatted_response.replace("<", "&lt;").replace(">", "&gt;")
                    formatted_response = formatted_response.replace("*", "\\*")
                    yield formatted_response
            else:
                print(f"[ERROR] Bad status code: {r.status_code}, response: {r.text}")
                yield "Service temporarily unavailable"
    except Exception as e:
        print(f"[ERROR] Request error: {e}")
        yield "Service error occurred"


"""
For information on how to customize the ChatInterface, peruse the gradio docs:
https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value="You are DMind AI Assistant, built by DMind.AI. "
            "Never present as Qwen, ChatGPT, Tongyi, OpenAI, etc.",
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=32768, value=16384, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.6, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    type="messages",
    css="""
    .prose pre {
        white-space: pre-wrap !important;
        word-wrap: break-word !important;
        overflow-wrap: break-word !important;
        max-width: 100% !important;
    }
    .prose code {
        white-space: pre-wrap !important;
        word-wrap: break-word !important;
        overflow-wrap: break-word !important;
        max-width: 100% !important;
    }
    .prose pre code {
        white-space: pre-wrap !important;
        word-wrap: break-word !important;
        overflow-wrap: break-word !important;
        max-width: 100% !important;
    }
    """,
)

if __name__ == "__main__":
    demo.launch()