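# Gradio front end that forwards a user query to a streaming chat API
# (a Dify-style /chat-messages endpoint configured via LLM_URL and LLM_API)
# and returns the concatenated streamed answer.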
import os
import json
import asyncio

import aiohttp
import gradio as gr

# API settings are read from environment variables:
#   LLM_URL -- base URL of the chat API (the code below targets a Dify-style
#              /chat-messages endpoint)
#   LLM_API -- bearer token sent in the Authorization header
LLM_API = os.environ.get("LLM_API")
LLM_URL = os.environ.get("LLM_URL")

USER_ID = "HuggingFace Space"  # Placeholder user ID reported to the API

async def send_chat_message(LLM_URL, LLM_API, user_input):
    payload = {
        "inputs": {},
        "query": user_input,
        "response_mode": "streaming",
        "conversation_id": "",
        "user": USER_ID,
    }
    print("Sending chat message payload:", payload)  # Debug information

    async with aiohttp.ClientSession() as session:
        try:
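            # The endpoint is expected to stream Server-Sent Events: each non-empty
            # line of the body looks like 'data: {...}', and the JSON's "answer"
            # field carries the next chunk of the reply (assumption inferred from
            # the parsing below).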
            async with session.post(
                url=f"{LLM_URL}/chat-messages",
                headers={"Authorization": f"Bearer {LLM_API}"},
                json=payload,
                timeout=aiohttp.ClientTimeout(total=60)
            ) as response:
                if response.status != 200:
                    print(f"Error: {response.status}")
                    return f"Error: {response.status}"

                full_response = []
                async for line in response.content:
                    line = line.decode('utf-8').strip()
                    if not line:
                        continue
                    if "data: " not in line:
                        continue
                    try:
                        print("Received line:", line)  # Debug information
                        data = json.loads(line.split("data: ")[1])
                        if "answer" in data:
                            full_response.append(data["answer"])
                    except (IndexError, json.JSONDecodeError) as e:
                        print(f"Error parsing line: {line}, error: {e}")  # Debug information
                        continue

                if full_response:
                    return ''.join(full_response).strip()
                else:
                    return "Error: no answer found in the streamed response"
        except Exception as e:
            print(f"Exception: {e}")
            return f"Exception: {e}"
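
# A streamed line handled above might look like the following (illustrative only,
# based on the parsing logic rather than an actual server log):
#   data: {"event": "message", "answer": "partial text", "conversation_id": "..."}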

async def handle_input(user_input):
    print(f"Handling input: {user_input}")
    chat_response = await send_chat_message(LLM_URL, LLM_API, user_input)
    print("Chat response:", chat_response)  # Debug information
    return chat_response

def run_sync(func, *args):
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    result = loop.run_until_complete(func(*args))
    loop.close()
    return result
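
# run_sync is a helper for synchronous callers; Gradio can call the async
# handle_input directly, so it is not used below. Hypothetical usage:
#   answer = run_sync(handle_input, "台積電")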

# Define Gradio interface
user_input = gr.Textbox(label='請輸入您想查詢的關鍵公司名稱')  # "Enter the company name you want to look up"
examples = [
    ["加密貨幣"],
    ["國泰金控"],
    ["中華電信"],
    ["台灣大哥大"],
    ["台積電"],
    ["BlockTempo"],
    ["abmedia"]
]

# Title: "Social Media Trends -- analyze social-media content, rate it as
# positive / negative / neutral, and track the overall trend"
TITLE = """<h1>Social Media Trends 💬 分析社群相關資訊,並判斷其正、負、中立等評價及趨勢 </h1>"""
SUBTITLE = """<h2><a href='https://www.twman.org' target='_blank'>TonTon Huang Ph.D. @ 2024/11 </a><br></h2>"""
LINKS = """
<a href='https://github.com/Deep-Learning-101' target='_blank'>Deep Learning 101 Github</a> | <a href='http://deeplearning101.twman.org' target='_blank'>Deep Learning 101</a> | <a href='https://www.facebook.com/groups/525579498272187/' target='_blank'>台灣人工智慧社團 FB</a> | <a href='https://www.youtube.com/c/DeepLearning101' target='_blank'>YouTube</a><br>
<a href='https://reurl.cc/g6GlZX' target='_blank'>手把手帶你一起踩AI坑</a> | <a href='https://blog.twman.org/2024/11/diffusion.html' target='_blank'>ComfyUI + Stable Diffusion</a><br>
<a href='https://blog.twman.org/2024/08/LLM.html' target='_blank'>白話文手把手帶你科普 GenAI</a> | <a href='https://blog.twman.org/2024/09/LLM.html' target='_blank'>大型語言模型直接就打完收工?</a><br>
<a href='https://blog.twman.org/2023/04/GPT.html' target='_blank'>什麼是大語言模型,它是什麼?想要嗎?</a> | <a href='https://blog.twman.org/2024/07/RAG.html' target='_blank'>那些檢索增強生成要踩的坑 </a><br>
<a href='https://blog.twman.org/2021/04/ASR.html' target='_blank'>那些語音處理 (Speech Processing) 踩的坑</a> | <a href='https://blog.twman.org/2021/04/NLP.html' target='_blank'>那些自然語言處理 (Natural Language Processing, NLP) 踩的坑</a><br>
<a href='https://blog.twman.org/2024/02/asr-tts.html' target='_blank'>那些ASR和TTS可能會踩的坑</a> | <a href='https://blog.twman.org/2024/02/LLM.html' target='_blank'>那些大模型開發會踩的坑</a><br>
<a href='https://blog.twman.org/2023/07/wsl.html' target='_blank'>用PPOCRLabel來幫PaddleOCR做OCR的微調和標註</a> | <a href='https://blog.twman.org/2023/07/HugIE.html' target='_blank'>基於機器閱讀理解和指令微調的統一信息抽取框架之診斷書醫囑資訊擷取分析</a><br>
"""
with gr.Blocks() as iface:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)
    gr.HTML(LINKS)
    gr.Interface(
        fn=handle_input,
        inputs=user_input,
        outputs="text",
        examples=examples,
        allow_flagging="never"
    )

iface.launch()
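# Note: on Hugging Face Spaces the default launch() settings are sufficient; when
# running locally, iface.launch(share=True) could be used to expose a temporary
# public URL (standard Gradio option, not specific to this app).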