import gradio as gr
import os
import shutil
import sys
import torch
from huggingface_hub import hf_hub_download, login

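# Authenticate with the Hugging Face Hub; the token, tokenizer name, and
# model repository id are all supplied through environment variables.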
login(token=os.environ["HF_TOKEN"])
tokenizer = os.environ["TOKENIZER"]
repo_id = os.environ["REPO_ID"]

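# Keep CPU threading modest and enable cuDNN autotuning when a GPU is present.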
torch.set_num_threads(1)
if torch.cuda.is_available():
    torch.backends.cudnn.benchmark = True

# Download the inference script (gen.py) from the model repository and copy it
# into the working directory so it can be imported below.
try:
    generate_file = hf_hub_download(
        repo_id=repo_id,
        filename="gen.py",
        token=os.environ["HF_TOKEN"]
    )
    shutil.copy(generate_file, "./gen.py")
except Exception as e:
    print(f"Error downloading files: {e}")
    sys.exit(1)

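# gen.py now sits in the working directory; make it importable and load the
# chat handler it exposes.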
sys.path.append('.')
from gen import chat_interface

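# Assemble the Gradio UI: a centered column with a title, the chat widget, and
# a short disclaimer.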
with gr.Blocks(title="Madhuram Model Chat") as demo:
    with gr.Column(elem_id="main-container"):
        gr.Markdown("# Madhuram Chat Interface", elem_classes="center-text")
        
        chatbot = gr.ChatInterface(
            fn=chat_interface,
            type="messages"
        )
        
        gr.Markdown("*Disclaimer: This is a demo version of Madhuram. It may occasionally generate incorrect or incomplete responses, so please verify important information independently. The complete model, including the features missing here, will be available through our own playground.*", elem_classes="disclaimer")

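# Minimal styling: constrain the layout width, center the heading, and
# de-emphasize the disclaimer text.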
demo.css = """
#main-container {
    max-width: 800px;
    margin: 0 auto;
    padding: 20px;
}
.center-text {
    text-align: center;
}
.disclaimer {
    text-align: center;
    color: #666;
    font-size: 0.9em;
    margin-bottom: 20px;
}
"""

if __name__ == "__main__":
    demo.launch()