AfriAISolutions committed (verified)
Commit 998fb7a · 1 Parent(s): 7f69459

Create app1.py

Files changed (1)
  1. app1.py +129 -0
app1.py ADDED
@@ -0,0 +1,129 @@
+ import os
+ import torch
+ import gradio as gr
+ from PIL import Image
+ from huggingface_hub import login
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+ from quanto import quantize_model
+
+ # === [AUTHENTICATION] ===
+ hf_token = os.getenv("HF_TOKEN")
+ if hf_token is None:
+     raise ValueError("Please set the HF_TOKEN environment variable with your Hugging Face token")
+ login(token=hf_token)
+
+ # === [TRANSLATOR] ===
+ translator = pipeline("translation", model="facebook/nllb-200-distilled-600M")
+
+ # === [LOAD & QUANTIZE MODEL] ===
+ model_name = "ContactDoctor/Bio-Medical-Llama-3-2-1B-CoT-012025"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ print("Loading base model...")
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     torch_dtype=torch.float16,
+     device_map="auto"
+ )
+
+ print("Quantizing model...")
+ quantized_model = quantize_model(model, bits=4)
+
+ print("Initializing pipeline...")
+ text_gen_pipeline = pipeline(
+     "text-generation",
+     model=quantized_model,
+     tokenizer=tokenizer,
+     torch_dtype=torch.float16,
+     device_map="auto"
+ )
+
+ # === [SYSTEM MESSAGE] ===
+ system_message = {
+     "role": "system",
+     "content": (
+         "You are a helpful, respectful, and knowledgeable medical assistant developed by the AI team at AfriAI Solutions, Senegal. "
+         "Provide brief, clear definitions when answering medical questions. After giving a concise response, ask the user if they would like more information about symptoms, causes, or treatments. "
+         "Always encourage users to consult healthcare professionals for personalized advice."
+     )
+ }
+
+ messages = [system_message]
+ max_history = 10
+
+ salutations = ["bonjour", "salut", "bonsoir", "coucou"]
+ remerciements = ["merci", "je vous remercie", "thanks"]
+ au_revoir = ["au revoir", "à bientôt", "bye", "bonne journée", "à la prochaine"]
+
+ def detect_smalltalk(user_input):
+     lower_input = user_input.lower().strip()
+     if any(phrase in lower_input for phrase in salutations):
+         return "Bonjour ! Comment puis-je vous aider aujourd'hui ?", True
+     if any(phrase in lower_input for phrase in remerciements):
+         return "Avec plaisir ! Souhaitez-vous poser une autre question médicale ?", True
+     if any(phrase in lower_input for phrase in au_revoir):
+         return "Au revoir ! Prenez soin de votre santé et n'hésitez pas à revenir si besoin.", True
+     return "", False
+
+ def medical_chatbot(user_input):
+     global messages
+
+     smalltalk_response, handled = detect_smalltalk(user_input)
+     if handled:
+         return smalltalk_response
+
+     translated = translator(user_input, src_lang="fra_Latn", tgt_lang="eng_Latn")[0]['translation_text']
+
+     messages.append({"role": "user", "content": translated})
+     if len(messages) > max_history * 2:
+         messages = [system_message] + messages[-max_history * 2:]
+
+     prompt = text_gen_pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+
+     response = text_gen_pipeline(
+         prompt,
+         max_new_tokens=1024,
+         do_sample=True,
+         temperature=0.4,
+         top_k=150,
+         top_p=0.75,
+         eos_token_id=[
+             text_gen_pipeline.tokenizer.eos_token_id,
+             text_gen_pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
+         ]
+     )
+
+     output = response[0]['generated_text'][len(prompt):].strip()
+     translated_back = translator(output, src_lang="eng_Latn", tgt_lang="fra_Latn")[0]['translation_text']
+
+     messages.append({"role": "assistant", "content": translated_back})
+     return translated_back
+
+ # === [LOGO LOAD] ===
+ logo = Image.open("AfriAI Solutions.jpg")
+
+ # === [GRADIO UI] ===
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="indigo")) as demo:
+     with gr.Row():
+         gr.Image(value=logo, show_label=False, show_download_button=False, interactive=False, height=150)
+
+     gr.Markdown("""
+     # 🤖 Chatbot Médical AfriAI Solutions
+     **Posez votre question médicale en français.**
+     Le chatbot vous répondra brièvement et avec bienveillance, puis vous demandera si vous souhaitez plus de détails.
+     """, elem_id="title")
+
+     chatbot = gr.Chatbot(label="Chat avec le Médecin Virtuel")
+     msg = gr.Textbox(label="Votre question", placeholder="Exemple : Quels sont les symptômes du paludisme ?")
+     clear = gr.Button("Effacer la conversation", variant="secondary")
+
+     def respond(message, history):
+         response = medical_chatbot(message)
+         history = history or []
+         history.append((message, response))
+         return "", history
+
+     msg.submit(respond, [msg, chatbot], [msg, chatbot])
+     clear.click(lambda: ("", []), None, [msg, chatbot])
+
+ demo.launch()
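
For local testing, app1.py assumes an HF_TOKEN environment variable (or Space secret) and a logo file named "AfriAI Solutions.jpg" in the working directory. A minimal pre-flight sketch along these lines (a hypothetical helper, not part of this commit) can verify both before the models start downloading:

# preflight.py - hypothetical helper, not included in this commit
import os

def preflight():
    # app1.py reads the Hugging Face token from HF_TOKEN and opens a local logo image.
    missing = []
    if os.getenv("HF_TOKEN") is None:
        missing.append("HF_TOKEN environment variable")
    if not os.path.exists("AfriAI Solutions.jpg"):
        missing.append("logo file 'AfriAI Solutions.jpg'")
    if missing:
        raise SystemExit("Missing before launch: " + ", ".join(missing))

if __name__ == "__main__":
    preflight()
    print("Environment looks ready; start the demo with: python app1.py")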