Tonic committed on
Commit
190355d
·
1 Parent(s): 713bae4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -13
app.py CHANGED
@@ -16,14 +16,6 @@ def wrap_text(text, width=90):
16
  return wrapped_text
17
 
18
  def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):
19
- """
20
- Generates text using a large language model, given a user input and a system prompt.
21
- Args:
22
- user_input: The user's input text to generate a response for.
23
- system_prompt: Optional system prompt.
24
- Returns:
25
- A string containing the generated text.
26
- """
27
  # Combine user input and system prompt
28
  formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"
29
 
@@ -44,18 +36,14 @@ def multimodal_prompt(user_input, system_prompt="You are an expert medical analy
44
  do_sample=True
45
  )
46
 
47
- # Decode the response
48
  response_text = tokenizer.decode(output[0], skip_special_tokens=True)
49
 
50
  return response_text
51
 
52
- # Define the device
53
  device = "cuda" if torch.cuda.is_available() else "cpu"
54
 
55
- # Use the base model's ID
56
  base_model_id = "OpenLLM-France/Claire-Mistral-7B-0.1"
57
 
58
- # Instantiate the Tokenizer
59
  tokenizer = AutoTokenizer.from_pretrained("OpenLLM-France/Claire-Mistral-7B-0.1", trust_remote_code=True, padding_side="left")
60
  tokenizer.pad_token = tokenizer.eos_token
61
  tokenizer.padding_side = 'left'
@@ -91,7 +79,7 @@ class ChatBot:
91
  bot = ChatBot()
92
 
93
  title = "👋🏻Welcome to Tonic's Claire Chat🚀"
94
- description = "You can use this Space to test out the current model (ClaireLLM) or duplicate this Space and use it for any other model on 🤗HuggingFace. Join me on Discord to build together."
95
  examples = [["Oueche Normal, Claire, ça va ou quoi?", "bonjour je m'appele Claire et je suis une assistante francophone-first conçu par openLLM"]]
96
 
97
  iface = gr.Interface(
 
16
  return wrapped_text
17
 
18
  def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):
 
 
 
 
 
 
 
 
19
  # Combine user input and system prompt
20
  formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"
21
 
 
36
  do_sample=True
37
  )
38
 
 
39
  response_text = tokenizer.decode(output[0], skip_special_tokens=True)
40
 
41
  return response_text
42
 
 
43
  device = "cuda" if torch.cuda.is_available() else "cpu"
44
 
 
45
  base_model_id = "OpenLLM-France/Claire-Mistral-7B-0.1"
46
 
 
47
  tokenizer = AutoTokenizer.from_pretrained("OpenLLM-France/Claire-Mistral-7B-0.1", trust_remote_code=True, padding_side="left")
48
  tokenizer.pad_token = tokenizer.eos_token
49
  tokenizer.padding_side = 'left'
 
79
  bot = ChatBot()
80
 
81
  title = "👋🏻Welcome to Tonic's Claire Chat🚀"
82
+ description = "You can use this Space to test out the current model ([ClaireLLM](https://huggingface.co/OpenLLM-France/Claire-Mistral-7B-0.1)) or duplicate this Space and use it for any other model on 🤗HuggingFace. Join me on [Discord to build together](https://discord.gg/nXx5wbX9)."
83
  examples = [["Oueche Normal, Claire, ça va ou quoi?", "bonjour je m'appele Claire et je suis une assistante francophone-first conçu par openLLM"]]
84
 
85
  iface = gr.Interface(